From 5806ddd7948cc14b8d4b04e25b2831b37fee720d Mon Sep 17 00:00:00 2001 From: Qiuxu Zhuo Date: Wed, 13 Sep 2017 18:42:14 +0800 Subject: [PATCH 0001/3326] EDAC, sb_edac: Don't create a second memory controller if HA1 is not present commit 15cc3ae001873845b5d842e212478a6570c7d938 upstream. Yi Zhang reported the following failure on a 2-socket Haswell (E5-2603v3) server (DELL PowerEdge 730xd): EDAC sbridge: Some needed devices are missing EDAC MC: Removed device 0 for sb_edac.c Haswell SrcID#0_Ha#0: DEV 0000:7f:12.0 EDAC MC: Removed device 1 for sb_edac.c Haswell SrcID#1_Ha#0: DEV 0000:ff:12.0 EDAC sbridge: Couldn't find mci handler EDAC sbridge: Couldn't find mci handler EDAC sbridge: Failed to register device with error -19. The refactored sb_edac driver creates the IMC1 (the 2nd memory controller) if any IMC1 device is present. In this case only HA1_TA of IMC1 was present, but the driver expected to find HA1/HA1_TM/HA1_TAD[0-3] devices too, leading to the above failure. The document [1] says the 'E5-2603 v3' CPU has 4 memory channels max. Yi Zhang inserted one DIMM per channel for each CPU, and did random error address injection test with this patch: 4024 addresses fell in TOLM hole area 12715 addresses fell in CPU_SrcID#0_Ha#0_Chan#0_DIMM#0 12774 addresses fell in CPU_SrcID#0_Ha#0_Chan#1_DIMM#0 12798 addresses fell in CPU_SrcID#0_Ha#0_Chan#2_DIMM#0 12913 addresses fell in CPU_SrcID#0_Ha#0_Chan#3_DIMM#0 12674 addresses fell in CPU_SrcID#1_Ha#0_Chan#0_DIMM#0 12686 addresses fell in CPU_SrcID#1_Ha#0_Chan#1_DIMM#0 12882 addresses fell in CPU_SrcID#1_Ha#0_Chan#2_DIMM#0 12934 addresses fell in CPU_SrcID#1_Ha#0_Chan#3_DIMM#0 106400 addresses were injected totally. The test result shows that all the 4 channels belong to IMC0 per CPU, so the server really only has one IMC per CPU. In the 1st page of chapter 2 in datasheet [2], it also says 'E5-2600 v3' implements either one or two IMCs. For CPUs with one IMC, IMC1 is not used and should be ignored. Thus, do not create a second memory controller if the key HA1 is absent. [1] http://ark.intel.com/products/83349/Intel-Xeon-Processor-E5-2603-v3-15M-Cache-1_60-GHz [2] https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e5-v3-datasheet-vol-2.pdf Reported-and-tested-by: Yi Zhang Signed-off-by: Qiuxu Zhuo Cc: Tony Luck Cc: linux-edac Link: http://lkml.kernel.org/r/20170913104214.7325-1-qiuxu.zhuo@intel.com [ Massage commit message. 
] Signed-off-by: Borislav Petkov Signed-off-by: Greg Kroah-Hartman --- drivers/edac/sb_edac.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index dc059165401164..6241fa787d6646 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -462,6 +462,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = { static const struct pci_id_descr pci_dev_descr_ibridge[] = { /* Processor Home Agent */ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) }, /* Memory controller */ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) }, @@ -472,7 +473,6 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = { { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) }, /* Optional, mode 2HA */ - { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) }, @@ -2291,6 +2291,13 @@ static int sbridge_get_onedevice(struct pci_dev **prev, next_imc: sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev); if (!sbridge_dev) { + /* If the HA1 wasn't found, don't create EDAC second memory controller */ + if (dev_descr->dom == IMC1 && devno != 1) { + edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n", + PCI_VENDOR_ID_INTEL, dev_descr->dev_id); + pci_dev_put(pdev); + return 0; + } if (dev_descr->dom == SOCK) goto out_imc; From 34c45c1ee883d080f2fc9a668714ae317c952f6b Mon Sep 17 00:00:00 2001 From: Adam Wallis Date: Thu, 2 Nov 2017 08:53:30 -0400 Subject: [PATCH 0002/3326] dmaengine: dmatest: warn user when dma test times out commit a9df21e34b422f79d9a9fa5c3eff8c2a53491be6 upstream. Commit adfa543e7314 ("dmatest: don't use set_freezable_with_signal()") introduced a bug (that is in fact documented by the patch commit text) that leaves behind a dangling pointer. Since the done_wait structure is allocated on the stack, future invocations to the DMATEST can produce undesirable results (e.g., corrupted spinlocks). Ideally, this would be cleaned up in the thread handler, but at the very least, the kernel is left in a very precarious scenario that can lead to some long debug sessions when the crash comes later. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=197605 Signed-off-by: Adam Wallis Signed-off-by: Vinod Koul Signed-off-by: Greg Kroah-Hartman --- drivers/dma/dmatest.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 34ff53290b0378..47edc7fbf91f52 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -702,6 +702,7 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ + WARN(1, "dmatest: Kernel stack may be corrupted!!\n"); dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); From 16edf1a6a8a52040841e3e875c76bad0aa28f85f Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 9 Oct 2017 20:14:48 +0200 Subject: [PATCH 0003/3326] media: imon: Fix null-ptr-deref in imon_probe commit 58fd55e838276a0c13d1dc7c387f90f25063cbf3 upstream. It seems that the return value of usb_ifnum_to_if() can be NULL and needs to be checked. 
Signed-off-by: Arvind Yadav Tested-by: Andrey Konovalov Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/media/rc/imon.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 7b3f31cc63d250..0c46155a8e9daa 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -2517,6 +2517,11 @@ static int imon_probe(struct usb_interface *interface, mutex_lock(&driver_lock); first_if = usb_ifnum_to_if(usbdev, 0); + if (!first_if) { + ret = -ENODEV; + goto fail; + } + first_if_ctx = usb_get_intfdata(first_if); if (ifnum == 0) { From 75d9dd294e2a41ddce75cc02d1675a9cf40873fc Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Nov 2017 10:38:21 -0400 Subject: [PATCH 0004/3326] media: dib0700: fix invalid dvb_detach argument commit eb0c19942288569e0ae492476534d5a485fb8ab4 upstream. dvb_detach(arg) calls symbol_put_addr(arg), where arg should be a pointer to a function. Right now a pointer to state->dib7000p_ops is passed to dvb_detach(), which causes a BUG() in symbol_put_addr() as discovered by syzkaller. Pass state->dib7000p_ops.set_wbd_ref instead. ------------[ cut here ]------------ kernel BUG at kernel/module.c:1081! invalid opcode: 0000 [#1] PREEMPT SMP KASAN Modules linked in: CPU: 1 PID: 1151 Comm: kworker/1:1 Tainted: G W 4.14.0-rc1-42251-gebb2c2437d80 #224 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event task: ffff88006a336300 task.stack: ffff88006a7c8000 RIP: 0010:symbol_put_addr+0x54/0x60 kernel/module.c:1083 RSP: 0018:ffff88006a7ce210 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff880062a8d190 RCX: 0000000000000000 RDX: dffffc0000000020 RSI: ffffffff85876d60 RDI: ffff880062a8d190 RBP: ffff88006a7ce218 R08: 1ffff1000d4f9c12 R09: 1ffff1000d4f9ae4 R10: 1ffff1000d4f9bed R11: 0000000000000000 R12: ffff880062a8d180 R13: 00000000ffffffed R14: ffff880062a8d190 R15: ffff88006947c000 FS: 0000000000000000(0000) GS:ffff88006c900000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f6416532000 CR3: 00000000632f5000 CR4: 00000000000006e0 Call Trace: stk7070p_frontend_attach+0x515/0x610 drivers/media/usb/dvb-usb/dib0700_devices.c:1013 dvb_usb_adapter_frontend_init+0x32b/0x660 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:286 dvb_usb_adapter_init drivers/media/usb/dvb-usb/dvb-usb-init.c:86 dvb_usb_init drivers/media/usb/dvb-usb/dvb-usb-init.c:162 dvb_usb_device_init+0xf70/0x17f0 drivers/media/usb/dvb-usb/dvb-usb-init.c:277 dib0700_probe+0x171/0x5a0 drivers/media/usb/dvb-usb/dib0700_core.c:886 usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26e/0x3d0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_set_configuration+0x104e/0x1870 drivers/usb/core/message.c:1932 generic_probe+0x73/0xe0 drivers/usb/core/generic.c:174 usb_probe_device+0xaf/0xe0 drivers/usb/core/driver.c:266 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26e/0x3d0 
drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_new_device+0x7b8/0x1020 drivers/usb/core/hub.c:2457 hub_port_connect drivers/usb/core/hub.c:4903 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x194d/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc7f/0x1db0 kernel/workqueue.c:2119 worker_thread+0x221/0x1850 kernel/workqueue.c:2253 kthread+0x3a1/0x470 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 Code: ff ff 48 85 c0 74 24 48 89 c7 e8 48 ea ff ff bf 01 00 00 00 e8 de 20 e3 ff 65 8b 05 b7 2f c2 7e 85 c0 75 c9 e8 f9 0b c1 ff eb c2 <0f> 0b 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 48 b8 00 00 RIP: symbol_put_addr+0x54/0x60 RSP: ffff88006a7ce210 ---[ end trace b75b357739e7e116 ]--- Signed-off-by: Andrey Konovalov Cc: Ben Hutchings Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/dvb-usb/dib0700_devices.c | 24 ++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 6020170fe99a3d..92098c1b78e51c 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -291,7 +291,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap) stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -325,7 +325,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap) stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -478,7 +478,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap) &stk7700ph_dib7700_xc3028_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -1010,7 +1010,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap) &dib7070p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -1068,7 +1068,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap) &dib7770p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3056,7 +3056,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap) if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. 
Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config); @@ -3109,7 +3109,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap) /* initialize IC 0 */ if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3139,7 +3139,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap) i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1); if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3214,7 +3214,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap) 1, 0x10, &tfe7790p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, @@ -3309,7 +3309,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap) stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3384,7 +3384,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap) stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -3620,7 +3620,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap) if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) { /* Demodulator not found for some reason? */ - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } From a37b2a1cc6e99812190dac5a790affe7368d81a6 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 5 Nov 2017 18:30:44 -0800 Subject: [PATCH 0005/3326] crypto: dh - Fix double free of ctx->p commit 12d41a023efb01b846457ccdbbcbe2b65a87d530 upstream. When setting the secret with the software Diffie-Hellman implementation, if allocating 'g' failed (e.g. if it was longer than MAX_EXTERN_MPI_BITS), then 'p' was freed twice: once immediately, and once later when the crypto_kpp tfm was destroyed. Fix it by using dh_free_ctx() (renamed to dh_clear_ctx()) in the error paths, as that correctly sets the pointers to NULL. KASAN report: MPI: mpi too large (32760 bits) ================================================================== BUG: KASAN: use-after-free in mpi_free+0x131/0x170 Read of size 4 at addr ffff88006c7cdf90 by task reproduce_doubl/367 CPU: 1 PID: 367 Comm: reproduce_doubl Not tainted 4.14.0-rc7-00040-g05298abde6fe #7 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: dump_stack+0xb3/0x10b ? mpi_free+0x131/0x170 print_address_description+0x79/0x2a0 ? 
mpi_free+0x131/0x170 kasan_report+0x236/0x340 ? akcipher_register_instance+0x90/0x90 __asan_report_load4_noabort+0x14/0x20 mpi_free+0x131/0x170 ? akcipher_register_instance+0x90/0x90 dh_exit_tfm+0x3d/0x140 crypto_kpp_exit_tfm+0x52/0x70 crypto_destroy_tfm+0xb3/0x250 __keyctl_dh_compute+0x640/0xe90 ? kasan_slab_free+0x12f/0x180 ? dh_data_from_key+0x240/0x240 ? key_create_or_update+0x1ee/0xb20 ? key_instantiate_and_link+0x440/0x440 ? lock_contended+0xee0/0xee0 ? kfree+0xcf/0x210 ? SyS_add_key+0x268/0x340 keyctl_dh_compute+0xb3/0xf1 ? __keyctl_dh_compute+0xe90/0xe90 ? SyS_add_key+0x26d/0x340 ? entry_SYSCALL_64_fastpath+0x5/0xbe ? trace_hardirqs_on_caller+0x3f4/0x560 SyS_keyctl+0x72/0x2c0 entry_SYSCALL_64_fastpath+0x1f/0xbe RIP: 0033:0x43ccf9 RSP: 002b:00007ffeeec96158 EFLAGS: 00000246 ORIG_RAX: 00000000000000fa RAX: ffffffffffffffda RBX: 000000000248b9b9 RCX: 000000000043ccf9 RDX: 00007ffeeec96170 RSI: 00007ffeeec96160 RDI: 0000000000000017 RBP: 0000000000000046 R08: 0000000000000000 R09: 0248b9b9143dc936 R10: 0000000000001000 R11: 0000000000000246 R12: 0000000000000000 R13: 0000000000409670 R14: 0000000000409700 R15: 0000000000000000 Allocated by task 367: save_stack_trace+0x16/0x20 kasan_kmalloc+0xeb/0x180 kmem_cache_alloc_trace+0x114/0x300 mpi_alloc+0x4b/0x230 mpi_read_raw_data+0xbe/0x360 dh_set_secret+0x1dc/0x460 __keyctl_dh_compute+0x623/0xe90 keyctl_dh_compute+0xb3/0xf1 SyS_keyctl+0x72/0x2c0 entry_SYSCALL_64_fastpath+0x1f/0xbe Freed by task 367: save_stack_trace+0x16/0x20 kasan_slab_free+0xab/0x180 kfree+0xb5/0x210 mpi_free+0xcb/0x170 dh_set_secret+0x2d7/0x460 __keyctl_dh_compute+0x623/0xe90 keyctl_dh_compute+0xb3/0xf1 SyS_keyctl+0x72/0x2c0 entry_SYSCALL_64_fastpath+0x1f/0xbe Fixes: 802c7f1c84e4 ("crypto: dh - Add DH software implementation") Signed-off-by: Eric Biggers Reviewed-by: Tudor Ambarus Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/dh.c | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/crypto/dh.c b/crypto/dh.c index b1032a5c1bfa1a..aadaf36fb56f90 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -21,19 +21,12 @@ struct dh_ctx { MPI xa; }; -static inline void dh_clear_params(struct dh_ctx *ctx) +static void dh_clear_ctx(struct dh_ctx *ctx) { mpi_free(ctx->p); mpi_free(ctx->g); - ctx->p = NULL; - ctx->g = NULL; -} - -static void dh_free_ctx(struct dh_ctx *ctx) -{ - dh_clear_params(ctx); mpi_free(ctx->xa); - ctx->xa = NULL; + memset(ctx, 0, sizeof(*ctx)); } /* @@ -71,10 +64,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params) return -EINVAL; ctx->g = mpi_read_raw_data(params->g, params->g_size); - if (!ctx->g) { - mpi_free(ctx->p); + if (!ctx->g) return -EINVAL; - } return 0; } @@ -86,21 +77,23 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf, struct dh params; /* Free the old MPI key if any */ - dh_free_ctx(ctx); + dh_clear_ctx(ctx); if (crypto_dh_decode_key(buf, len, ¶ms) < 0) - return -EINVAL; + goto err_clear_ctx; if (dh_set_params(ctx, ¶ms) < 0) - return -EINVAL; + goto err_clear_ctx; ctx->xa = mpi_read_raw_data(params.key, params.key_size); - if (!ctx->xa) { - dh_clear_params(ctx); - return -EINVAL; - } + if (!ctx->xa) + goto err_clear_ctx; return 0; + +err_clear_ctx: + dh_clear_ctx(ctx); + return -EINVAL; } static int dh_compute_value(struct kpp_request *req) @@ -158,7 +151,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm) { struct dh_ctx *ctx = dh_get_ctx(tfm); - dh_free_ctx(ctx); + dh_clear_ctx(ctx); } static struct kpp_alg dh = { From 
716b9ea8c6b07e293eb4af7cd68cbbb6c9490120 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 5 Nov 2017 18:30:45 -0800 Subject: [PATCH 0006/3326] crypto: dh - Don't permit 'p' to be 0 commit 199512b1234f09e44d592153ec82b44212b2f0c4 upstream. If 'p' is 0 for the software Diffie-Hellman implementation, then dh_max_size() returns 0. In the case of KEYCTL_DH_COMPUTE, this causes ZERO_SIZE_PTR to be passed to sg_init_one(), which with CONFIG_DEBUG_SG=y triggers the 'BUG_ON(!virt_addr_valid(buf));' in sg_set_buf(). Fix this by making crypto_dh_decode_key() reject 0 for 'p'. p=0 makes no sense for any DH implementation because 'p' is supposed to be a prime number. Moreover, 'mod 0' is not mathematically defined. Bug report: kernel BUG at ./include/linux/scatterlist.h:140! invalid opcode: 0000 [#1] SMP KASAN CPU: 0 PID: 27112 Comm: syz-executor2 Not tainted 4.14.0-rc7-00010-gf5dbb5d0ce32-dirty #7 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.3-20171021_125229-anatol 04/01/2014 task: ffff88006caac0c0 task.stack: ffff88006c7c8000 RIP: 0010:sg_set_buf include/linux/scatterlist.h:140 [inline] RIP: 0010:sg_init_one+0x1b3/0x240 lib/scatterlist.c:156 RSP: 0018:ffff88006c7cfb08 EFLAGS: 00010216 RAX: 0000000000010000 RBX: ffff88006c7cfe30 RCX: 00000000000064ee RDX: ffffffff81cf64c3 RSI: ffffc90000d72000 RDI: ffffffff92e937e0 RBP: ffff88006c7cfb30 R08: ffffed000d8f9fab R09: ffff88006c7cfd30 R10: 0000000000000005 R11: ffffed000d8f9faa R12: ffff88006c7cfd30 R13: 0000000000000000 R14: 0000000000000010 R15: ffff88006c7cfc50 FS: 00007fce190fa700(0000) GS:ffff88003ea00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fffc6b33db8 CR3: 000000003cf64000 CR4: 00000000000006f0 Call Trace: __keyctl_dh_compute+0xa95/0x19b0 security/keys/dh.c:360 keyctl_dh_compute+0xac/0x100 security/keys/dh.c:434 SYSC_keyctl security/keys/keyctl.c:1745 [inline] SyS_keyctl+0x72/0x2c0 security/keys/keyctl.c:1641 entry_SYSCALL_64_fastpath+0x1f/0xbe RIP: 0033:0x4585c9 RSP: 002b:00007fce190f9bd8 EFLAGS: 00000216 ORIG_RAX: 00000000000000fa RAX: ffffffffffffffda RBX: 0000000000738020 RCX: 00000000004585c9 RDX: 000000002000d000 RSI: 0000000020000ff4 RDI: 0000000000000017 RBP: 0000000000000046 R08: 0000000020008000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000216 R12: 00007fff6e610cde R13: 00007fff6e610cdf R14: 00007fce190fa700 R15: 0000000000000000 Code: 03 0f b6 14 02 48 89 f8 83 e0 07 83 c0 03 38 d0 7c 04 84 d2 75 33 5b 45 89 6c 24 14 41 5c 41 5d 41 5e 41 5f 5d c3 e8 fd 8f 68 ff <0f> 0b e8 f6 8f 68 ff 0f 0b e8 ef 8f 68 ff 0f 0b e8 e8 8f 68 ff 20 RIP: sg_set_buf include/linux/scatterlist.h:140 [inline] RSP: ffff88006c7cfb08 RIP: sg_init_one+0x1b3/0x240 lib/scatterlist.c:156 RSP: ffff88006c7cfb08 Fixes: 802c7f1c84e4 ("crypto: dh - Add DH software implementation") Reviewed-by: Tudor Ambarus Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/dh_helper.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c index 8ba8a3f826200c..708ae20d2d3ccf 100644 --- a/crypto/dh_helper.c +++ b/crypto/dh_helper.c @@ -90,6 +90,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params) params->p = (void *)(ptr + params->key_size); params->g = (void *)(ptr + params->key_size + params->p_size); + /* + * Don't permit 'p' to be 0. It's not a prime number, and it's subject + * to corner cases such as 'mod 0' being undefined or + * crypto_kpp_maxsize() returning 0. 
+ */ + if (memchr_inv(params->p, 0, params->p_size) == NULL) + return -EINVAL; + return 0; } EXPORT_SYMBOL_GPL(crypto_dh_decode_key); From 5e56be0e260cc367d2eb1823a9b476d018d80152 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 5 Nov 2017 18:30:46 -0800 Subject: [PATCH 0007/3326] crypto: dh - Don't permit 'key' or 'g' size longer than 'p' commit ccd9888f14a8019c0bbdeeae758aba1f58693712 upstream. The "qat-dh" DH implementation assumes that 'key' and 'g' can be copied into a buffer with size 'p_size'. However it was never checked that that was actually the case, which most likely allowed users to cause a buffer underflow via KEYCTL_DH_COMPUTE. Fix this by updating crypto_dh_decode_key() to verify this precondition for all DH implementations. Fixes: c9839143ebbf ("crypto: qat - Add DH support") Signed-off-by: Eric Biggers Reviewed-by: Tudor Ambarus Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/dh_helper.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c index 708ae20d2d3ccf..7f00c771fe8d7c 100644 --- a/crypto/dh_helper.c +++ b/crypto/dh_helper.c @@ -83,6 +83,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params) if (secret.len != crypto_dh_key_len(params)) return -EINVAL; + /* + * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since + * some drivers assume otherwise. + */ + if (params->key_size > params->p_size || + params->g_size > params->p_size) + return -EINVAL; + /* Don't allocate memory. Set pointers to data within * the given buffer */ From 7d53b8eb50ae78c685139fa1a856e2bef4876763 Mon Sep 17 00:00:00 2001 From: raveendra padasalagi Date: Tue, 29 Aug 2017 15:08:10 +0530 Subject: [PATCH 0008/3326] crypto: brcm - Explicity ACK mailbox message commit f0e2ce58f853634d7ad1a418a49bd5fbd556227c upstream. Add support to explicity ACK mailbox message because after sending message we can know the send status via error attribute of brcm_message. This is needed to support "txdone_ack" supported in mailbox controller driver. Fixes: 9d12ba86f818 ("crypto: brcm - Add Broadcom SPU driver") Signed-off-by: Raveendra Padasalagi Reviewed-by: Anup Patel Reviewed-by: Scott Branden Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/bcm/cipher.c | 101 ++++++++++++++++-------------------- 1 file changed, 46 insertions(+), 55 deletions(-) diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 8685c7e4debdc1..ee52c355bee0a6 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -256,6 +256,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg, return 0; } +static int mailbox_send_message(struct brcm_message *mssg, u32 flags, + u8 chan_idx) +{ + int err; + int retry_cnt = 0; + struct device *dev = &(iproc_priv.pdev->dev); + + err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg); + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) { + while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { + /* + * Mailbox queue is full. Since MAY_SLEEP is set, assume + * not in atomic context and we can wait and try again. 
+ */ + retry_cnt++; + usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); + err = mbox_send_message(iproc_priv.mbox[chan_idx], + mssg); + atomic_inc(&iproc_priv.mb_no_spc); + } + } + if (err < 0) { + atomic_inc(&iproc_priv.mb_send_fail); + return err; + } + + /* Check error returned by mailbox controller */ + err = mssg->error; + if (unlikely(err < 0)) { + dev_err(dev, "message error %d", err); + /* Signal txdone for mailbox channel */ + } + + /* Signal txdone for mailbox channel */ + mbox_client_txdone(iproc_priv.mbox[chan_idx], err); + return err; +} + /** * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in * a single SPU request message, starting at the current position in the request @@ -293,7 +331,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx) u32 pad_len; /* total length of all padding */ bool update_key = false; struct brcm_message *mssg; /* mailbox message */ - int retry_cnt = 0; /* number of entries in src and dst sg in mailbox message. */ u8 rx_frag_num = 2; /* response header and STATUS */ @@ -462,24 +499,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx) if (err) return err; - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { - /* - * Mailbox queue is full. Since MAY_SLEEP is set, assume - * not in atomic context and we can wait and try again. - */ - retry_cnt++; - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], - mssg); - atomic_inc(&iproc_priv.mb_no_spc); - } - } - if (unlikely(err < 0)) { - atomic_inc(&iproc_priv.mb_send_fail); + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); + if (unlikely(err < 0)) return err; - } return -EINPROGRESS; } @@ -710,7 +732,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) u32 spu_hdr_len; unsigned int digestsize; u16 rem = 0; - int retry_cnt = 0; /* * number of entries in src and dst sg. Always includes SPU msg header. @@ -904,24 +925,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) if (err) return err; - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { - /* - * Mailbox queue is full. Since MAY_SLEEP is set, assume - * not in atomic context and we can wait and try again. - */ - retry_cnt++; - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], - mssg); - atomic_inc(&iproc_priv.mb_no_spc); - } - } - if (err < 0) { - atomic_inc(&iproc_priv.mb_send_fail); + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); + if (unlikely(err < 0)) return err; - } + return -EINPROGRESS; } @@ -1320,7 +1327,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx) int assoc_nents = 0; bool incl_icv = false; unsigned int digestsize = ctx->digestsize; - int retry_cnt = 0; /* number of entries in src and dst sg. Always includes SPU msg header. */ @@ -1558,24 +1564,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx) if (err) return err; - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { - /* - * Mailbox queue is full. Since MAY_SLEEP is set, assume - * not in atomic context and we can wait and try again. 
- */ - retry_cnt++; - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], - mssg); - atomic_inc(&iproc_priv.mb_no_spc); - } - } - if (err < 0) { - atomic_inc(&iproc_priv.mb_send_fail); + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); + if (unlikely(err < 0)) return err; - } return -EINPROGRESS; } @@ -4537,7 +4528,7 @@ static int spu_mb_init(struct device *dev) mcl->dev = dev; mcl->tx_block = false; mcl->tx_tout = 0; - mcl->knows_txdone = false; + mcl->knows_txdone = true; mcl->rx_callback = spu_rx_callback; mcl->tx_done = NULL; From 44efd1f1b1ea25e1393f91de18bff6608ba86e0f Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 7 Nov 2017 10:28:15 +0800 Subject: [PATCH 0009/3326] USB: early: Use new USB product ID and strings for DbC device commit c67678ec78eff9cf1e7e997fe6c37c9fcccfc5b8 upstream. The DbC register set defines an interface for system software to specify the vendor id and product id for the debug device. These two values will be presented by the debug device in its device descriptor idVendor and idProduct fields. The current used product ID is a place holder. We now have a valid one. The description strings are changed accordingly. This patch should be back-ported to kernels as old as v4.12, that contain the commit aeb9dd1de98c ("usb/early: Add driver for xhci debug capability"). Signed-off-by: Lu Baolu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/early/xhci-dbc.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h index 2df0f6e613fede..a516cab0bf4a54 100644 --- a/drivers/usb/early/xhci-dbc.h +++ b/drivers/usb/early/xhci-dbc.h @@ -90,8 +90,8 @@ struct xdbc_context { #define XDBC_INFO_CONTEXT_SIZE 48 #define XDBC_MAX_STRING_LENGTH 64 -#define XDBC_STRING_MANUFACTURER "Linux" -#define XDBC_STRING_PRODUCT "Remote GDB" +#define XDBC_STRING_MANUFACTURER "Linux Foundation" +#define XDBC_STRING_PRODUCT "Linux USB GDB Target" #define XDBC_STRING_SERIAL "0001" struct xdbc_strings { @@ -103,7 +103,7 @@ struct xdbc_strings { #define XDBC_PROTOCOL 1 /* GNU Remote Debug Command Set */ #define XDBC_VENDOR_ID 0x1d6b /* Linux Foundation 0x1d6b */ -#define XDBC_PRODUCT_ID 0x0004 /* __le16 idProduct; device 0004 */ +#define XDBC_PRODUCT_ID 0x0011 /* __le16 idProduct; device 0011 */ #define XDBC_DEVICE_REV 0x0010 /* 0.10 */ /* From 95b0ab9e531042861a375c904962bbd94c1f1e9f Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 8 Nov 2017 12:23:17 -0500 Subject: [PATCH 0010/3326] USB: usbfs: compute urb->actual_length for isochronous commit 2ef47001b3ee3ded579b7532ebdcf8680e4d8c54 upstream. The USB kerneldoc says that the actual_length field "is read in non-iso completion functions", but the usbfs driver uses it for all URB types in processcompl(). Since not all of the host controller drivers set actual_length for isochronous URBs, programs using usbfs with some host controllers don't work properly. For example, Minas reports that a USB camera controlled by libusb doesn't work properly with a dwc2 controller. It doesn't seem worthwhile to change the HCDs and the documentation, since the in-kernel USB class drivers evidently don't rely on actual_length for isochronous transfers. The easiest solution is for usbfs to calculate the actual_length value for itself, by adding up the lengths of the individual packets in an isochronous transfer. 
Signed-off-by: Alan Stern CC: Minas Harutyunyan Reported-and-tested-by: wlf Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devio.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9326f31db8d43..98c666ef9a5756 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1833,6 +1833,18 @@ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg) return 0; } +static void compute_isochronous_actual_length(struct urb *urb) +{ + unsigned int i; + + if (urb->number_of_packets > 0) { + urb->actual_length = 0; + for (i = 0; i < urb->number_of_packets; i++) + urb->actual_length += + urb->iso_frame_desc[i].actual_length; + } +} + static int processcompl(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; @@ -1840,6 +1852,7 @@ static int processcompl(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; + compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) goto err_out; @@ -2008,6 +2021,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; + compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) return -EFAULT; From 1e7577588cce7aa7528bb822d99955514cdd1753 Mon Sep 17 00:00:00 2001 From: Bernhard Rosenkraenzer Date: Fri, 3 Nov 2017 16:46:02 +0100 Subject: [PATCH 0011/3326] USB: Add delay-init quirk for Corsair K70 LUX keyboards commit a0fea6027f19c62727315aba1a7fae75a9caa842 upstream. Without this patch, K70 LUX keyboards don't work, saying usb 3-3: unable to read config index 0 descriptor/all usb 3-3: can't read configurations, error -110 usb usb3-port3: unable to enumerate USB device Signed-off-by: Bernhard Rosenkraenzer Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a6aaf2f193a464..37c418e581fbe3 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -221,6 +221,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Corsair Strafe RGB */ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair K70 LUX */ + { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, + /* MIDI keyboard WORLDE MINI */ { USB_DEVICE(0x1c75, 0x0204), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, From d74d586746ccf743df309ed2c9a4232e858dbd68 Mon Sep 17 00:00:00 2001 From: Andrew Gabbasov Date: Wed, 8 Nov 2017 10:13:15 -0700 Subject: [PATCH 0012/3326] usb: gadget: f_fs: Fix use-after-free in ffs_free_inst commit cdafb6d8b8da7fde266f79b3287ac221aa841879 upstream. KASAN enabled configuration reports an error BUG: KASAN: use-after-free in ffs_free_inst+... [usb_f_fs] at addr ... Write of size 8 by task ... This is observed after "ffs-test" is run and interrupted. If after that functionfs is unmounted and g_ffs module is unloaded, that use-after-free occurs during g_ffs module removal. Although the report indicates ffs_free_inst() function, the actual use-after-free condition occurs in _ffs_free_dev() function, which is probably inlined into ffs_free_inst(). This happens due to keeping the ffs_data reference in device structure during functionfs unmounting, while ffs_data itself is freed as no longer needed. 
The fix is to clear that reference in ffs_closed() function, which is a counterpart of ffs_ready(), where the reference is stored. Fixes: 3262ad824307 ("usb: gadget: f_fs: Stop ffs_closed NULL pointer dereference") Signed-off-by: Andrew Gabbasov Acked-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_fs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 8b342587f8ad6e..ef8f7d63a8f0a4 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3677,6 +3677,7 @@ static void ffs_closed(struct ffs_data *ffs) goto done; ffs_obj->desc_ready = false; + ffs_obj->ffs_data = NULL; if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) && ffs_obj->ffs_closed_callback) From b55d4612484e516ce2e9804db619f7aa66ecb5e7 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 12 Oct 2017 10:54:21 +0200 Subject: [PATCH 0013/3326] USB: serial: metro-usb: stop I/O after failed open commit 2339536d229df25c71c0900fc619289229bfecf6 upstream. Make sure to kill the interrupt-in URB after a failed open request. Apart from saving power (and avoiding stale input after a later successful open), this also prevents a NULL-deref in the completion handler if the port is manually unbound. Reviewed-by: Greg Kroah-Hartman Fixes: 704577861d5e ("USB: serial: metro-usb: get data from device in Uni-Directional mode.") Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/metro-usb.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c index 14511d6a7d44db..3950d44b80f1ce 100644 --- a/drivers/usb/serial/metro-usb.c +++ b/drivers/usb/serial/metro-usb.c @@ -189,7 +189,7 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port) dev_err(&port->dev, "%s - failed submitting interrupt in urb, error code=%d\n", __func__, result); - goto exit; + return result; } /* Send activate cmd to device */ @@ -198,9 +198,14 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port) dev_err(&port->dev, "%s - failed to configure device, error code=%d\n", __func__, result); - goto exit; + goto err_kill_urb; } -exit: + + return 0; + +err_kill_urb: + usb_kill_urb(port->interrupt_in_urb); + return result; } From ff6b050208b1c2ce5e1224cc8caf98a33e8f08ea Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 7 Nov 2017 10:28:16 +0800 Subject: [PATCH 0014/3326] USB: serial: Change DbC debug device binding ID commit 12f28144cf2cf69e1520e238eee7c384719ca44b upstream. The product ID for "Linux USB GDB Target device" has been changed. Change the driver binding table accordingly. This patch should be back-ported to kernels as old as v4.12, that contain the commit 57fb47279a04 ("usb/serial: Add DBC debug device support to usb_debug"). 
Cc: Johan Hovold Signed-off-by: Lu Baolu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/usb_debug.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c index 12f4c5a91e628e..48f285a1ad00bd 100644 --- a/drivers/usb/serial/usb_debug.c +++ b/drivers/usb/serial/usb_debug.c @@ -34,13 +34,13 @@ static const struct usb_device_id id_table[] = { }; static const struct usb_device_id dbc_id_table[] = { - { USB_DEVICE(0x1d6b, 0x0004) }, + { USB_DEVICE(0x1d6b, 0x0011) }, { }, }; static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(0x0525, 0x127a) }, - { USB_DEVICE(0x1d6b, 0x0004) }, + { USB_DEVICE(0x1d6b, 0x0011) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table_combined); From 1922d5a084a4dfa387658d07f6b786e2e801b606 Mon Sep 17 00:00:00 2001 From: Douglas Fischer Date: Sun, 29 Oct 2017 23:29:55 +0000 Subject: [PATCH 0015/3326] USB: serial: qcserial: add pid/vid for Sierra Wireless EM7355 fw update commit 771394a54148f18926ca86414e51c69eda27d0cd upstream. Add USB PID/VID for Sierra Wireless EM7355 LTE modem QDL firmware update mode. Signed-off-by: Douglas Fischer Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/qcserial.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index eb9928963a53c8..9f9d3a9044642d 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -148,6 +148,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC7304/MC7354 */ {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ + {DEVICE_SWI(0x1199, 0x901e)}, /* Sierra Wireless EM7355 QDL */ {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */ From df2ca939fed4e17d9d260b28420952e10a4615da Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 11 Oct 2017 14:02:57 +0200 Subject: [PATCH 0016/3326] USB: serial: garmin_gps: fix I/O after failed probe and remove commit 19a565d9af6e0d828bd0d521d3bafd5017f4ce52 upstream. Make sure to stop any submitted interrupt and bulk-out URBs before returning after failed probe and when the port is being unbound to avoid later NULL-pointer dereferences in the completion callbacks. Also fix up the related and broken I/O cancellation on failed open and on close. (Note that port->write_urb was never submitted.) 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/garmin_gps.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index b2f2e87aed945d..c72ffbd913adb4 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -138,6 +138,7 @@ struct garmin_data { __u8 privpkt[4*6]; spinlock_t lock; struct list_head pktlist; + struct usb_anchor write_urbs; }; @@ -905,13 +906,19 @@ static int garmin_init_session(struct usb_serial_port *port) sizeof(GARMIN_START_SESSION_REQ), 0); if (status < 0) - break; + goto err_kill_urbs; } if (status > 0) status = 0; } + return status; + +err_kill_urbs: + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); + usb_kill_urb(port->interrupt_in_urb); + return status; } @@ -930,7 +937,6 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port) spin_unlock_irqrestore(&garmin_data_p->lock, flags); /* shutdown any bulk reads that might be going on */ - usb_kill_urb(port->write_urb); usb_kill_urb(port->read_urb); if (garmin_data_p->state == STATE_RESET) @@ -953,7 +959,7 @@ static void garmin_close(struct usb_serial_port *port) /* shutdown our urbs */ usb_kill_urb(port->read_urb); - usb_kill_urb(port->write_urb); + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); /* keep reset state so we know that we must start a new session */ if (garmin_data_p->state != STATE_RESET) @@ -1037,12 +1043,14 @@ static int garmin_write_bulk(struct usb_serial_port *port, } /* send it down the pipe */ + usb_anchor_urb(urb, &garmin_data_p->write_urbs); status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed with status = %d\n", __func__, status); count = status; + usb_unanchor_urb(urb); kfree(buffer); } @@ -1401,6 +1409,7 @@ static int garmin_port_probe(struct usb_serial_port *port) garmin_data_p->state = 0; garmin_data_p->flags = 0; garmin_data_p->count = 0; + init_usb_anchor(&garmin_data_p->write_urbs); usb_set_serial_port_data(port, garmin_data_p); status = garmin_init_session(port); @@ -1413,6 +1422,7 @@ static int garmin_port_remove(struct usb_serial_port *port) { struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); usb_kill_urb(port->interrupt_in_urb); del_timer_sync(&garmin_data_p->timer); kfree(garmin_data_p); From 3a5a567333df30aa99890a16b9686efcf3442a09 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 11 Oct 2017 14:02:58 +0200 Subject: [PATCH 0017/3326] USB: serial: garmin_gps: fix memory leak on probe errors commit 74d471b598444b7f2d964930f7234779c80960a0 upstream. Make sure to free the port private data before returning after a failed probe attempt. 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/garmin_gps.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index c72ffbd913adb4..91e7e3a166a5cd 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1413,6 +1413,12 @@ static int garmin_port_probe(struct usb_serial_port *port) usb_set_serial_port_data(port, garmin_data_p); status = garmin_init_session(port); + if (status) + goto err_free; + + return 0; +err_free: + kfree(garmin_data_p); return status; } From 502f3975c0d7b30c4779f775f0b108b8999d2046 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sat, 4 Nov 2017 04:19:48 -0700 Subject: [PATCH 0018/3326] selftests/x86/protection_keys: Fix syscall NR redefinition warnings commit 693cb5580fdb026922363aa103add64b3ecd572e upstream. On new enough glibc, the pkey syscalls numbers are available. Check first before defining them to avoid warnings like: protection_keys.c:198:0: warning: "SYS_pkey_alloc" redefined Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1fbef53a9e6befb7165ff855fc1a7d4788a191d6.1509794321.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/protection_keys.c | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c index 555e43ca846b2b..7a1cc0e56d2d6a 100644 --- a/tools/testing/selftests/x86/protection_keys.c +++ b/tools/testing/selftests/x86/protection_keys.c @@ -189,17 +189,29 @@ void lots_o_noops_around_write(int *write_to_me) #define u64 uint64_t #ifdef __i386__ -#define SYS_mprotect_key 380 -#define SYS_pkey_alloc 381 -#define SYS_pkey_free 382 + +#ifndef SYS_mprotect_key +# define SYS_mprotect_key 380 +#endif +#ifndef SYS_pkey_alloc +# define SYS_pkey_alloc 381 +# define SYS_pkey_free 382 +#endif #define REG_IP_IDX REG_EIP #define si_pkey_offset 0x14 + #else -#define SYS_mprotect_key 329 -#define SYS_pkey_alloc 330 -#define SYS_pkey_free 331 + +#ifndef SYS_mprotect_key +# define SYS_mprotect_key 329 +#endif +#ifndef SYS_pkey_alloc +# define SYS_pkey_alloc 330 +# define SYS_pkey_free 331 +#endif #define REG_IP_IDX REG_RIP #define si_pkey_offset 0x20 + #endif void dump_mem(void *dumpme, int len_bytes) From 3919ccafbdd16c95155a120a166dd72087dd08c4 Mon Sep 17 00:00:00 2001 From: Yazen Ghannam Date: Mon, 6 Nov 2017 18:46:32 +0100 Subject: [PATCH 0019/3326] x86/MCE/AMD: Always give panic severity for UC errors in kernel context commit d65dfc81bb3894fdb68cbc74bbf5fb48d2354071 upstream. The AMD severity grading function was introduced in kernel 4.1. The current logic can possibly give MCE_AR_SEVERITY for uncorrectable errors in kernel context. The system may then get stuck in a loop as memory_failure() will try to handle the bad kernel memory and find it busy. Return MCE_PANIC_SEVERITY for all UC errors IN_KERNEL context on AMD systems. After: b2f9d678e28c ("x86/mce: Check for faults tagged in EXTABLE_CLASS_FAULT exception table entries") was accepted in v4.6, this issue was masked because of the tail-end attempt at kernel mode recovery in the #MC handler. However, uncorrectable errors IN_KERNEL context should always be considered unrecoverable and cause a panic. 
Signed-off-by: Yazen Ghannam Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-edac Fixes: bf80bbd7dcf5 (x86/mce: Add an AMD severities-grading function) Link: http://lkml.kernel.org/r/20171106174633.13576-1-bp@alien8.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/mcheck/mce-severity.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index 87cc9ab7a13cde..4b8187639c2df3 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -245,6 +245,9 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc if (m->status & MCI_STATUS_UC) { + if (ctx == IN_KERNEL) + return MCE_PANIC_SEVERITY; + /* * On older systems where overflow_recov flag is not present, we * should simply panic if an error overflow occurs. If @@ -255,10 +258,6 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc if (mce_flags.smca) return mce_severity_amd_smca(m, ctx); - /* software can try to contain */ - if (!(m->mcgstatus & MCG_STATUS_RIPV) && (ctx == IN_KERNEL)) - return MCE_PANIC_SEVERITY; - /* kill current process */ return MCE_AR_SEVERITY; } else { From 202a3e232974a63f0fd1e0091821bb88931c3914 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 5 Oct 2017 20:04:04 +0200 Subject: [PATCH 0020/3326] platform/x86: peaq-wmi: Add DMI check before binding to the WMI interface commit 3b95206110a2c13076c3a7fa8ddeae36c2dbcf42 upstream. It seems that the WMI GUID used by the PEAQ 2-in-1 WMI hotkeys is not as unique as a GUID should be and is used on some other devices too. This is causing spurious key-press reports on these other devices. This commits adds a DMI check to the PEAQ 2-in-1 WMI hotkeys driver to ensure that it is actually running on a PEAQ 2-in-1, fixing the spurious key-presses on these other devices. 
BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1497861 BugLink: https://bugzilla.suse.com/attachment.cgi?id=743182 Signed-off-by: Hans de Goede Signed-off-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/peaq-wmi.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c index bc98ef95514a17..67fa3fa32011de 100644 --- a/drivers/platform/x86/peaq-wmi.c +++ b/drivers/platform/x86/peaq-wmi.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -64,8 +65,22 @@ static void peaq_wmi_poll(struct input_polled_dev *dev) } } +/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */ +static const struct dmi_system_id peaq_dmi_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"), + DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"), + }, + }, +}; + static int __init peaq_wmi_init(void) { + /* WMI GUID is not unique, also check for a DMI match */ + if (!dmi_check_system(peaq_dmi_table)) + return -ENODEV; + if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID)) return -ENODEV; @@ -86,6 +101,9 @@ static int __init peaq_wmi_init(void) static void __exit peaq_wmi_exit(void) { + if (!dmi_check_system(peaq_dmi_table)) + return; + if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID)) return; From 417b152ef12fe4cfed7453c7e475682629e94f6f Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 30 Oct 2017 14:07:37 +0100 Subject: [PATCH 0021/3326] platform/x86: peaq_wmi: Fix missing terminating entry for peaq_dmi_table commit d6fa71f1c003fb2bc824276bb424a4171f9a717f upstream. Add missing terminating entry to peaq_dmi_table. Fixes: 3b95206110a2 ("platform/x86: peaq-wmi: Add DMI check before ...") Reported-by: Fengguang Wu Signed-off-by: Hans de Goede Signed-off-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/peaq-wmi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c index 67fa3fa32011de..2da48ecc90c122 100644 --- a/drivers/platform/x86/peaq-wmi.c +++ b/drivers/platform/x86/peaq-wmi.c @@ -73,6 +73,7 @@ static const struct dmi_system_id peaq_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"), }, }, + {} }; static int __init peaq_wmi_init(void) From 740ef39bce15d7d74a5b02b7b0e6abb472a9c76c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Szymanski?= Date: Thu, 2 Nov 2017 12:12:43 +0100 Subject: [PATCH 0022/3326] HID: cp2112: add HIDRAW dependency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit cde3076bdc38bf436e517a379759a9092c6ffd4f upstream. Otherwise, with HIDRAW=n, the probe function crashes because of null dereference of hdev->hidraw. Fixes: 42cb6b35b9e6 ("HID: cp2112: use proper hidraw name with minor number") Signed-off-by: Sébastien Szymanski Acked-by: Benjamin Tissoires Signed-off-by: Jiri Kosina Signed-off-by: Greg Kroah-Hartman --- drivers/hid/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 374301fcbc8679..8c7a0ce147a136 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -230,7 +230,7 @@ config HID_CMEDIA config HID_CP2112 tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support" - depends on USB_HID && I2C && GPIOLIB + depends on USB_HID && HIDRAW && I2C && GPIOLIB select GPIOLIB_IRQCHIP ---help--- Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge. 
From 29718bff1cf9548f9009a55cc03588279beb511b Mon Sep 17 00:00:00 2001 From: Jason Gerecke Date: Wed, 18 Oct 2017 08:27:13 -0700 Subject: [PATCH 0023/3326] HID: wacom: generic: Recognize WACOM_HID_WD_PEN as a type of pen collection commit 885e89f601a52cc6fb025b009df58ba83d142734 upstream. The WACOM_PEN_FIELD macro is used to determine if a given HID field should be associated with pen input. This field includes several known collection types that Wacom pen data is contained in, but the WACOM_HID_WD_PEN application collection type is notably missing. This can result in fields within this kind of collection being completely ignored by the `wacom_usage_mapping` function, preventing the later '*_event' functions from being notified about changes to their value. Fixes: c9c095874a ("HID: wacom: generic: Support and use 'Custom HID' mode and usages") Fixes: ac2423c975 ("HID: wacom: generic: add vendor defined touch") Reviewed-by: Ping Cheng Reviewed-by: Benjamin Tissoires Signed-off-by: Jason Gerecke Signed-off-by: Jiri Kosina Signed-off-by: Greg Kroah-Hartman --- drivers/hid/wacom_wac.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 8a03654048bf61..feb62fd4dfc3ec 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -166,6 +166,7 @@ ((f)->physical == HID_DG_PEN) || \ ((f)->application == HID_DG_PEN) || \ ((f)->application == HID_DG_DIGITIZER) || \ + ((f)->application == WACOM_HID_WD_PEN) || \ ((f)->application == WACOM_HID_WD_DIGITIZER) || \ ((f)->application == WACOM_HID_G9_PEN) || \ ((f)->application == WACOM_HID_G11_PEN)) From 99bff8f6d1ff8c63b1cf6bc04d1d7797484b5c0c Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Tue, 14 Nov 2017 21:45:34 -0800 Subject: [PATCH 0024/3326] rpmsg: glink: Add missing MODULE_LICENSE commit 1e0d5615bbc37deb7732491798abccf8d3c3d244 upstream. The qcom_glink_native driver is missing a MODULE_LICENSE(), correct this. Fixes: 835764ddd9af ("rpmsg: glink: Move the common glink protocol implementation to glink_native.c") Reported-by: Randy Dunlap Signed-off-by: Bjorn Andersson Signed-off-by: Greg Kroah-Hartman --- drivers/rpmsg/qcom_glink_native.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 5dcc9bf1c5bc5d..e3242a0925a671 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -1616,3 +1616,6 @@ void qcom_glink_native_unregister(struct qcom_glink *glink) device_unregister(glink->dev); } EXPORT_SYMBOL_GPL(qcom_glink_native_unregister); + +MODULE_DESCRIPTION("Qualcomm GLINK driver"); +MODULE_LICENSE("GPL v2"); From bbcb8d29aab2237491ac0f1da44b6e5765f5296a Mon Sep 17 00:00:00 2001 From: Aditya Shankar Date: Fri, 3 Nov 2017 14:26:27 +0530 Subject: [PATCH 0025/3326] staging: wilc1000: Fix bssid buffer offset in Txq commit 1bbf6a6d4091affb27ec0a19d7aa7887ce72f610 upstream. Commit 46949b48568b ("staging: wilc1000: New cfg packet format in handle_set_wfi_drv_handler") updated the frame format sent from host to the firmware. The code to update the bssid offset in the new frame was part of a second patch in the series which did not make it in and thus causes connection problems after associating to an AP. This fix adds the proper offset of the bssid value in the Tx queue buffer to fix the connection issues. 
Fixes: 46949b48568b ("staging: wilc1000: New cfg packet format in handle_set_wfi_drv_handler") Signed-off-by: Aditya Shankar Signed-off-by: Greg Kroah-Hartman --- drivers/staging/wilc1000/wilc_wlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c index 9addef1f1e128b..f49dfa82f1b8a5 100644 --- a/drivers/staging/wilc1000/wilc_wlan.c +++ b/drivers/staging/wilc1000/wilc_wlan.c @@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) char *bssid = ((struct tx_complete_data *)(tqe->priv))->bssid; buffer_offset = ETH_ETHERNET_HDR_OFFSET; - memcpy(&txb[offset + 4], bssid, 6); + memcpy(&txb[offset + 8], bssid, 6); } else { buffer_offset = HOST_HDR_OFFSET; } From 69cf5ab217d23429056f978c70a206c1596e03be Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Mon, 6 Nov 2017 08:43:03 +0800 Subject: [PATCH 0026/3326] staging: sm750fb: Fix parameter mistake in poke32 commit 16808dcf605e6302319a8c3266789b76d4c0983b upstream. In commit c075b6f2d357ea9 ("staging: sm750fb: Replace POKE32 and PEEK32 by inline functions"), POKE32 has been replaced by the inline function poke32. But it exchange the "addr" and "data" parameters by mistake, so fix it. Fixes: c075b6f2d357ea9 ("staging: sm750fb: Replace POKE32 and PEEK32 by inline functions"), Signed-off-by: Huacai Chen Signed-off-by: Liangliang Huang Signed-off-by: Greg Kroah-Hartman --- drivers/staging/sm750fb/ddk750_chip.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h index 09c223f815def3..aee82fcaf66938 100644 --- a/drivers/staging/sm750fb/ddk750_chip.h +++ b/drivers/staging/sm750fb/ddk750_chip.h @@ -18,7 +18,7 @@ static inline u32 peek32(u32 addr) return readl(addr + mmio750); } -static inline void poke32(u32 data, u32 addr) +static inline void poke32(u32 addr, u32 data) { writel(data, addr + mmio750); } From 75559e0f485432202fdf3c960229d75b77069076 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Mon, 30 Oct 2017 13:38:03 +0000 Subject: [PATCH 0027/3326] staging: ccree: fix 64 bit scatter/gather DMA ops commit e0b3f39092a1cff5548cbaf40096ec25e7721de6 upstream. Fix a wrong offset used in splitting a 64 DMA address to MSB/LSB parts needed for scatter/gather HW descriptors causing operations relying on them to fail on 64 bit platforms. 
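The ccree fix above is about splitting a 64-bit DMA address into a 32-bit low word and a high word for the scatter/gather descriptor: the high part must come from addr >> 32, not addr >> 16, otherwise the two words overlap and the reconstructed address is wrong on 64-bit platforms. A minimal standalone illustration of the split follows; the variable names are invented and this is not the ccree LLI layout itself.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint64_t addr = 0x000000ABCD123456ULL;	/* example 64-bit DMA address */

	/* Correct split: low 32 bits and the remaining high bits. */
	uint32_t lo = (uint32_t)(addr & 0xFFFFFFFFu);
	uint32_t hi = (uint32_t)(addr >> 32);

	/* Buggy split: >> 16 still contains part of the low word, so
	 * the reassembled address is wrong whenever addr >= 4 GiB. */
	uint32_t hi_bad = (uint32_t)(addr >> 16);

	uint64_t good = ((uint64_t)hi << 32) | lo;
	uint64_t bad  = ((uint64_t)hi_bad << 32) | lo;

	printf("original:             0x%" PRIx64 "\n", addr);
	printf("hi=addr>>32 rebuilds: 0x%" PRIx64 "\n", good);
	printf("hi=addr>>16 rebuilds: 0x%" PRIx64 "\n", bad);
	return 0;
}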
Fixes: c6f7f2f4591f ("staging: ccree: refactor LLI access macros") Reported-by: Stuart Yoder Signed-off-by: Gilad Ben-Yossef Signed-off-by: Greg Kroah-Hartman --- drivers/staging/ccree/cc_lli_defs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h index 851d3907167eae..a9c417b07b0429 100644 --- a/drivers/staging/ccree/cc_lli_defs.h +++ b/drivers/staging/ccree/cc_lli_defs.h @@ -59,7 +59,7 @@ static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr) lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK; - lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 16)); + lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32)); #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ } From 5abe8d48471c212c45cbb0d522b70d76325ceac0 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sun, 29 Oct 2017 13:01:33 +0100 Subject: [PATCH 0028/3326] staging: greybus: spilib: fix use-after-free after deregistration commit 770b03c2ca4aa44d226cf248f86aa23e546147d0 upstream. Remove erroneous spi_master_put() after controller deregistration which would access the already freed spi controller. Note that spi_unregister_master() drops our only controller reference. Fixes: ba3e67001b42 ("greybus: SPI: convert to a gpbridge driver") Signed-off-by: Johan Hovold Reviewed-by: Rui Miguel Silva Acked-by: Viresh Kumar Signed-off-by: Greg Kroah-Hartman --- drivers/staging/greybus/spilib.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c index e97b1914849711..1e7321a1404cc1 100644 --- a/drivers/staging/greybus/spilib.c +++ b/drivers/staging/greybus/spilib.c @@ -544,11 +544,14 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev, return 0; -exit_spi_unregister: - spi_unregister_master(master); exit_spi_put: spi_master_put(master); + return ret; + +exit_spi_unregister: + spi_unregister_master(master); + return ret; } EXPORT_SYMBOL_GPL(gb_spilib_master_init); @@ -558,7 +561,6 @@ void gb_spilib_master_exit(struct gb_connection *connection) struct spi_master *master = gb_connection_get_data(connection); spi_unregister_master(master); - spi_master_put(master); } EXPORT_SYMBOL_GPL(gb_spilib_master_exit); From ccc04bde3ae94fc46fdc1b74d1b10d8bc19175f5 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 12 Oct 2017 20:10:25 +0200 Subject: [PATCH 0029/3326] staging: vboxvideo: Fix reporting invalid suggested-offset-properties commit ce10d7b4e8e3574b9616e54a09d64521b9aeb8b6 upstream. The x and y hints receives from the host are unsigned 32 bit integers and they get set to -1 (0xffffffff) when invalid. Before this commit the vboxvideo driver was storing them in an u16 causing the -1 to be truncated to 65535 which, once reported to userspace, was breaking gnome 3.26+ in Wayland mode. This commit stores the host values in 32 bit variables, removing the truncation and checks for -1, replacing it with 0 as -1 is not a valid suggested-offset-property value. Likewise the properties are now initialized to 0 instead of -1, since -1 is not a valid value. This fixes gnome 3.26+ in Wayland mode not working with the vboxvideo driver. 
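The core of the vboxvideo change above is an integer-width problem: the host sends the hints as unsigned 32-bit values and uses 0xffffffff to mean "invalid", so storing them in a u16 turns that sentinel into 65535. A small standalone sketch of the truncation and of the check-for--1-then-report-0 handling (types only, no DRM calls):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t host_hint = 0xffffffffu;	/* host's "invalid" marker */

	/* Old behaviour: storing the hint in a 16-bit field truncates
	 * the sentinel to 65535, which looks like a real offset. */
	uint16_t hint16 = (uint16_t)host_hint;

	/* New behaviour: keep the full 32 bits and treat -1 as "no
	 * suggestion", reporting 0 instead. */
	uint32_t hint32 = host_hint;
	uint32_t reported = (hint32 != 0xffffffffu) ? hint32 : 0;

	printf("truncated u16 hint: %u\n", hint16);	/* 65535 */
	printf("reported hint:      %u\n", reported);	/* 0 */
	return 0;
}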
Reported-by: Gianfranco Costamagna Cc: Michael Thayer Signed-off-by: Hans de Goede Signed-off-by: Greg Kroah-Hartman --- drivers/staging/vboxvideo/vbox_drv.h | 8 ++++---- drivers/staging/vboxvideo/vbox_irq.c | 4 ++-- drivers/staging/vboxvideo/vbox_mode.c | 26 ++++++++++++++++++-------- 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h index 4b9302703b362c..eeac4f0cb2c650 100644 --- a/drivers/staging/vboxvideo/vbox_drv.h +++ b/drivers/staging/vboxvideo/vbox_drv.h @@ -137,8 +137,8 @@ struct vbox_connector { char name[32]; struct vbox_crtc *vbox_crtc; struct { - u16 width; - u16 height; + u32 width; + u32 height; bool disconnected; } mode_hint; }; @@ -150,8 +150,8 @@ struct vbox_crtc { unsigned int crtc_id; u32 fb_offset; bool cursor_enabled; - u16 x_hint; - u16 y_hint; + u32 x_hint; + u32 y_hint; }; struct vbox_encoder { diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c index 3ca8bec62ac41b..74abdf02d9fddd 100644 --- a/drivers/staging/vboxvideo/vbox_irq.c +++ b/drivers/staging/vboxvideo/vbox_irq.c @@ -150,8 +150,8 @@ static void vbox_update_mode_hints(struct vbox_private *vbox) disconnected = !(hints->enabled); crtc_id = vbox_conn->vbox_crtc->crtc_id; - vbox_conn->mode_hint.width = hints->cx & 0x8fff; - vbox_conn->mode_hint.height = hints->cy & 0x8fff; + vbox_conn->mode_hint.width = hints->cx; + vbox_conn->mode_hint.height = hints->cy; vbox_conn->vbox_crtc->x_hint = hints->dx; vbox_conn->vbox_crtc->y_hint = hints->dy; vbox_conn->mode_hint.disconnected = disconnected; diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c index 257a7783041072..6f08dc96671936 100644 --- a/drivers/staging/vboxvideo/vbox_mode.c +++ b/drivers/staging/vboxvideo/vbox_mode.c @@ -553,12 +553,22 @@ static int vbox_get_modes(struct drm_connector *connector) ++num_modes; } vbox_set_edid(connector, preferred_width, preferred_height); - drm_object_property_set_value( - &connector->base, vbox->dev->mode_config.suggested_x_property, - vbox_connector->vbox_crtc->x_hint); - drm_object_property_set_value( - &connector->base, vbox->dev->mode_config.suggested_y_property, - vbox_connector->vbox_crtc->y_hint); + + if (vbox_connector->vbox_crtc->x_hint != -1) + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_x_property, + vbox_connector->vbox_crtc->x_hint); + else + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_x_property, 0); + + if (vbox_connector->vbox_crtc->y_hint != -1) + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_y_property, + vbox_connector->vbox_crtc->y_hint); + else + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_y_property, 0); return num_modes; } @@ -640,9 +650,9 @@ static int vbox_connector_init(struct drm_device *dev, drm_mode_create_suggested_offset_properties(dev); drm_object_attach_property(&connector->base, - dev->mode_config.suggested_x_property, -1); + dev->mode_config.suggested_x_property, 0); drm_object_attach_property(&connector->base, - dev->mode_config.suggested_y_property, -1); + dev->mode_config.suggested_y_property, 0); drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); From d3e36fd07b5b27768db8353a83dec0459bf2ee33 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 2 Nov 2017 10:30:13 +0100 Subject: [PATCH 0030/3326] staging: rtl8188eu: Revert 4 commits 
breaking ARP commit 66d32fdcbf03851724a8b551d490ae1ddfe6eef2 upstream. Commit 2ba8444c97b1 ("staging:r8188eu: move IV/ICV trimming into decrypt() and also place it after rtl88eu_mon_recv_hook()") breaks ARP. After this commit ssh-ing to a laptop with r8188eu wifi no longer works if the machine connecting has never communicated with the laptop before. This is 100% reproducable using "arp -d && ssh " to ssh to a laptop with r8188eu wifi. This commit reverts 4 commits in total: 1. Commit 79650ffde38e ("staging:r8188eu: trim IV/ICV fields in validate_recv_data_frame()") This commit depends on 2 of the other commits being reverted. 2. Commit 02b19b4c4920 ("staging:r8188eu: inline unprotect_frame() in mon_recv_decrypted_recv()") The inline code is wrong the un-inlined version contains: if (skb->len < hdr_len + iv_len + icv_len) return; ... Where as the inline-ed code introduced by this commit does: if (skb->len < hdr_len + iv_len + icv_len) { ... Note the same check, but now to actually continue doing ... instead of to not do it, so this commit is no good. 3. Commit d86e16da6a5d ("staging:r8188eu: use different mon_recv_decrypted() inside rtl88eu_mon_recv_hook() and rtl88eu_mon_xmit_hook().") This commit introduced a 1:1 copy of a function so that one of the 2 copies can be modified in the 2 commits we're already reverting. 4. Commit 2ba8444c97b1 ("staging:r8188eu: move IV/ICV trimming into decrypt() and also place it after rtl88eu_mon_recv_hook()") This is the commit actually breaking ARP. Note this commit is a straight-forward squash of the revert of these 4 commits, without any changes. Cc: Ivan Safonov Signed-off-by: Hans de Goede Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8188eu/core/rtw_recv.c | 83 ++++++++++++++--------- drivers/staging/rtl8188eu/os_dep/mon.c | 34 ++-------- 2 files changed, 55 insertions(+), 62 deletions(-) diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c index 3fd5f4102b360c..afb9dadc1cfe9f 100644 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c @@ -259,10 +259,12 @@ static int recvframe_chkmic(struct adapter *adapter, } /* icv_len included the mic code */ - datalen = precvframe->pkt->len-prxattrib->hdrlen - 8; + datalen = precvframe->pkt->len-prxattrib->hdrlen - + prxattrib->iv_len-prxattrib->icv_len-8; pframe = precvframe->pkt->data; - payload = pframe+prxattrib->hdrlen; + payload = pframe+prxattrib->hdrlen+prxattrib->iv_len; + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len)); rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0], (unsigned char)prxattrib->priority); /* care the length of the data */ @@ -407,15 +409,9 @@ static struct recv_frame *decryptor(struct adapter *padapter, default: break; } - if (res != _FAIL) { - memmove(precv_frame->pkt->data + precv_frame->attrib.iv_len, precv_frame->pkt->data, precv_frame->attrib.hdrlen); - skb_pull(precv_frame->pkt, precv_frame->attrib.iv_len); - skb_trim(precv_frame->pkt, precv_frame->pkt->len - precv_frame->attrib.icv_len); - } } else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 && - (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)) { - psecuritypriv->hw_decrypted = true; - } + (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)) + psecuritypriv->hw_decrypted = true; if (res == _FAIL) { rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue); @@ -456,7 
+452,7 @@ static struct recv_frame *portctrl(struct adapter *adapter, if (auth_alg == 2) { /* get ether_type */ - ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE; + ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE + pfhdr->attrib.iv_len; memcpy(&be_tmp, ptr, 2); ether_type = ntohs(be_tmp); @@ -1138,8 +1134,6 @@ static int validate_recv_data_frame(struct adapter *adapter, } if (pattrib->privacy) { - struct sk_buff *skb = precv_frame->pkt; - RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("validate_recv_data_frame:pattrib->privacy=%x\n", pattrib->privacy)); RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], IS_MCAST(pattrib->ra))); @@ -1148,13 +1142,6 @@ static int validate_recv_data_frame(struct adapter *adapter, RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt=%d\n", pattrib->encrypt)); SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt); - - if (pattrib->bdecrypted == 1 && pattrib->encrypt > 0) { - memmove(skb->data + pattrib->iv_len, - skb->data, pattrib->hdrlen); - skb_pull(skb, pattrib->iv_len); - skb_trim(skb, skb->len - pattrib->icv_len); - } } else { pattrib->encrypt = 0; pattrib->iv_len = 0; @@ -1274,7 +1261,6 @@ static int validate_recv_frame(struct adapter *adapter, * Hence forward the frame to the monitor anyway to preserve the order * in which frames were received. */ - rtl88eu_mon_recv_hook(adapter->pmondev, precv_frame); exit: @@ -1296,8 +1282,11 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe) u8 *ptr = precvframe->pkt->data; struct rx_pkt_attrib *pattrib = &precvframe->attrib; - psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen); - psnap_type = ptr+pattrib->hdrlen + SNAP_SIZE; + if (pattrib->encrypt) + skb_trim(precvframe->pkt, precvframe->pkt->len - pattrib->icv_len); + + psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len); + psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE; /* convert hdr + possible LLC headers into Ethernet header */ if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) && (!memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) && @@ -1310,9 +1299,12 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe) bsnaphdr = false; } - rmv_len = pattrib->hdrlen + (bsnaphdr ? SNAP_SIZE : 0); + rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? 
SNAP_SIZE : 0); len = precvframe->pkt->len - rmv_len; + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, + ("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x===\n\n", pattrib->hdrlen, pattrib->iv_len)); + memcpy(&be_tmp, ptr+rmv_len, 2); eth_type = ntohs(be_tmp); /* pattrib->ether_type */ pattrib->eth_type = eth_type; @@ -1337,6 +1329,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, struct __queue *defrag_q) { struct list_head *plist, *phead; + u8 wlanhdr_offset; u8 curfragnum; struct recv_frame *pfhdr, *pnfhdr; struct recv_frame *prframe, *pnextrframe; @@ -1385,7 +1378,12 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, /* copy the 2nd~n fragment frame's payload to the first fragment */ /* get the 2nd~last fragment frame's payload */ - skb_pull(pnextrframe->pkt, pnfhdr->attrib.hdrlen); + wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len; + + skb_pull(pnextrframe->pkt, wlanhdr_offset); + + /* append to first fragment frame's tail (if privacy frame, pull the ICV) */ + skb_trim(prframe->pkt, prframe->pkt->len - pfhdr->attrib.icv_len); /* memcpy */ memcpy(skb_tail_pointer(pfhdr->pkt), pnfhdr->pkt->data, @@ -1393,7 +1391,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, skb_put(prframe->pkt, pnfhdr->pkt->len); - pfhdr->attrib.icv_len = 0; + pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len; plist = plist->next; } @@ -1519,6 +1517,11 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) nr_subframes = 0; pattrib = &prframe->attrib; + skb_pull(prframe->pkt, prframe->attrib.hdrlen); + + if (prframe->attrib.iv_len > 0) + skb_pull(prframe->pkt, prframe->attrib.iv_len); + a_len = prframe->pkt->len; pdata = prframe->pkt->data; @@ -1887,6 +1890,24 @@ static int process_recv_indicatepkts(struct adapter *padapter, return retval; } +static int recv_func_prehandle(struct adapter *padapter, + struct recv_frame *rframe) +{ + int ret = _SUCCESS; + struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; + + /* check the frame crtl field and decache */ + ret = validate_recv_frame(padapter, rframe); + if (ret != _SUCCESS) { + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n")); + rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */ + goto exit; + } + +exit: + return ret; +} + static int recv_func_posthandle(struct adapter *padapter, struct recv_frame *prframe) { @@ -1939,7 +1960,6 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe) struct rx_pkt_attrib *prxattrib = &rframe->attrib; struct security_priv *psecuritypriv = &padapter->securitypriv; struct mlme_priv *mlmepriv = &padapter->mlmepriv; - struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; /* check if need to handle uc_swdec_pending_queue*/ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) { @@ -1951,12 +1971,9 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe) } } - /* check the frame crtl field and decache */ - ret = validate_recv_frame(padapter, rframe); - if (ret != _SUCCESS) { - RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! 
drop pkt\n")); - rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */ - } else { + ret = recv_func_prehandle(padapter, rframe); + + if (ret == _SUCCESS) { /* check if need to enqueue into uc_swdec_pending_queue*/ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && !IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 && diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c index 37fd52d7364f5a..225c23fc69dce8 100644 --- a/drivers/staging/rtl8188eu/os_dep/mon.c +++ b/drivers/staging/rtl8188eu/os_dep/mon.c @@ -66,34 +66,6 @@ static void mon_recv_decrypted(struct net_device *dev, const u8 *data, netif_rx(skb); } -static void mon_recv_decrypted_recv(struct net_device *dev, const u8 *data, - int data_len) -{ - struct sk_buff *skb; - struct ieee80211_hdr *hdr; - int hdr_len; - - skb = netdev_alloc_skb(dev, data_len); - if (!skb) - return; - memcpy(skb_put(skb, data_len), data, data_len); - - /* - * Frame data is not encrypted. Strip off protection so - * userspace doesn't think that it is. - */ - - hdr = (struct ieee80211_hdr *)skb->data; - hdr_len = ieee80211_hdrlen(hdr->frame_control); - - if (ieee80211_has_protected(hdr->frame_control)) - hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_PROTECTED); - - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); -} - static void mon_recv_encrypted(struct net_device *dev, const u8 *data, int data_len) { @@ -110,6 +82,7 @@ static void mon_recv_encrypted(struct net_device *dev, const u8 *data, void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame) { struct rx_pkt_attrib *attr; + int iv_len, icv_len; int data_len; u8 *data; @@ -122,8 +95,11 @@ void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame) data = frame->pkt->data; data_len = frame->pkt->len; + /* Broadcast and multicast frames don't have attr->{iv,icv}_len set */ + SET_ICE_IV_LEN(iv_len, icv_len, attr->encrypt); + if (attr->bdecrypted) - mon_recv_decrypted_recv(dev, data, data_len); + mon_recv_decrypted(dev, data, data_len, iv_len, icv_len); else mon_recv_encrypted(dev, data, data_len); } From 9aacd82304adb74fa9320cb1c5d4bc71b1b91f06 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 30 Oct 2017 11:35:25 +0100 Subject: [PATCH 0031/3326] spi: fix use-after-free at controller deregistration commit 67f7b2781fafcc0f52464880154b320fea1ae982 upstream. The controller is typically freed as part of device_unregister() so store the bus id before deregistration to avoid use-after-free when the id is later released. 
Fixes: 9b61e302210e ("spi: Pick spi bus number from Linux idr or spi alias") Signed-off-by: Johan Hovold Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index e8b5a5e21b2e69..3ff0ee88c467a5 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2245,11 +2245,12 @@ static int __unregister(struct device *dev, void *null) void spi_unregister_controller(struct spi_controller *ctlr) { struct spi_controller *found; + int id = ctlr->bus_num; int dummy; /* First make sure that this controller was ever added */ mutex_lock(&board_lock); - found = idr_find(&spi_master_idr, ctlr->bus_num); + found = idr_find(&spi_master_idr, id); mutex_unlock(&board_lock); if (found != ctlr) { dev_dbg(&ctlr->dev, @@ -2269,7 +2270,7 @@ void spi_unregister_controller(struct spi_controller *ctlr) device_unregister(&ctlr->dev); /* free bus id */ mutex_lock(&board_lock); - idr_remove(&spi_master_idr, ctlr->bus_num); + idr_remove(&spi_master_idr, id); mutex_unlock(&board_lock); } EXPORT_SYMBOL_GPL(spi_unregister_controller); From 05e15a59eef1f87d0f50b592546b2c1fa64abbd4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 27 Sep 2017 22:38:19 -0700 Subject: [PATCH 0032/3326] sparc32: Add cmpxchg64(). commit 23198ddffb6cddb5d5824230af4dd4b46e4046a4 upstream. This fixes the build with i40e driver enabled. Reported-by: Guenter Roeck Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- arch/sparc/include/asm/cmpxchg_32.h | 3 +++ arch/sparc/lib/atomic32.c | 14 ++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h index 3e3823db303e7d..c73b5a3ab7b91f 100644 --- a/arch/sparc/include/asm/cmpxchg_32.h +++ b/arch/sparc/include/asm/cmpxchg_32.h @@ -63,6 +63,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) (unsigned long)_n_, sizeof(*(ptr))); \ }) +u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new); +#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new) + #include /* diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index 5010df4973879f..465a901a0ada71 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c @@ -173,6 +173,20 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) } EXPORT_SYMBOL(__cmpxchg_u32); +u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new) +{ + unsigned long flags; + u64 prev; + + spin_lock_irqsave(ATOMIC_HASH(ptr), flags); + if ((prev = *ptr) == old) + *ptr = new; + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); + + return prev; +} +EXPORT_SYMBOL(__cmpxchg_u64); + unsigned long __xchg_u32(volatile u32 *ptr, u32 new) { unsigned long flags; From e4b57c4bc11bfaa2588557b52ad5abd65fb9390e Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sun, 10 Sep 2017 13:44:47 -0700 Subject: [PATCH 0033/3326] sparc64: mmu_context: Add missing include files commit 01c3f0a42a2a0ff0c3fed80a1a25f2641ae72554 upstream. Fix the following build errors. 
In file included from arch/sparc/include/asm/mmu_context.h:4:0, from include/linux/mmu_context.h:4, from drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h:29, from drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c:23: arch/sparc/include/asm/mmu_context_64.h:22:37: error: unknown type name 'per_cpu_secondary_mm' arch/sparc/include/asm/mmu_context_64.h: In function 'switch_mm': arch/sparc/include/asm/mmu_context_64.h:79:2: error: implicit declaration of function 'smp_processor_id' Fixes: 70539bd79500 ("drm/amd: Update MEC HQD loading code for KFD") Signed-off-by: Guenter Roeck Acked-by: Oded Gabbay Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- arch/sparc/include/asm/mmu_context_64.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index e25d25b0a34b52..b361702ef52a75 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -8,9 +8,11 @@ #include #include +#include #include #include +#include static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { From ea826dbc30d1c26380c050b669ddbb13c2170107 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Fri, 3 Nov 2017 12:26:06 -0700 Subject: [PATCH 0034/3326] sparc64: Fix page table walk for PUD hugepages [ Upstream commit 70f3c8b7c2e7ebcdde8354da004872e7c9184e97 ] For a PUD hugepage entry, we need to propagate bits [32:22] from virtual address to resolve at 4M granularity. However, the current code was incorrectly propagating bits [29:19]. This bug can cause incorrect data to be returned for pages backed with 16G hugepages. Signed-off-by: Nitin Gupta Reported-by: Al Viro Cc: Al Viro Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- arch/sparc/include/asm/tsb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index 25b6abdb39083c..522a677e050d75 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -217,7 +217,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; sllx REG2, 32, REG2; \ andcc REG1, REG2, %g0; \ be,pt %xcc, 700f; \ - sethi %hi(0x1ffc0000), REG2; \ + sethi %hi(0xffe00000), REG2; \ sllx REG2, 1, REG2; \ brgez,pn REG1, FAIL_LABEL; \ andn REG1, REG2, REG1; \ From 780a781dd6f1af9dfac15b8eeba1cb678c9fc380 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 21 Nov 2017 09:49:25 +0100 Subject: [PATCH 0035/3326] Linux 4.14.1 --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index ccd981892ef262..01f9df1af25670 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 0 +SUBLEVEL = 1 EXTRAVERSION = -NAME = Fearless Coyote +NAME = Petit Gorille # *DOCUMENTATION* # To see a list of typical targets execute "make help" From 5d62da3a8e2aa3a056d6f7a9cc75a93a221f11b4 Mon Sep 17 00:00:00 2001 From: Michael Lyle Date: Thu, 16 Nov 2017 23:47:25 -0800 Subject: [PATCH 0036/3326] bio: ensure __bio_clone_fast copies bi_partno commit 62530ed8b1d07a45dec94d46e521c0c6c2d476e6 upstream. A new field was introduced in 74d46992e0d9, bi_partno, instead of using bdev->bd_contains and encoding the partition information in the bi_bdev field. __bio_clone_fast was changed to copy the disk information, but not the partition information. At minimum, this regressed bcache and caused data corruption. 
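The bio fix above is an instance of a clone helper falling behind a struct change: when bi_partno was added alongside bi_disk, the fast-clone path kept copying the disk but not the partition index, so clones silently pointed at partition 0. The sketch below shows the general failure mode with a made-up struct; it is not the block-layer code itself.

#include <stdio.h>

/* Toy request descriptor; partno was added after clone_fast() was
 * originally written. */
struct request_desc {
	int disk;
	int partno;	/* newer field */
	int flags;
};

/* Clone helper that predates the new field: partno is left at
 * whatever the destination already held (typically 0). */
static void clone_fast_old(struct request_desc *dst,
			   const struct request_desc *src)
{
	dst->disk  = src->disk;
	dst->flags = src->flags;
}

static void clone_fast_fixed(struct request_desc *dst,
			     const struct request_desc *src)
{
	dst->disk   = src->disk;
	dst->partno = src->partno;	/* the missing copy */
	dst->flags  = src->flags;
}

int main(void)
{
	struct request_desc src = { .disk = 8, .partno = 2, .flags = 1 };
	struct request_desc a = { 0 }, b = { 0 };

	clone_fast_old(&a, &src);
	clone_fast_fixed(&b, &src);
	printf("old clone partno: %d, fixed clone partno: %d\n",
	       a.partno, b.partno);
	return 0;
}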
Signed-off-by: Michael Lyle Fixes: 74d46992e0d9 ("block: replace bi_bdev with a gendisk pointer and partitions index") Reported-by: Pavel Goran Reported-by: Campbell Steven Reviewed-by: Coly Li Reviewed-by: Ming Lei Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- block/bio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/block/bio.c b/block/bio.c index 101c2a9b548150..33fa6b4af31220 100644 --- a/block/bio.c +++ b/block/bio.c @@ -597,6 +597,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) * so we don't set nor calculate new physical/hw segment counts here */ bio->bi_disk = bio_src->bi_disk; + bio->bi_partno = bio_src->bi_partno; bio_set_flag(bio, BIO_CLONED); bio->bi_opf = bio_src->bi_opf; bio->bi_write_hint = bio_src->bi_write_hint; From 5856c858c6eaa9a4ba2100a9c7f1748be5614546 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Thu, 9 Nov 2017 13:04:44 +0900 Subject: [PATCH 0037/3326] af_netlink: ensure that NLMSG_DONE never fails in dumps [ Upstream commit 0642840b8bb008528dbdf929cec9f65ac4231ad0 ] The way people generally use netlink_dump is that they fill in the skb as much as possible, breaking when nla_put returns an error. Then, they get called again and start filling out the next skb, and again, and so forth. The mechanism at work here is the ability for the iterative dumping function to detect when the skb is filled up and not fill it past the brim, waiting for a fresh skb for the rest of the data. However, if the attributes are small and nicely packed, it is possible that a dump callback function successfully fills in attributes until the skb is of size 4080 (libmnl's default page-sized receive buffer size). The dump function completes, satisfied, and then, if it happens to be that this is actually the last skb, and no further ones are to be sent, then netlink_dump will add on the NLMSG_DONE part: nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI); It is very important that netlink_dump does this, of course. However, in this example, that call to nlmsg_put_answer will fail, because the previous filling by the dump function did not leave it enough room. And how could it possibly have done so? All of the nla_put variety of functions simply check to see if the skb has enough tailroom, independent of the context it is in. In order to keep the important assumptions of all netlink dump users, it is therefore important to give them an skb that has this end part of the tail already reserved, so that the call to nlmsg_put_answer does not fail. Otherwise, library authors are forced to find some bizarre sized receive buffer that has a large modulo relative to the common sizes of messages received, which is ugly and buggy. This patch thus saves the NLMSG_DONE for an additional message, for the case that things are dangerously close to the brim. This requires keeping track of the errno from ->dump() across calls. Signed-off-by: Jason A. Donenfeld Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/netlink/af_netlink.c | 17 +++++++++++------ net/netlink/af_netlink.h | 1 + 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index b93148e8e9fb2d..15c99dfa3d728d 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2136,7 +2136,7 @@ static int netlink_dump(struct sock *sk) struct sk_buff *skb = NULL; struct nlmsghdr *nlh; struct module *module; - int len, err = -ENOBUFS; + int err = -ENOBUFS; int alloc_min_size; int alloc_size; @@ -2183,9 +2183,11 @@ static int netlink_dump(struct sock *sk) skb_reserve(skb, skb_tailroom(skb) - alloc_size); netlink_skb_set_owner_r(skb, sk); - len = cb->dump(skb, cb); + if (nlk->dump_done_errno > 0) + nlk->dump_done_errno = cb->dump(skb, cb); - if (len > 0) { + if (nlk->dump_done_errno > 0 || + skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) { mutex_unlock(nlk->cb_mutex); if (sk_filter(sk, skb)) @@ -2195,13 +2197,15 @@ static int netlink_dump(struct sock *sk) return 0; } - nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI); - if (!nlh) + nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, + sizeof(nlk->dump_done_errno), NLM_F_MULTI); + if (WARN_ON(!nlh)) goto errout_skb; nl_dump_check_consistent(cb, nlh); - memcpy(nlmsg_data(nlh), &len, sizeof(len)); + memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, + sizeof(nlk->dump_done_errno)); if (sk_filter(sk, skb)) kfree_skb(skb); @@ -2273,6 +2277,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, } nlk->cb_running = true; + nlk->dump_done_errno = INT_MAX; mutex_unlock(nlk->cb_mutex); diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index 028188597eaa9d..962de7b3c023d4 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -34,6 +34,7 @@ struct netlink_sock { wait_queue_head_t wait; bool bound; bool cb_running; + int dump_done_errno; struct netlink_callback cb; struct mutex *cb_mutex; struct mutex cb_def_mutex; From 829af2cd8a2ca21d0d5bc7cb46988c0a6e612f5b Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 11 Nov 2017 19:58:50 +0800 Subject: [PATCH 0038/3326] vxlan: fix the issue that neigh proxy blocks all icmpv6 packets [ Upstream commit 8bff3685a4bbf175a96bc6a528f13455d8d38244 ] Commit f1fb08f6337c ("vxlan: fix ND proxy when skb doesn't have transport header offset") removed icmp6_code and icmp6_type check before calling neigh_reduce when doing neigh proxy. It means all icmpv6 packets would be blocked by this, not only ns packet. In Jianlin's env, even ping6 couldn't work through it. This patch is to bring the icmp6_code and icmp6_type check back and also removed the same check from neigh_reduce(). Fixes: f1fb08f6337c ("vxlan: fix ND proxy when skb doesn't have transport header offset") Reported-by: Jianlin Shi Signed-off-by: Xin Long Reviewed-by: Vincent Bernat Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/vxlan.c | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d7c49cf1d5e91e..a2f4e52fadb561 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1623,26 +1623,19 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct nd_msg *msg; - const struct ipv6hdr *iphdr; const struct in6_addr *daddr; - struct neighbour *n; + const struct ipv6hdr *iphdr; struct inet6_dev *in6_dev; + struct neighbour *n; + struct nd_msg *msg; in6_dev = __in6_dev_get(dev); if (!in6_dev) goto out; - if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) - goto out; - iphdr = ipv6_hdr(skb); daddr = &iphdr->daddr; - msg = (struct nd_msg *)(iphdr + 1); - if (msg->icmph.icmp6_code != 0 || - msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) - goto out; if (ipv6_addr_loopback(daddr) || ipv6_addr_is_multicast(&msg->target)) @@ -2240,11 +2233,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_rdst *rdst, *fdst = NULL; const struct ip_tunnel_info *info; - struct ethhdr *eth; bool did_rsc = false; - struct vxlan_rdst *rdst, *fdst = NULL; struct vxlan_fdb *f; + struct ethhdr *eth; __be32 vni = 0; info = skb_tunnel_info(skb); @@ -2269,12 +2262,14 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) if (ntohs(eth->h_proto) == ETH_P_ARP) return arp_reduce(dev, skb, vni); #if IS_ENABLED(CONFIG_IPV6) - else if (ntohs(eth->h_proto) == ETH_P_IPV6) { - struct ipv6hdr *hdr, _hdr; - if ((hdr = skb_header_pointer(skb, - skb_network_offset(skb), - sizeof(_hdr), &_hdr)) && - hdr->nexthdr == IPPROTO_ICMPV6) + else if (ntohs(eth->h_proto) == ETH_P_IPV6 && + pskb_may_pull(skb, sizeof(struct ipv6hdr) + + sizeof(struct nd_msg)) && + ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { + struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1); + + if (m->icmph.icmp6_code == 0 && + m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) return neigh_reduce(dev, skb, vni); } #endif From 060dd7a476314ac393a51cf78e78957cdef0b43a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Wed, 15 Nov 2017 09:35:02 +0100 Subject: [PATCH 0039/3326] net: cdc_ncm: GetNtbFormat endian fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 6314dab4b8fb8493d810e175cb340376052c69b6 ] The GetNtbFormat and SetNtbFormat requests operate on 16 bit little endian values. We get away with ignoring this most of the time, because we only care about USB_CDC_NCM_NTB16_FORMAT which is 0x0000. This fails for USB_CDC_NCM_NTB32_FORMAT. Fix comparison between LE value from device and constant by converting the constant to LE. Reported-by: Ben Hutchings Fixes: 2b02c20ce0c2 ("cdc_ncm: Set NTB format again after altsetting switch for Huawei devices") Cc: Enrico Mioso Cc: Christian Panton Signed-off-by: Bjørn Mork Acked-By: Enrico Mioso Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/cdc_ncm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 47cab1bde0659a..9e1b74590682e1 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -771,7 +771,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ int err; u8 iface_no; struct usb_cdc_parsed_header hdr; - u16 curr_ntb_format; + __le16 curr_ntb_format; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -889,7 +889,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ goto error2; } - if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) { + if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) { dev_info(&intf->dev, "resetting NTB format to 16-bit"); err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS | USB_DIR_OUT From 7b6868743d7e0bdf1c2f41e5256fdf2e3d33b962 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 16 Nov 2017 11:07:15 +0800 Subject: [PATCH 0040/3326] fealnx: Fix building error on MIPS [ Upstream commit cc54c1d32e6a4bb3f116721abf900513173e4d02 ] This patch try to fix the building error on MIPS. The reason is MIPS has already defined the LONG macro, which conflicts with the LONG enum in drivers/net/ethernet/fealnx.c. Signed-off-by: Huacai Chen Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/fealnx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index e92859dab7aede..e191c4ebeaf414 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -257,8 +257,8 @@ enum rx_desc_status_bits { RXFSD = 0x00000800, /* first descriptor */ RXLSD = 0x00000400, /* last descriptor */ ErrorSummary = 0x80, /* error summary */ - RUNT = 0x40, /* runt packet received */ - LONG = 0x20, /* long packet received */ + RUNTPKT = 0x40, /* runt packet received */ + LONGPKT = 0x20, /* long packet received */ FAE = 0x10, /* frame align error */ CRC = 0x08, /* crc error */ RXER = 0x04, /* receive error */ @@ -1632,7 +1632,7 @@ static int netdev_rx(struct net_device *dev) dev->name, rx_status); dev->stats.rx_errors++; /* end of a packet. */ - if (rx_status & (LONG | RUNT)) + if (rx_status & (LONGPKT | RUNTPKT)) dev->stats.rx_length_errors++; if (rx_status & RXER) dev->stats.rx_frame_errors++; From 8d028694813201787efd5086352fb051bfa35790 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 15 Nov 2017 22:17:48 -0600 Subject: [PATCH 0041/3326] net/sctp: Always set scope_id in sctp_inet6_skb_msgname [ Upstream commit 7c8a61d9ee1df0fb4747879fa67a99614eb62fec ] Alexandar Potapenko while testing the kernel with KMSAN and syzkaller discovered that in some configurations sctp would leak 4 bytes of kernel stack. Working with his reproducer I discovered that those 4 bytes that are leaked is the scope id of an ipv6 address returned by recvmsg. With a little code inspection and a shrewd guess I discovered that sctp_inet6_skb_msgname only initializes the scope_id field for link local ipv6 addresses to the interface index the link local address pertains to instead of initializing the scope_id field for all ipv6 addresses. That is almost reasonable as scope_id's are meaniningful only for link local addresses. Set the scope_id in all other cases to 0 which is not a valid interface index to make it clear there is nothing useful in the scope_id field. 
There should be no danger of breaking userspace as the stack leak guaranteed that previously meaningless random data was being returned. Fixes: 372f525b495c ("SCTP: Resync with LKSCTP tree.") History-tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git Reported-by: Alexander Potapenko Tested-by: Alexander Potapenko Signed-off-by: "Eric W. Biederman" Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/ipv6.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index a6dfa86c02016e..3b18085e3b1025 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -807,9 +807,10 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, addr->v6.sin6_flowinfo = 0; addr->v6.sin6_port = sh->source; addr->v6.sin6_addr = ipv6_hdr(skb)->saddr; - if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { + if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) addr->v6.sin6_scope_id = sctp_v6_skb_iif(skb); - } + else + addr->v6.sin6_scope_id = 0; } *addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr); From 87eb84b9a0d1df942dd1676d16bb2b615e480d18 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Tue, 7 Nov 2017 11:37:07 +0100 Subject: [PATCH 0042/3326] ima: do not update security.ima if appraisal status is not INTEGRITY_PASS commit 020aae3ee58c1af0e7ffc4e2cc9fe4dc630338cb upstream. Commit b65a9cfc2c38 ("Untangling ima mess, part 2: deal with counters") moved the call of ima_file_check() from may_open() to do_filp_open() at a point where the file descriptor is already opened. This breaks the assumption made by IMA that file descriptors being closed belong to files whose access was granted by ima_file_check(). The consequence is that security.ima and security.evm are updated with good values, regardless of the current appraisal status. For example, if a file does not have security.ima, IMA will create it after opening the file for writing, even if access is denied. Access to the file will be allowed afterwards. Avoid this issue by checking the appraisal status before updating security.ima. Signed-off-by: Roberto Sassu Signed-off-by: Mimi Zohar Signed-off-by: James Morris Signed-off-by: Greg Kroah-Hartman --- security/integrity/ima/ima_appraise.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 809ba70fbbbfb3..7d769b948de893 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -320,6 +320,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file) if (iint->flags & IMA_DIGSIG) return; + if (iint->ima_file_status != INTEGRITY_PASS) + return; + rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo); if (rc < 0) return; From c75289a7b774d369263cef27c4e5aaf7ed59400a Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 21 Oct 2017 10:50:18 +0200 Subject: [PATCH 0043/3326] serial: omap: Fix EFR write on RTS deassertion commit 2a71de2f7366fb1aec632116d0549ec56d6a3940 upstream. Commit 348f9bb31c56 ("serial: omap: Fix RTS handling") sought to enable auto RTS upon manual RTS assertion and disable it on deassertion. However it seems the latter was done incorrectly, it clears all bits in the Extended Features Register *except* auto RTS. 
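The one-character omap-serial fix above is the classic bit-clearing mistake: reg &= BIT keeps only that bit and wipes everything else, whereas reg &= ~BIT clears just that bit and preserves the rest. A short standalone demonstration (the initial register value is arbitrary; the bit value matches the 8250 EFR auto-RTS definition):

#include <stdio.h>
#include <stdint.h>

#define UART_EFR_RTS 0x40	/* EFR auto-RTS enable bit */

int main(void)
{
	uint8_t efr = 0xD3;	/* arbitrary contents with several bits set */

	uint8_t wrong = efr & UART_EFR_RTS;		/* wipes every other EFR bit */
	uint8_t right = efr & (uint8_t)~UART_EFR_RTS;	/* clears only auto-RTS */

	printf("efr=0x%02X  'efr &= BIT' -> 0x%02X  'efr &= ~BIT' -> 0x%02X\n",
	       efr, wrong, right);
	return 0;
}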
Fixes: 348f9bb31c56 ("serial: omap: Fix RTS handling") Cc: Peter Hurley Signed-off-by: Lukas Wunner Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/omap-serial.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 7754053deedac9..26a22b100df100 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -693,7 +693,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl) if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) up->efr |= UART_EFR_RTS; else - up->efr &= UART_EFR_RTS; + up->efr &= ~UART_EFR_RTS; serial_out(up, UART_EFR, up->efr); serial_out(up, UART_LCR, lcr); From 432a1a50d364e49d3b15a6ec412cb04c4d9bf108 Mon Sep 17 00:00:00 2001 From: "Ji-Ze Hong (Peter Hong)" Date: Tue, 17 Oct 2017 14:23:08 +0800 Subject: [PATCH 0044/3326] serial: 8250_fintek: Fix finding base_port with activated SuperIO commit fd97e66c5529046e989a0879c3bb58fddb592c71 upstream. The SuperIO will be configured at boot time by BIOS, but some BIOS will not deactivate the SuperIO when the end of configuration. It'll lead to mismatch for pdata->base_port in probe_setup_port(). So we'll deactivate all SuperIO before activate special base_port in fintek_8250_enter_key(). Tested on iBASE MI802. Tested-by: Ji-Ze Hong (Peter Hong) Signed-off-by: Ji-Ze Hong (Peter Hong) Reviewd-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_fintek.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index e500f7dd2470a2..4bd376c08b59f5 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -118,6 +118,9 @@ static int fintek_8250_enter_key(u16 base_port, u8 key) if (!request_muxed_region(base_port, 2, "8250_fintek")) return -EBUSY; + /* Force to deactive all SuperIO in this base_port */ + outb(EXIT_KEY, base_port + ADDR_PORT); + outb(key, base_port + ADDR_PORT); outb(key, base_port + ADDR_PORT); return 0; From a371abb25a16029932fde51eb4b12420421f96da Mon Sep 17 00:00:00 2001 From: Alexander Steffen Date: Fri, 8 Sep 2017 17:21:32 +0200 Subject: [PATCH 0045/3326] tpm-dev-common: Reject too short writes commit ee70bc1e7b63ac8023c9ff9475d8741e397316e7 upstream. tpm_transmit() does not offer an explicit interface to indicate the number of valid bytes in the communication buffer. Instead, it relies on the commandSize field in the TPM header that is encoded within the buffer. Therefore, ensure that a) enough data has been written to the buffer, so that the commandSize field is present and b) the commandSize field does not announce more data than has been written to the buffer. This should have been fixed with CVE-2011-1161 long ago, but apparently a correct version of that patch never made it into the kernel. 
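The tpm-dev-common check above validates the self-describing length in the TPM command header: the 32-bit big-endian commandSize field sits at byte offset 2, so a write must provide at least 6 bytes and at least as many bytes as that field claims. A userspace sketch of the same validation on a raw buffer (generic header parsing only, no TPM driver calls):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl() for big-endian conversion */

/* Return 0 if the buffer holds a plausible TPM command: at least a
 * 6-byte header (tag + commandSize) and no fewer bytes than the
 * big-endian commandSize field at offset 2 announces. */
static int validate_command(const uint8_t *buf, size_t in_size)
{
	uint32_t command_size;

	if (in_size < 6)
		return -1;
	memcpy(&command_size, buf + 2, sizeof(command_size));
	command_size = ntohl(command_size);
	if (in_size < command_size)
		return -1;
	return 0;
}

int main(void)
{
	/* Header claims commandSize=12 but only 10 bytes are actually
	 * provided, so the command must be rejected. */
	uint8_t short_write[10] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0c,
				    0x00, 0x00, 0x01, 0x7b };

	printf("short write accepted? %s\n",
	       validate_command(short_write, sizeof(short_write)) ? "no" : "yes");
	return 0;
}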
Signed-off-by: Alexander Steffen Reviewed-by: Jarkko Sakkinen Tested-by: Jarkko Sakkinen Signed-off-by: Jarkko Sakkinen Signed-off-by: Greg Kroah-Hartman --- drivers/char/tpm/tpm-dev-common.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 610638a80383d4..461bf0b8a09473 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -110,6 +110,12 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, return -EFAULT; } + if (in_size < 6 || + in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) { + mutex_unlock(&priv->buffer_mutex); + return -EINVAL; + } + /* atomic tpm command send and result receive. We only hold the ops * lock during this period so that the tpm can be unregistered even if * the char dev is held open. From 3594216fc6cf25636109c70208d51e1250102b32 Mon Sep 17 00:00:00 2001 From: Neeraj Upadhyay Date: Mon, 7 Aug 2017 11:20:10 +0530 Subject: [PATCH 0046/3326] rcu: Fix up pending cbs check in rcu_prepare_for_idle commit 135bd1a230bb69a68c9808a7d25467318900b80a upstream. The pending-callbacks check in rcu_prepare_for_idle() is backwards. It should accelerate if there are pending callbacks, but the check rather uselessly accelerates only if there are no callbacks. This commit therefore inverts this check. Fixes: 15fecf89e46a ("srcu: Abstract multi-tail callback list handling") Signed-off-by: Neeraj Upadhyay Signed-off-by: Paul E. McKenney Signed-off-by: Greg Kroah-Hartman --- kernel/rcu/tree_plugin.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index e012b9be777e3b..fed95fa941e617 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1507,7 +1507,7 @@ static void rcu_prepare_for_idle(void) rdtp->last_accelerate = jiffies; for_each_rcu_flavor(rsp) { rdp = this_cpu_ptr(rsp->rda); - if (rcu_segcblist_pend_cbs(&rdp->cblist)) + if (!rcu_segcblist_pend_cbs(&rdp->cblist)) continue; rnp = rdp->mynode; raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ From bbce81fc966f59421e5f54c4bd3d3a4670f45e33 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Tue, 14 Nov 2017 01:03:44 +0100 Subject: [PATCH 0047/3326] mm/pagewalk.c: report holes in hugetlb ranges commit 373c4557d2aa362702c4c2d41288fb1e54990b7c upstream. This matters at least for the mincore syscall, which will otherwise copy uninitialized memory from the page allocator to userspace. It is probably also a correctness error for /proc/$pid/pagemap, but I haven't tested that. Removing the `walk->hugetlb_entry` condition in walk_hugetlb_range() has no effect because the caller already checks for that. This only reports holes in hugetlb ranges to callers who have specified a hugetlb_entry callback. This issue was found using an AFL-based fuzzer. 
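The pagewalk change above restores the rule that a range with no page-table entry still gets reported, via the pte_hole callback, instead of being skipped silently. Below is a simplified userspace model of that walker contract: per chunk, call the entry callback when a (toy) entry exists, otherwise fall back to the hole callback if one is provided. The data structures are invented for illustration and are not the kernel's mm_walk API.

#include <stdio.h>
#include <stddef.h>

struct walker {
	/* Called for chunks that have a backing entry. */
	int (*entry)(unsigned long start, unsigned long end);
	/* Called for chunks with no entry; may be NULL. */
	int (*hole)(unsigned long start, unsigned long end);
};

/* has_entry() is a stand-in for looking up a page-table entry. */
static int has_entry(unsigned long addr)
{
	return (addr / 0x1000) % 2 == 0;	/* every other page is a hole */
}

static int walk_range(unsigned long start, unsigned long end,
		      const struct walker *w)
{
	unsigned long addr, next;
	int err = 0;

	for (addr = start; addr != end; addr = next) {
		next = addr + 0x1000;
		if (has_entry(addr))
			err = w->entry(addr, next);
		else if (w->hole)	/* report holes instead of skipping them */
			err = w->hole(addr, next);
		if (err)
			break;
	}
	return err;
}

static int print_entry(unsigned long s, unsigned long e)
{
	printf("entry: %#lx-%#lx\n", s, e);
	return 0;
}

static int print_hole(unsigned long s, unsigned long e)
{
	printf("hole:  %#lx-%#lx\n", s, e);
	return 0;
}

int main(void)
{
	const struct walker w = { .entry = print_entry, .hole = print_hole };

	return walk_range(0x0, 0x4000, &w);
}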
v2: - don't crash on ->pte_hole==NULL (Andrew Morton) - add Cc stable (Andrew Morton) Fixes: 1e25a271c8ac ("mincore: apply page table walker on do_mincore()") Signed-off-by: Jann Horn Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/pagewalk.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 8bd4afa83cb89f..23a3e415ac2ce5 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -188,8 +188,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end, do { next = hugetlb_entry_end(h, addr, end); pte = huge_pte_offset(walk->mm, addr & hmask, sz); - if (pte && walk->hugetlb_entry) + + if (pte) err = walk->hugetlb_entry(pte, hmask, addr, next, walk); + else if (walk->pte_hole) + err = walk->pte_hole(addr, next, walk); + if (err) break; } while (addr = next, addr != end); From e250a1993727dbe4b10831ca8ddf30745b3d545a Mon Sep 17 00:00:00 2001 From: Changwei Ge Date: Wed, 15 Nov 2017 17:31:33 -0800 Subject: [PATCH 0048/3326] ocfs2: fix cluster hang after a node dies commit 1c01967116a678fed8e2c68a6ab82abc8effeddc upstream. When a node dies, other live nodes have to choose a new master for an existed lock resource mastered by the dead node. As for ocfs2/dlm implementation, this is done by function - dlm_move_lockres_to_recovery_list which marks those lock rsources as DLM_LOCK_RES_RECOVERING and manages them via a list from which DLM changes lock resource's master later. So without invoking dlm_move_lockres_to_recovery_list, no master will be choosed after dlm recovery accomplishment since no lock resource can be found through ::resource list. What's worse is that if DLM_LOCK_RES_RECOVERING is not marked for lock resources mastered a dead node, it will break up synchronization among nodes. So invoke dlm_move_lockres_to_recovery_list again. Fixs: 'commit ee8f7fcbe638 ("ocfs2/dlm: continue to purge recovery lockres when recovery master goes down")' Link: http://lkml.kernel.org/r/63ADC13FD55D6546B7DECE290D39E373CED6E0F9@H3CMLB14-EX.srv.huawei-3com.com Signed-off-by: Changwei Ge Reported-by: Vitaly Mayatskih Tested-by: Vitaly Mayatskikh Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Joseph Qi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/ocfs2/dlm/dlmrecovery.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 74407c6dd592a5..ec8f75813beb48 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -2419,6 +2419,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) dlm_lockres_put(res); continue; } + dlm_move_lockres_to_recovery_list(dlm, res); } else if (res->owner == dlm->node_num) { dlm_free_dead_locks(dlm, res, dead_node); __dlm_lockres_calc_usage(dlm, res); From 44ec0aecc73dc3959bdbb6fa733c69dc0bb633f4 Mon Sep 17 00:00:00 2001 From: alex chen Date: Wed, 15 Nov 2017 17:31:40 -0800 Subject: [PATCH 0049/3326] ocfs2: should wait dio before inode lock in ocfs2_setattr() commit 28f5a8a7c033cbf3e32277f4cc9c6afd74f05300 upstream. 
we should wait dio requests to finish before inode lock in ocfs2_setattr(), otherwise the following deadlock will happen: process 1 process 2 process 3 truncate file 'A' end_io of writing file 'A' receiving the bast messages ocfs2_setattr ocfs2_inode_lock_tracker ocfs2_inode_lock_full inode_dio_wait __inode_dio_wait -->waiting for all dio requests finish dlm_proxy_ast_handler dlm_do_local_bast ocfs2_blocking_ast ocfs2_generic_handle_bast set OCFS2_LOCK_BLOCKED flag dio_end_io dio_bio_end_aio dio_complete ocfs2_dio_end_io ocfs2_dio_end_io_write ocfs2_inode_lock __ocfs2_cluster_lock ocfs2_wait_for_mask -->waiting for OCFS2_LOCK_BLOCKED flag to be cleared, that is waiting for 'process 1' unlocking the inode lock inode_dio_end -->here dec the i_dio_count, but will never be called, so a deadlock happened. Link: http://lkml.kernel.org/r/59F81636.70508@huawei.com Signed-off-by: Alex Chen Reviewed-by: Jun Piao Reviewed-by: Joseph Qi Acked-by: Changwei Ge Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/ocfs2/file.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 6e41fc8fabbe78..dc455d45a66aed 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1161,6 +1161,13 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) } size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; if (size_change) { + /* + * Here we should wait dio to finish before inode lock + * to avoid a deadlock between ocfs2_setattr() and + * ocfs2_dio_end_io_write() + */ + inode_dio_wait(inode); + status = ocfs2_rw_lock(inode, 1); if (status < 0) { mlog_errno(status); @@ -1200,8 +1207,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) if (status) goto bail_unlock; - inode_dio_wait(inode); - if (i_size_read(inode) >= attr->ia_size) { if (ocfs2_should_order_data(inode)) { status = ocfs2_begin_ordered_truncate(inode, From 3a46857604044d129dd7ac35f913c50627d37c4e Mon Sep 17 00:00:00 2001 From: Corey Minyard Date: Sat, 29 Jul 2017 21:14:55 -0500 Subject: [PATCH 0050/3326] ipmi: fix unsigned long underflow commit 392a17b10ec4320d3c0e96e2a23ebaad1123b989 upstream. When I set the timeout to a specific value such as 500ms, the timeout event will not happen in time due to the overflow in function check_msg_timeout: ... ent->timeout -= timeout_period; if (ent->timeout > 0) return; ... The type of timeout_period is long, but ent->timeout is unsigned long. This patch makes the type consistent. 
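The ipmi fix above is worth spelling out: with an unsigned timeout, the sequence "timeout -= period; if (timeout > 0)" never sees a negative value, because the subtraction wraps to a huge positive number, so the entry never expires on time. Comparing before subtracting avoids the wrap. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long timeout = 300;	/* ms left on the entry */
	long period = 500;		/* elapsed timeout period, ms */

	/* Buggy pattern: unsigned subtraction wraps instead of going
	 * negative, so the "still waiting" test stays true. */
	unsigned long wrapped = timeout - (unsigned long)period;
	printf("after wrap: %lu (> 0, so no timeout fires)\n", wrapped);

	/* Fixed pattern: compare first, subtract only if it cannot wrap. */
	if ((unsigned long)period < timeout) {
		timeout -= (unsigned long)period;
		printf("still waiting, %lu ms left\n", timeout);
	} else {
		printf("timed out\n");
	}
	return 0;
}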
Reported-by: Weilong Chen Signed-off-by: Corey Minyard Tested-by: Weilong Chen Signed-off-by: Greg Kroah-Hartman --- drivers/char/ipmi/ipmi_msghandler.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 810b138f5897b8..c82d9fd2f05af6 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -4030,7 +4030,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, } static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, - struct list_head *timeouts, long timeout_period, + struct list_head *timeouts, + unsigned long timeout_period, int slot, unsigned long *flags, unsigned int *waiting_msgs) { @@ -4043,8 +4044,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, if (!ent->inuse) return; - ent->timeout -= timeout_period; - if (ent->timeout > 0) { + if (timeout_period < ent->timeout) { + ent->timeout -= timeout_period; (*waiting_msgs)++; return; } @@ -4110,7 +4111,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, } } -static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period) +static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, + unsigned long timeout_period) { struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; From 5a77c92fa1403812ea5b85ad7b7a5091db1c493b Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 15 Nov 2017 17:38:41 -0800 Subject: [PATCH 0051/3326] mm/page_alloc.c: broken deferred calculation commit d135e5750205a21a212a19dbb05aeb339e2cbea7 upstream. In reset_deferred_meminit() we determine number of pages that must not be deferred. We initialize pages for at least 2G of memory, but also pages for reserved memory in this node. The reserved memory is determined in this function: memblock_reserved_memory_within(), which operates over physical addresses, and returns size in bytes. However, reset_deferred_meminit() assumes that that this function operates with pfns, and returns page count. The result is that in the best case machine boots slower than expected due to initializing more pages than needed in single thread, and in the worst case panics because fewer than needed pages are initialized early. Link: http://lkml.kernel.org/r/20171021011707.15191-1-pasha.tatashin@oracle.com Fixes: 864b9a393dcb ("mm: consider memblock reservations for deferred memory initialization sizing") Signed-off-by: Pavel Tatashin Acked-by: Michal Hocko Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/mmzone.h | 3 ++- mm/page_alloc.c | 27 ++++++++++++++++++--------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c9c4a81b976711..18b06983131a35 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -700,7 +700,8 @@ typedef struct pglist_data { * is the first PFN that needs to be initialised. 
*/ unsigned long first_deferred_pfn; - unsigned long static_init_size; + /* Number of non-deferred pages */ + unsigned long static_init_pgcnt; #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 77e4d3c5c57b72..82a6270c9743c9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -290,28 +290,37 @@ EXPORT_SYMBOL(nr_online_nodes); int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT + +/* + * Determine how many pages need to be initialized durig early boot + * (non-deferred initialization). + * The value of first_deferred_pfn will be set later, once non-deferred pages + * are initialized, but for now set it ULONG_MAX. + */ static inline void reset_deferred_meminit(pg_data_t *pgdat) { - unsigned long max_initialise; - unsigned long reserved_lowmem; + phys_addr_t start_addr, end_addr; + unsigned long max_pgcnt; + unsigned long reserved; /* * Initialise at least 2G of a node but also take into account that * two large system hashes that can take up 1GB for 0.25TB/node. */ - max_initialise = max(2UL << (30 - PAGE_SHIFT), - (pgdat->node_spanned_pages >> 8)); + max_pgcnt = max(2UL << (30 - PAGE_SHIFT), + (pgdat->node_spanned_pages >> 8)); /* * Compensate the all the memblock reservations (e.g. crash kernel) * from the initial estimation to make sure we will initialize enough * memory to boot. */ - reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn, - pgdat->node_start_pfn + max_initialise); - max_initialise += reserved_lowmem; + start_addr = PFN_PHYS(pgdat->node_start_pfn); + end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt); + reserved = memblock_reserved_memory_within(start_addr, end_addr); + max_pgcnt += PHYS_PFN(reserved); - pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages); + pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages); pgdat->first_deferred_pfn = ULONG_MAX; } @@ -338,7 +347,7 @@ static inline bool update_defer_init(pg_data_t *pgdat, if (zone_end < pgdat_end_pfn(pgdat)) return true; (*nr_initialised)++; - if ((*nr_initialised > pgdat->static_init_size) && + if ((*nr_initialised > pgdat->static_init_pgcnt) && (pfn & (PAGES_PER_SECTION - 1)) == 0) { pgdat->first_deferred_pfn = pfn; return false; From cb58b8f8a8d7d874690c7530b320a5c90bf0f471 Mon Sep 17 00:00:00 2001 From: Jaewon Kim Date: Wed, 15 Nov 2017 17:39:07 -0800 Subject: [PATCH 0052/3326] mm/page_ext.c: check if page_ext is not prepared commit e492080e640c2d1235ddf3441cae634cfffef7e1 upstream. online_page_ext() and page_ext_init() allocate page_ext for each section, but they do not allocate if the first PFN is !pfn_present(pfn) or !pfn_valid(pfn). Then section->page_ext remains as NULL. lookup_page_ext checks NULL only if CONFIG_DEBUG_VM is enabled. For a valid PFN, __set_page_owner will try to get page_ext through lookup_page_ext. Without CONFIG_DEBUG_VM lookup_page_ext will misuse NULL pointer as value 0. This incurrs invalid address access. This is the panic example when PFN 0x100000 is not valid but PFN 0x13FC00 is being used for page_ext. section->page_ext is NULL, get_entry returned invalid page_ext address as 0x1DFA000 for a PFN 0x13FC00. To avoid this panic, CONFIG_DEBUG_VM should be removed so that page_ext will be checked at all times. 
Unable to handle kernel paging request at virtual address 01dfa014 ------------[ cut here ]------------ Kernel BUG at ffffff80082371e0 [verbose debug info unavailable] Internal error: Oops: 96000045 [#1] PREEMPT SMP Modules linked in: PC is at __set_page_owner+0x48/0x78 LR is at __set_page_owner+0x44/0x78 __set_page_owner+0x48/0x78 get_page_from_freelist+0x880/0x8e8 __alloc_pages_nodemask+0x14c/0xc48 __do_page_cache_readahead+0xdc/0x264 filemap_fault+0x2ac/0x550 ext4_filemap_fault+0x3c/0x58 __do_fault+0x80/0x120 handle_mm_fault+0x704/0xbb0 do_page_fault+0x2e8/0x394 do_mem_abort+0x88/0x124 Pre-4.7 kernels also need commit f86e4271978b ("mm: check the return value of lookup_page_ext for all call sites"). Link: http://lkml.kernel.org/r/20171107094131.14621-1-jaewon31.kim@samsung.com Fixes: eefa864b701d ("mm/page_ext: resurrect struct page extending code for debugging") Signed-off-by: Jaewon Kim Acked-by: Michal Hocko Cc: Vlastimil Babka Cc: Minchan Kim Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/page_ext.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mm/page_ext.c b/mm/page_ext.c index 4f0367d472c492..2c16216c29b629 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -125,7 +125,6 @@ struct page_ext *lookup_page_ext(struct page *page) struct page_ext *base; base = NODE_DATA(page_to_nid(page))->node_page_ext; -#if defined(CONFIG_DEBUG_VM) /* * The sanity checks the page allocator does upon freeing a * page can reach here before the page_ext arrays are @@ -134,7 +133,6 @@ struct page_ext *lookup_page_ext(struct page *page) */ if (unlikely(!base)) return NULL; -#endif index = pfn - round_down(node_start_pfn(page_to_nid(page)), MAX_ORDER_NR_PAGES); return get_entry(base, index); @@ -199,7 +197,6 @@ struct page_ext *lookup_page_ext(struct page *page) { unsigned long pfn = page_to_pfn(page); struct mem_section *section = __pfn_to_section(pfn); -#if defined(CONFIG_DEBUG_VM) /* * The sanity checks the page allocator does upon freeing a * page can reach here before the page_ext arrays are @@ -208,7 +205,6 @@ struct page_ext *lookup_page_ext(struct page *page) */ if (!section->page_ext) return NULL; -#endif return get_entry(section->page_ext, pfn); } From 71d3850bf1e9458ed6a6119abe084e253128dee5 Mon Sep 17 00:00:00 2001 From: Jan Harkes Date: Wed, 27 Sep 2017 15:52:12 -0400 Subject: [PATCH 0053/3326] coda: fix 'kernel memory exposure attempt' in fsync commit d337b66a4c52c7b04eec661d86c2ef6e168965a2 upstream. When an application called fsync on a file in Coda a small request with just the file identifier was allocated, but the declared length was set to the size of union of all possible upcall requests. This bug has been around for a very long time and is now caught by the extra checking in usercopy that was introduced in Linux-4.8. The exposure happens when the Coda cache manager process reads the fsync upcall request at which point it is killed. As a result there is nobody servicing any further upcalls, trapping any processes that try to access the mounted Coda filesystem. 
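The rule the fix enforces is that the length handed to the upcall must be the size actually allocated for this particular request, never the size of the largest possible request. A standalone sketch of that rule with hypothetical request types (plain userspace C, not the Coda structures):

#include <stdio.h>
#include <stdlib.h>

struct fsync_req { int opcode; unsigned int fid; };
struct open_req  { int opcode; unsigned int fid; char path[4096]; };

union any_req {
	struct fsync_req fsync;
	struct open_req  open;
};

/* Stand-in for the upcall: only reports how many bytes would be copied. */
static void send_upcall(const void *req, size_t len)
{
	printf("copying %zu bytes to the cache manager\n", len);
}

int main(void)
{
	size_t insize = sizeof(struct fsync_req);
	struct fsync_req *req = calloc(1, insize);

	if (!req)
		return 1;
	req->opcode = 35;	/* pretend this is the fsync opcode */
	req->fid = 42;

	/* Wrong: sizeof(union any_req) declares far more than was allocated. */
	/* send_upcall(req, sizeof(union any_req)); */

	/* Right: declare exactly what was allocated for this request. */
	send_upcall(req, insize);

	free(req);
	return 0;
}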
Signed-off-by: Jan Harkes Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/coda/upcall.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index a37f003530d735..1175a1722411d3 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -447,8 +447,7 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid) UPARG(CODA_FSYNC); inp->coda_fsync.VFid = *fid; - error = coda_upcall(coda_vcp(sb), sizeof(union inputArgs), - &outsize, inp); + error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); CODA_FREE(inp, insize); return error; From 8d8564fce80dfafa8e90c345161aff734d9730cd Mon Sep 17 00:00:00 2001 From: Corey Minyard Date: Fri, 8 Sep 2017 14:05:58 -0500 Subject: [PATCH 0054/3326] ipmi: Prefer ACPI system interfaces over SMBIOS ones commit 7e030d6dff713250c7dcfb543cad2addaf479b0e upstream. The recent changes to add SMBIOS (DMI) IPMI interfaces as platform devices caused DMI to be selected before ACPI, causing ACPI type of operations to not work. Signed-off-by: Corey Minyard Signed-off-by: Greg Kroah-Hartman --- drivers/char/ipmi/ipmi_si_intf.c | 33 ++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 36f47e8d06a3b9..bc3984ffe86797 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -3424,7 +3424,7 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info) del_timer_sync(&smi_info->si_timer); } -static int is_new_interface(struct smi_info *info) +static struct smi_info *find_dup_si(struct smi_info *info) { struct smi_info *e; @@ -3439,24 +3439,36 @@ static int is_new_interface(struct smi_info *info) */ if (info->slave_addr && !e->slave_addr) e->slave_addr = info->slave_addr; - return 0; + return e; } } - return 1; + return NULL; } static int add_smi(struct smi_info *new_smi) { int rv = 0; + struct smi_info *dup; mutex_lock(&smi_infos_lock); - if (!is_new_interface(new_smi)) { - pr_info(PFX "%s-specified %s state machine: duplicate\n", - ipmi_addr_src_to_str(new_smi->addr_source), - si_to_str[new_smi->si_type]); - rv = -EBUSY; - goto out_err; + dup = find_dup_si(new_smi); + if (dup) { + if (new_smi->addr_source == SI_ACPI && + dup->addr_source == SI_SMBIOS) { + /* We prefer ACPI over SMBIOS. 
*/ + dev_info(dup->dev, + "Removing SMBIOS-specified %s state machine in favor of ACPI\n", + si_to_str[new_smi->si_type]); + cleanup_one_si(dup); + } else { + dev_info(new_smi->dev, + "%s-specified %s state machine: duplicate\n", + ipmi_addr_src_to_str(new_smi->addr_source), + si_to_str[new_smi->si_type]); + rv = -EBUSY; + goto out_err; + } } pr_info(PFX "Adding %s-specified %s state machine\n", @@ -3865,7 +3877,8 @@ static void cleanup_one_si(struct smi_info *to_clean) poll(to_clean); schedule_timeout_uninterruptible(1); } - disable_si_irq(to_clean, false); + if (to_clean->handlers) + disable_si_irq(to_clean, false); while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { poll(to_clean); schedule_timeout_uninterruptible(1); From f9f0b03dedc19a6363a305d119efcb48667a3027 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 24 Nov 2017 08:37:05 +0100 Subject: [PATCH 0055/3326] Linux 4.14.2 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 01f9df1af25670..75d89dc2b94ac8 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 1 +SUBLEVEL = 2 EXTRAVERSION = NAME = Petit Gorille From 236f6e7283f593cf485b5233c8f540cf9f8120b9 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 9 Nov 2017 12:29:34 +0100 Subject: [PATCH 0056/3326] s390: fix transactional execution control register handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a1c5befc1c24eb9c1ee83f711e0f21ee79cbb556 upstream. Dan Horák reported the following crash related to transactional execution: User process fault: interruption code 0013 ilc:3 in libpthread-2.26.so[3ff93c00000+1b000] CPU: 2 PID: 1 Comm: /init Not tainted 4.13.4-300.fc27.s390x #1 Hardware name: IBM 2827 H43 400 (z/VM 6.4.0) task: 00000000fafc8000 task.stack: 00000000fafc4000 User PSW : 0705200180000000 000003ff93c14e70 R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:1 AS:0 CC:2 PM:0 RI:0 EA:3 User GPRS: 0000000000000077 000003ff00000000 000003ff93144d48 000003ff93144d5e 0000000000000000 0000000000000002 0000000000000000 000003ff00000000 0000000000000000 0000000000000418 0000000000000000 000003ffcc9fe770 000003ff93d28f50 000003ff9310acf0 000003ff92b0319a 000003ffcc9fe6d0 User Code: 000003ff93c14e62: 60e0b030 std %f14,48(%r11) 000003ff93c14e66: 60f0b038 std %f15,56(%r11) #000003ff93c14e6a: e5600000ff0e tbegin 0,65294 >000003ff93c14e70: a7740006 brc 7,3ff93c14e7c 000003ff93c14e74: a7080000 lhi %r0,0 000003ff93c14e78: a7f40023 brc 15,3ff93c14ebe 000003ff93c14e7c: b2220000 ipm %r0 000003ff93c14e80: 8800001c srl %r0,28 There are several bugs with control register handling with respect to transactional execution: - on task switch update_per_regs() is only called if the next task has an mm (is not a kernel thread). This however is incorrect. This breaks e.g. for user mode helper handling, where the kernel creates a kernel thread and then execve's a user space program. Control register contents related to transactional execution won't be updated on execve. If the previous task ran with transactional execution disabled then the new task will also run with transactional execution disabled, which is incorrect. Therefore call update_per_regs() unconditionally within switch_to(). - on startup the transactional execution facility is not enabled for the idle thread. This is not really a bug, but an inconsistency to other facilities. Therefore enable the facility if it is available. 
- on fork the new thread's per_flags field is not cleared. This means that a child process inherits the PER_FLAG_NO_TE flag. This flag can be set with a ptrace request to disable transactional execution for the current process. It should not be inherited by new child processes in order to be consistent with the handling of all other PER related debugging options. Therefore clear the per_flags field in copy_thread_tls(). Reported-and-tested-by: Dan Horák Fixes: d35339a42dd1 ("s390: add support for transactional memory") Cc: Martin Schwidefsky Reviewed-by: Christian Borntraeger Reviewed-by: Hendrik Brueckner Signed-off-by: Heiko Carstens Signed-off-by: Greg Kroah-Hartman --- arch/s390/include/asm/switch_to.h | 2 +- arch/s390/kernel/early.c | 4 +++- arch/s390/kernel/process.c | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index c21fe1d57c0096..ec7b476c1ac571 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -37,8 +37,8 @@ static inline void restore_access_regs(unsigned int *acrs) save_ri_cb(prev->thread.ri_cb); \ save_gs_cb(prev->thread.gs_cb); \ } \ + update_cr_regs(next); \ if (next->mm) { \ - update_cr_regs(next); \ set_cpu_flag(CIF_FPU); \ restore_access_regs(&next->thread.acrs[0]); \ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index b945448b9eae8c..f7b280f0ab1682 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -375,8 +375,10 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; if (test_facility(40)) S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; - if (test_facility(50) && test_facility(73)) + if (test_facility(50) && test_facility(73)) { S390_lowcore.machine_flags |= MACHINE_FLAG_TE; + __ctl_set_bit(0, 55); + } if (test_facility(51)) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; if (test_facility(129)) { diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index a4a84fb0804683..203b7cd7c34896 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -100,6 +100,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp, memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); clear_tsk_thread_flag(p, TIF_SINGLE_STEP); + p->thread.per_flags = 0; /* Initialize per thread user and system timer values */ p->thread.user_timer = 0; p->thread.guest_timer = 0; From 21caac65d29b7d985a7cd1672ad1dd316b227974 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 9 Nov 2017 23:00:14 +0100 Subject: [PATCH 0057/3326] s390/noexec: execute kexec datamover without DAT commit d0e810eeb3d326978f248b8f0233a2f30f58c72d upstream. Rebooting into a new kernel with kexec fails (system dies) if tried on a machine that has no-execute support. Reason for this is that the so called datamover code gets executed with DAT on (MMU is active) and the page that contains the datamover is marked as non-executable. Therefore when branching into the datamover an unexpected program check happens and afterwards the machine is dead. This can be simply avoided by disabling DAT, which also disables any no-execute checks, just before the datamover gets executed. In fact the first thing done by the datamover is to disable DAT. The code in the datamover that disables DAT can be removed as well. 
Thanks to Michael Holzheu and Gerald Schaefer for tracking this down. Reviewed-by: Michael Holzheu Reviewed-by: Philipp Rudo Cc: Gerald Schaefer Cc: Martin Schwidefsky Fixes: 57d7f939e7bd ("s390: add no-execute support") Signed-off-by: Heiko Carstens Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/machine_kexec.c | 1 + arch/s390/kernel/relocate_kernel.S | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index b0ba2c26b45e4e..d6f7782e75c93d 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -269,6 +269,7 @@ static void __do_machine_kexec(void *data) s390_reset_system(); data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); + __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */ /* Call the moving routine */ (*data_mover)(&image->head, image->start); diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index ca37e5d5b40cc3..9c2c96da23d029 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S @@ -29,7 +29,6 @@ ENTRY(relocate_kernel) basr %r13,0 # base address .base: - stnsm sys_msk-.base(%r13),0xfb # disable DAT stctg %c0,%c15,ctlregs-.base(%r13) stmg %r0,%r15,gprregs-.base(%r13) lghi %r0,3 @@ -103,8 +102,6 @@ ENTRY(relocate_kernel) .align 8 load_psw: .long 0x00080000,0x80000000 - sys_msk: - .quad 0 ctlregs: .rept 16 .quad 0 From 27576413d9484cd9df933e3e4c802cb0cfb9c915 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 11 Sep 2017 11:24:22 +0200 Subject: [PATCH 0058/3326] s390/runtime instrumention: fix possible memory corruption commit d6e646ad7cfa7034d280459b2b2546288f247144 upstream. For PREEMPT enabled kernels the runtime instrumentation (RI) code contains a possible use-after-free bug. If a task that makes use of RI exits, it will execute do_exit() while still enabled for preemption. That function will call exit_thread_runtime_instr() via exit_thread(). If exit_thread_runtime_instr() gets preempted after the RI control block of the task has been freed but before the pointer to it is set to NULL, then save_ri_cb(), called from switch_to(), will write to already freed memory. Avoid this and simply disable preemption while freeing the control block and setting the pointer to NULL. 
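Reduced to a minimal kernel-style sketch (hypothetical structure and field names, not the s390 thread layout, and simplified to the pattern only): the kfree() and the clearing of the pointer have to sit in one non-preemptible region, because the save path that reads the pointer only runs on a task switch, which cannot happen while preemption is disabled.

#include <linux/preempt.h>
#include <linux/slab.h>

struct thread_ctx {
	void *ri_cb;	/* control block read by the context-switch path */
};

static void exit_ctx(struct thread_ctx *t)
{
	/*
	 * No task switch can occur between the kfree() and the NULL
	 * store, so the switch-time save path never sees a freed but
	 * still-published pointer.
	 */
	preempt_disable();
	if (t->ri_cb) {
		kfree(t->ri_cb);
		t->ri_cb = NULL;
	}
	preempt_enable();
}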
Fixes: e4b8b3f33fca ("s390: add support for runtime instrumentation") Reviewed-by: Christian Borntraeger Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/runtime_instr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 32aefb215e59ff..d85c64821a6b1b 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -50,11 +50,13 @@ void exit_thread_runtime_instr(void) { struct task_struct *task = current; + preempt_disable(); if (!task->thread.ri_cb) return; disable_runtime_instr(); kfree(task->thread.ri_cb); task->thread.ri_cb = NULL; + preempt_enable(); } SYSCALL_DEFINE1(s390_runtime_instr, int, command) @@ -65,9 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command) return -EOPNOTSUPP; if (command == S390_RUNTIME_INSTR_STOP) { - preempt_disable(); exit_thread_runtime_instr(); - preempt_enable(); return 0; } From 7ee3f026aeb388248ecdc5cd6760169ee6a9e5af Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 11 Sep 2017 11:24:22 +0200 Subject: [PATCH 0059/3326] s390/guarded storage: fix possible memory corruption commit fa1edf3f63c05ca8eacafcd7048ed91e5360f1a8 upstream. For PREEMPT enabled kernels the guarded storage (GS) code contains a possible use-after-free bug. If a task that makes use of GS exits, it will execute do_exit() while still enabled for preemption. That function will call exit_thread_runtime_instr() via exit_thread(). If exit_thread_gs() gets preempted after the GS control block of the task has been freed but before the pointer to it is set to NULL, then save_gs_cb(), called from switch_to(), will write to already freed memory. Avoid this and simply disable preemption while freeing the control block and setting the pointer to NULL. Fixes: 916cda1aa1b4 ("s390: add a system call for guarded storage") Reviewed-by: Christian Borntraeger Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/guarded_storage.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c index bff39b66c9ffde..9ee794e14f33e7 100644 --- a/arch/s390/kernel/guarded_storage.c +++ b/arch/s390/kernel/guarded_storage.c @@ -14,9 +14,11 @@ void exit_thread_gs(void) { + preempt_disable(); kfree(current->thread.gs_cb); kfree(current->thread.gs_bc_cb); current->thread.gs_cb = current->thread.gs_bc_cb = NULL; + preempt_enable(); } static int gs_enable(void) From 15e82cdb4d8da6c9ab0782c8bea008d03f52a97a Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 26 Sep 2017 09:16:48 +0200 Subject: [PATCH 0060/3326] s390/disassembler: add missing end marker for e7 table commit 5c50538752af7968f53924b22dede8ed4ce4cb3b upstream. The e7 opcode table does not have an end marker. Hence when trying to find an unknown e7 instruction the code will access memory behind the table until it finds something that matches the opcode, or the kernel crashes, whatever comes first. This affects not only the in-kernel disassembler but also uprobes and kprobes which refuse to set a probe on unknown instructions, and therefore search the opcode tables to figure out if instructions are known or not. 
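A single missing entry is enough for out-of-bounds reads because these lookups are bounded only by the sentinel, not by an element count. A standalone illustration with a made-up two-entry table (plain userspace C, not the real e7 table):

#include <stdio.h>

struct insn {
	const char *name;
	unsigned char opc;
};

static const struct insn table[] = {
	{ "vfsq", 0xce },
	{ "vfs",  0xe2 },
	{ "",     0x00 },	/* end marker the loop below depends on */
};

static const struct insn *find_insn(unsigned char opc)
{
	const struct insn *p;

	for (p = table; p->name[0] != '\0'; p++)
		if (p->opc == opc)
			return p;
	return NULL;	/* unknown opcode, but no walk past the table */
}

int main(void)
{
	const struct insn *hit = find_insn(0x99);

	printf("0x99 -> %s\n", hit ? hit->name : "unknown");
	return 0;
}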
Fixes: 3585cb0280654 ("s390/disassembler: add vector instructions") Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/dis.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index f7e82302a71ef8..d9970c15f79dd9 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1548,6 +1548,7 @@ static struct s390_insn opcode_e7[] = { { "vfsq", 0xce, INSTR_VRR_VV000MM }, { "vfs", 0xe2, INSTR_VRR_VVV00MM }, { "vftci", 0x4a, INSTR_VRI_VVIMM }, + { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_eb[] = { From 6047980008d8db5de9ef5c7ad897f5d2caac003c Mon Sep 17 00:00:00 2001 From: Vasily Gorbik Date: Wed, 15 Nov 2017 14:15:36 +0100 Subject: [PATCH 0061/3326] s390/disassembler: increase show_code buffer size commit b192571d1ae375e0bbe0aa3ccfa1a3c3704454b9 upstream. Current buffer size of 64 is too small. objdump shows that there are instructions which would require up to 75 bytes buffer (with current formating). 128 bytes "ought to be enough for anybody". Also replaces 8 spaces with a single tab to reduce the memory footprint. Fixes the following KASAN finding: BUG: KASAN: stack-out-of-bounds in number+0x3fe/0x538 Write of size 1 at addr 000000005a4a75a0 by task bash/1282 CPU: 1 PID: 1282 Comm: bash Not tainted 4.14.0+ #215 Hardware name: IBM 2964 N96 702 (z/VM 6.4.0) Call Trace: ([<000000000011eeb6>] show_stack+0x56/0x88) [<0000000000e1ce1a>] dump_stack+0x15a/0x1b0 [<00000000004e2994>] print_address_description+0xf4/0x288 [<00000000004e2cf2>] kasan_report+0x13a/0x230 [<0000000000e38ae6>] number+0x3fe/0x538 [<0000000000e3dfe4>] vsnprintf+0x194/0x948 [<0000000000e3ea42>] sprintf+0xa2/0xb8 [<00000000001198dc>] print_insn+0x374/0x500 [<0000000000119346>] show_code+0x4ee/0x538 [<000000000011f234>] show_registers+0x34c/0x388 [<000000000011f2ae>] show_regs+0x3e/0xa8 [<000000000011f502>] die+0x1ea/0x2e8 [<0000000000138f0e>] do_no_context+0x106/0x168 [<0000000000139a1a>] do_protection_exception+0x4da/0x7d0 [<0000000000e55914>] pgm_check_handler+0x16c/0x1c0 [<000000000090639e>] sysrq_handle_crash+0x46/0x58 ([<0000000000000007>] 0x7) [<00000000009073fa>] __handle_sysrq+0x102/0x218 [<0000000000907c06>] write_sysrq_trigger+0xd6/0x100 [<000000000061d67a>] proc_reg_write+0xb2/0x128 [<0000000000520be6>] __vfs_write+0xee/0x368 [<0000000000521222>] vfs_write+0x21a/0x278 [<000000000052156a>] SyS_write+0xda/0x178 [<0000000000e555cc>] system_call+0xc4/0x270 The buggy address belongs to the page: page:000003d1016929c0 count:0 mapcount:0 mapping: (null) index:0x0 flags: 0x0() raw: 0000000000000000 0000000000000000 0000000000000000 ffffffff00000000 raw: 0000000000000100 0000000000000200 0000000000000000 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: 000000005a4a7480: 00 00 00 00 00 00 00 00 00 00 00 00 f1 f1 f1 f1 000000005a4a7500: 00 00 00 00 00 00 00 00 f2 f2 f2 f2 00 00 00 00 >000000005a4a7580: 00 00 00 00 f3 f3 f3 f3 00 00 00 00 00 00 00 00 ^ 000000005a4a7600: 00 00 00 00 00 00 00 00 00 00 f1 f1 f1 f1 f8 f8 000000005a4a7680: f2 f2 f2 f2 f2 f2 f8 f8 f2 f2 f3 f3 f3 f3 00 00 ================================================================== Signed-off-by: Vasily Gorbik Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/dis.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index d9970c15f79dd9..2394557653d57c 100644 --- 
a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1954,7 +1954,7 @@ void show_code(struct pt_regs *regs) { char *mode = user_mode(regs) ? "User" : "Krnl"; unsigned char code[64]; - char buffer[64], *ptr; + char buffer[128], *ptr; mm_segment_t old_fs; unsigned long addr; int start, end, opsize, hops, i; @@ -2017,7 +2017,7 @@ void show_code(struct pt_regs *regs) start += opsize; pr_cont("%s", buffer); ptr = buffer; - ptr += sprintf(ptr, "\n "); + ptr += sprintf(ptr, "\n\t "); hops++; } pr_cont("\n"); From ef2b11c0a31df460cfda8bdc1a79806f9f95a3c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 7 Nov 2017 23:08:10 +0200 Subject: [PATCH 0062/3326] ACPI / PM: Fix acpi_pm_notifier_lock vs flush_workqueue() deadlock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit ff1656790b3a4caca94505c52fd0250f981ea187 upstream. acpi_remove_pm_notifier() ends up calling flush_workqueue() while holding acpi_pm_notifier_lock, and that same lock is taken by by the work via acpi_pm_notify_handler(). This can deadlock. To fix the problem let's split the single lock into two: one to protect the dev->wakeup between the work vs. add/remove, and another one to handle notifier installation vs. removal. After commit a1d14934ea4b "workqueue/lockdep: 'Fix' flush_work() annotation" I was able to kill the machine (Intel Braswell) very easily with 'powertop --auto-tune', runtime suspending i915, and trying to wake it up via the USB keyboard. The cases when it didn't die are presumably explained by lockdep getting disabled by something else (cpu hotplug locking issues usually). Fortunately I still got a lockdep report over netconsole (trickling in very slowly), even though the machine was otherwise practically dead: [ 112.179806] ====================================================== [ 114.670858] WARNING: possible circular locking dependency detected [ 117.155663] 4.13.0-rc6-bsw-bisect-00169-ga1d14934ea4b #119 Not tainted [ 119.658101] ------------------------------------------------------ [ 121.310242] xhci_hcd 0000:00:14.0: xHCI host not responding to stop endpoint command. [ 121.313294] xhci_hcd 0000:00:14.0: xHCI host controller not responding, assume dead [ 121.313346] xhci_hcd 0000:00:14.0: HC died; cleaning up [ 121.313485] usb 1-6: USB disconnect, device number 3 [ 121.313501] usb 1-6.2: USB disconnect, device number 4 [ 134.747383] kworker/0:2/47 is trying to acquire lock: [ 137.220790] (acpi_pm_notifier_lock){+.+.}, at: [] acpi_pm_notify_handler+0x2f/0x80 [ 139.721524] [ 139.721524] but task is already holding lock: [ 144.672922] ((&dpc->work)){+.+.}, at: [] process_one_work+0x160/0x720 [ 147.184450] [ 147.184450] which lock already depends on the new lock. 
[ 147.184450] [ 154.604711] [ 154.604711] the existing dependency chain (in reverse order) is: [ 159.447888] [ 159.447888] -> #2 ((&dpc->work)){+.+.}: [ 164.183486] __lock_acquire+0x1255/0x13f0 [ 166.504313] lock_acquire+0xb5/0x210 [ 168.778973] process_one_work+0x1b9/0x720 [ 171.030316] worker_thread+0x4c/0x440 [ 173.257184] kthread+0x154/0x190 [ 175.456143] ret_from_fork+0x27/0x40 [ 177.624348] [ 177.624348] -> #1 ("kacpi_notify"){+.+.}: [ 181.850351] __lock_acquire+0x1255/0x13f0 [ 183.941695] lock_acquire+0xb5/0x210 [ 186.046115] flush_workqueue+0xdd/0x510 [ 190.408153] acpi_os_wait_events_complete+0x31/0x40 [ 192.625303] acpi_remove_notify_handler+0x133/0x188 [ 194.820829] acpi_remove_pm_notifier+0x56/0x90 [ 196.989068] acpi_dev_pm_detach+0x5f/0xa0 [ 199.145866] dev_pm_domain_detach+0x27/0x30 [ 201.285614] i2c_device_probe+0x100/0x210 [ 203.411118] driver_probe_device+0x23e/0x310 [ 205.522425] __driver_attach+0xa3/0xb0 [ 207.634268] bus_for_each_dev+0x69/0xa0 [ 209.714797] driver_attach+0x1e/0x20 [ 211.778258] bus_add_driver+0x1bc/0x230 [ 213.837162] driver_register+0x60/0xe0 [ 215.868162] i2c_register_driver+0x42/0x70 [ 217.869551] 0xffffffffa0172017 [ 219.863009] do_one_initcall+0x45/0x170 [ 221.843863] do_init_module+0x5f/0x204 [ 223.817915] load_module+0x225b/0x29b0 [ 225.757234] SyS_finit_module+0xc6/0xd0 [ 227.661851] do_syscall_64+0x5c/0x120 [ 229.536819] return_from_SYSCALL_64+0x0/0x7a [ 231.392444] [ 231.392444] -> #0 (acpi_pm_notifier_lock){+.+.}: [ 235.124914] check_prev_add+0x44e/0x8a0 [ 237.024795] __lock_acquire+0x1255/0x13f0 [ 238.937351] lock_acquire+0xb5/0x210 [ 240.840799] __mutex_lock+0x75/0x940 [ 242.709517] mutex_lock_nested+0x1c/0x20 [ 244.551478] acpi_pm_notify_handler+0x2f/0x80 [ 246.382052] acpi_ev_notify_dispatch+0x44/0x5c [ 248.194412] acpi_os_execute_deferred+0x14/0x30 [ 250.003925] process_one_work+0x1ec/0x720 [ 251.803191] worker_thread+0x4c/0x440 [ 253.605307] kthread+0x154/0x190 [ 255.387498] ret_from_fork+0x27/0x40 [ 257.153175] [ 257.153175] other info that might help us debug this: [ 257.153175] [ 262.324392] Chain exists of: [ 262.324392] acpi_pm_notifier_lock --> "kacpi_notify" --> (&dpc->work) [ 262.324392] [ 267.391997] Possible unsafe locking scenario: [ 267.391997] [ 270.758262] CPU0 CPU1 [ 272.431713] ---- ---- [ 274.060756] lock((&dpc->work)); [ 275.646532] lock("kacpi_notify"); [ 277.260772] lock((&dpc->work)); [ 278.839146] lock(acpi_pm_notifier_lock); [ 280.391902] [ 280.391902] *** DEADLOCK *** [ 280.391902] [ 284.986385] 2 locks held by kworker/0:2/47: [ 286.524895] #0: ("kacpi_notify"){+.+.}, at: [] process_one_work+0x160/0x720 [ 288.112927] #1: ((&dpc->work)){+.+.}, at: [] process_one_work+0x160/0x720 [ 289.727725] Fixes: c072530f391e (ACPI / PM: Revork the handling of ACPI device wakeup notifications) Signed-off-by: Ville Syrjälä Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/device_pm.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index fbcc73f7a09909..18af71057b44e7 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -387,6 +387,7 @@ EXPORT_SYMBOL(acpi_bus_power_manageable); #ifdef CONFIG_PM static DEFINE_MUTEX(acpi_pm_notifier_lock); +static DEFINE_MUTEX(acpi_pm_notifier_install_lock); void acpi_pm_wakeup_event(struct device *dev) { @@ -443,24 +444,25 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, if (!dev && !func) return AE_BAD_PARAMETER; - mutex_lock(&acpi_pm_notifier_lock); + mutex_lock(&acpi_pm_notifier_install_lock); if (adev->wakeup.flags.notifier_present) goto out; - adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev)); - adev->wakeup.context.dev = dev; - adev->wakeup.context.func = func; - status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY, acpi_pm_notify_handler, NULL); if (ACPI_FAILURE(status)) goto out; + mutex_lock(&acpi_pm_notifier_lock); + adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev)); + adev->wakeup.context.dev = dev; + adev->wakeup.context.func = func; adev->wakeup.flags.notifier_present = true; + mutex_unlock(&acpi_pm_notifier_lock); out: - mutex_unlock(&acpi_pm_notifier_lock); + mutex_unlock(&acpi_pm_notifier_install_lock); return status; } @@ -472,7 +474,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev) { acpi_status status = AE_BAD_PARAMETER; - mutex_lock(&acpi_pm_notifier_lock); + mutex_lock(&acpi_pm_notifier_install_lock); if (!adev->wakeup.flags.notifier_present) goto out; @@ -483,14 +485,15 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev) if (ACPI_FAILURE(status)) goto out; + mutex_lock(&acpi_pm_notifier_lock); adev->wakeup.context.func = NULL; adev->wakeup.context.dev = NULL; wakeup_source_unregister(adev->wakeup.ws); - adev->wakeup.flags.notifier_present = false; + mutex_unlock(&acpi_pm_notifier_lock); out: - mutex_unlock(&acpi_pm_notifier_lock); + mutex_unlock(&acpi_pm_notifier_install_lock); return status; } From 3fe36d0c58a2a786308563163a999943bdc7b8b2 Mon Sep 17 00:00:00 2001 From: Lv Zheng Date: Tue, 26 Sep 2017 16:54:03 +0800 Subject: [PATCH 0063/3326] ACPI / EC: Fix regression related to triggering source of EC event handling commit 53c5eaabaea9a1b7a96f95ccc486d2ad721d95bb upstream. Originally the Samsung quirks removed by commit 4c237371 can be covered by commit e923e8e7 and ec_freeze_events=Y mode. But commit 9c40f956 changed ec_freeze_events=Y back to N, making this problem re-surface. Actually, if commit e923e8e7 is robust enough, we can freely change ec_freeze_events mode, so this patch fixes the issue by improving commit e923e8e7. Related commits listed in the merged order: Commit: e923e8e79e18fd6be9162f1be6b99a002e9df2cb Subject: ACPI / EC: Fix an issue that SCI_EVT cannot be detected after event is enabled Commit: 4c237371f290d1ed3b2071dd43554362137b1cce Subject: ACPI / EC: Remove old CLEAR_ON_RESUME quirk Commit: 9c40f956ce9b331493347d1b3cb7e384f7dc0581 Subject: Revert "ACPI / EC: Enable event freeze mode..." to fix a regression This patch not only fixes the reported post-resume EC event triggering source issue, but also fixes an unreported similar issue related to the driver bind by adding EC event triggering source in ec_install_handlers(). 
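The general pattern behind both fixes is that enabling an event source has to be followed by one unconditional poll, because an event latched while handling was blocked will not generate another notification on its own. A minimal standalone sketch of that pattern (hypothetical flags, plain userspace C, not the EC driver's state machine):

#include <stdbool.h>
#include <stdio.h>

static bool handling_enabled;
static bool event_pending;	/* latched while handling was disabled */

static void process_pending(void)
{
	if (!handling_enabled)
		return;
	if (event_pending) {
		event_pending = false;
		printf("handled a pending event\n");
	}
}

static void enable_event_handling(void)
{
	handling_enabled = true;
	/*
	 * Kick the handler once: an event latched earlier arrived while
	 * handling was blocked, and nothing else will re-deliver it.
	 */
	process_pending();
}

int main(void)
{
	event_pending = true;	/* notification arrived while disabled */
	enable_event_handling();
	return 0;
}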
Fixes: e923e8e79e18 (ACPI / EC: Fix an issue that SCI_EVT cannot be detected after event is enabled) Fixes: 4c237371f290 (ACPI / EC: Remove old CLEAR_ON_RESUME quirk) Fixes: 9c40f956ce9b (Revert "ACPI / EC: Enable event freeze mode..." to fix a regression) Link: https://bugzilla.kernel.org/show_bug.cgi?id=196833 Signed-off-by: Lv Zheng Reported-by: Alistair Hamilton Tested-by: Alistair Hamilton Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/ec.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 236b14324780a1..82b3ce5e937e8b 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -486,8 +486,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec) { if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) ec_log_drv("event unblocked"); - if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) - advance_transaction(ec); + /* + * Unconditionally invoke this once after enabling the event + * handling mechanism to detect the pending events. + */ + advance_transaction(ec); } static inline void __acpi_ec_disable_event(struct acpi_ec *ec) @@ -1456,11 +1459,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events) if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) acpi_ec_enable_gpe(ec, true); - - /* EC is fully operational, allow queries */ - acpi_ec_enable_event(ec); } } + /* EC is fully operational, allow queries */ + acpi_ec_enable_event(ec); return 0; } From b79974945e48f9583861c464a8558d1db432c613 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 8 Nov 2017 20:23:55 +0530 Subject: [PATCH 0064/3326] cpufreq: schedutil: Reset cached_raw_freq when not in sync with next_freq commit 07458f6a5171d97511dfbdf6ce549ed2ca0280c7 upstream. 'cached_raw_freq' is used to get the next frequency quickly but should always be in sync with sg_policy->next_freq. There is a case where it is not and in such cases it should be reset to avoid switching to incorrect frequencies. Consider this case for example: - policy->cur is 1.2 GHz (Max) - New request comes for 780 MHz and we store that in cached_raw_freq. - Based on 780 MHz, we calculate the effective frequency as 800 MHz. - We then see the CPU wasn't idle recently and choose to keep the next freq as 1.2 GHz. - Now we have cached_raw_freq is 780 MHz and sg_policy->next_freq is 1.2 GHz. - Now if the utilization doesn't change in then next request, then the next target frequency will still be 780 MHz and it will match with cached_raw_freq. But we will choose 1.2 GHz instead of 800 MHz here. Fixes: b7eaf1aab9f8 (cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely) Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- kernel/sched/cpufreq_schedutil.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index ba0da243fdd837..2f52ec0f1539fc 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -282,8 +282,12 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, * Do not reduce the frequency if the CPU has not been idle * recently, as the reduction is likely to be premature then. 
*/ - if (busy && next_f < sg_policy->next_freq) + if (busy && next_f < sg_policy->next_freq) { next_f = sg_policy->next_freq; + + /* Reset cached freq as next_freq has changed */ + sg_policy->cached_raw_freq = 0; + } } sugov_update_commit(sg_policy, time, next_f); } From 668a1285187668ce162a3aa6b4f5e3272e659954 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 10 Oct 2017 18:09:49 +0200 Subject: [PATCH 0065/3326] serdev: fix registration of second slave commit 08fcee289f341786eb3b44e5f2d1dc850943238e upstream. Serdev currently only supports a single slave device, but the required sanity checks to prevent further registration attempts were missing. If a serial-port node has two child nodes with compatible properties, the OF code would try to register two slave devices using the same id and name. Driver core will not allow this (and there will be loud complaints), but the controller's slave pointer would already have been set to address of the soon to be deallocated second struct serdev_device. As the first slave device remains registered, this can lead to later use-after-free issues when the slave callbacks are accessed. Note that while the serdev registration helpers are exported, they are typically only called by serdev core. Any other (out-of-tree) callers must serialise registration and deregistration themselves. Fixes: cd6484e1830b ("serdev: Introduce new bus for serial attached devices") Cc: Rob Herring Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serdev/core.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index c68fb3a8ea1c3c..97db76afced2ed 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -65,21 +65,32 @@ static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env) */ int serdev_device_add(struct serdev_device *serdev) { + struct serdev_controller *ctrl = serdev->ctrl; struct device *parent = serdev->dev.parent; int err; dev_set_name(&serdev->dev, "%s-%d", dev_name(parent), serdev->nr); + /* Only a single slave device is currently supported. */ + if (ctrl->serdev) { + dev_err(&serdev->dev, "controller busy\n"); + return -EBUSY; + } + ctrl->serdev = serdev; + err = device_add(&serdev->dev); if (err < 0) { dev_err(&serdev->dev, "Can't add %s, status %d\n", dev_name(&serdev->dev), err); - goto err_device_add; + goto err_clear_serdev; } dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev)); -err_device_add: + return 0; + +err_clear_serdev: + ctrl->serdev = NULL; return err; } EXPORT_SYMBOL_GPL(serdev_device_add); @@ -90,7 +101,10 @@ EXPORT_SYMBOL_GPL(serdev_device_add); */ void serdev_device_remove(struct serdev_device *serdev) { + struct serdev_controller *ctrl = serdev->ctrl; + device_unregister(&serdev->dev); + ctrl->serdev = NULL; } EXPORT_SYMBOL_GPL(serdev_device_remove); @@ -295,7 +309,6 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl) return NULL; serdev->ctrl = ctrl; - ctrl->serdev = serdev; device_initialize(&serdev->dev); serdev->dev.parent = &ctrl->dev; serdev->dev.bus = &serdev_bus_type; From 9f088f6a6752c18ceb1338e07b7b702a0b086914 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 18 Sep 2017 08:54:40 -0700 Subject: [PATCH 0066/3326] sched: Make resched_cpu() unconditional commit 7c2102e56a3f7d85b5d8f33efbd7aecc1f36fdd8 upstream. 
The current implementation of synchronize_sched_expedited() incorrectly assumes that resched_cpu() is unconditional, which it is not. This means that synchronize_sched_expedited() can hang when resched_cpu()'s trylock fails as follows (analysis by Neeraj Upadhyay): o CPU1 is waiting for expedited wait to complete: sync_rcu_exp_select_cpus rdp->exp_dynticks_snap & 0x1 // returns 1 for CPU5 IPI sent to CPU5 synchronize_sched_expedited_wait ret = swait_event_timeout(rsp->expedited_wq, sync_rcu_preempt_exp_done(rnp_root), jiffies_stall); expmask = 0x20, CPU 5 in idle path (in cpuidle_enter()) o CPU5 handles IPI and fails to acquire rq lock. Handles IPI sync_sched_exp_handler resched_cpu returns while failing to try lock acquire rq->lock need_resched is not set o CPU5 calls rcu_idle_enter() and as need_resched is not set, goes to idle (schedule() is not called). o CPU 1 reports RCU stall. Given that resched_cpu() is now used only by RCU, this commit fixes the assumption by making resched_cpu() unconditional. Reported-by: Neeraj Upadhyay Suggested-by: Neeraj Upadhyay Signed-off-by: Paul E. McKenney Acked-by: Steven Rostedt (VMware) Acked-by: Peter Zijlstra (Intel) Signed-off-by: Greg Kroah-Hartman --- kernel/sched/core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d17c5da523a0bc..8fa7b6f9e19b86 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -505,8 +505,7 @@ void resched_cpu(int cpu) struct rq *rq = cpu_rq(cpu); unsigned long flags; - if (!raw_spin_trylock_irqsave(&rq->lock, flags)) - return; + raw_spin_lock_irqsave(&rq->lock, flags); resched_curr(rq); raw_spin_unlock_irqrestore(&rq->lock, flags); } From ce922b7b4a5178279eb5453e7fcb0b116c757f1e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 7 Nov 2017 14:15:27 -0800 Subject: [PATCH 0067/3326] lib/mpi: call cond_resched() from mpi_powm() loop commit 1d9ddde12e3c9bab7f3d3484eb9446315e3571ca upstream. On a non-preemptible kernel, if KEYCTL_DH_COMPUTE is called with the largest permitted inputs (16384 bits), the kernel spends 10+ seconds doing modular exponentiation in mpi_powm() without rescheduling. If all threads do it, it locks up the system. Moreover, it can cause rcu_sched-stall warnings. Notwithstanding the insanity of doing this calculation in kernel mode rather than in userspace, fix it by calling cond_resched() as each bit from the exponent is processed. It's still noninterruptible, but at least it's preemptible now. Do the cond_resched() once per bit rather than once per MPI limb because each limb might still easily take 100+ milliseconds on slow CPUs. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- lib/mpi/mpi-pow.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c index e24388a863a76f..468fb7cd1221ef 100644 --- a/lib/mpi/mpi-pow.c +++ b/lib/mpi/mpi-pow.c @@ -26,6 +26,7 @@ * however I decided to publish this code under the plain GPL. */ +#include #include #include "mpi-internal.h" #include "longlong.h" @@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) } e <<= 1; c--; + cond_resched(); } i--; From 46855f80d1a0174b008ac7c6b8439a1c294bcb53 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 6 Nov 2017 14:17:53 -0600 Subject: [PATCH 0068/3326] x86/boot: Fix boot failure when SMP MP-table is based at 0 commit ac5292e9a294618cecb31109d1ba265e3d027ba2 upstream. 
When crosvm is used to boot a kernel as a VM, the SMP MP-table is found at physical address 0x0. This causes mpf_base to be set to 0 and a subsequent "if (!mpf_base)" check in default_get_smp_config() results in the MP-table not being parsed. Further into the boot this results in an oops when attempting a read_apic_id(). Add a boolean variable that is set to true when the MP-table is found. Use this variable for testing if the MP-table was found so that even a value of 0 for mpf_base will result in continued parsing of the MP-table. Fixes: 5997efb96756 ("x86/boot: Use memremap() to map the MPF and MPC data") Reported-by: Tomeu Vizoso Signed-off-by: Tom Lendacky Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Borislav Petkov Cc: regression@leemhuis.info Link: https://lkml.kernel.org/r/20171106201753.23059.86674.stgit@tlendack-t1.amdoffice.net Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/mpparse.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 410c5dadcee319..3a4b12809ab5f8 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -431,6 +431,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) } static unsigned long mpf_base; +static bool mpf_found; static unsigned long __init get_mpc_size(unsigned long physptr) { @@ -504,7 +505,7 @@ void __init default_get_smp_config(unsigned int early) if (!smp_found_config) return; - if (!mpf_base) + if (!mpf_found) return; if (acpi_lapic && early) @@ -593,6 +594,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) smp_found_config = 1; #endif mpf_base = base; + mpf_found = true; pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", base, base + sizeof(*mpf) - 1, mpf); @@ -858,7 +860,7 @@ static int __init update_mp_table(void) if (!enable_update_mptable) return 0; - if (!mpf_base) + if (!mpf_found) return 0; mpf = early_memremap(mpf_base, sizeof(*mpf)); From 00d5e292a8ea9675c6ac4583743b811fda6ba157 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 24 Nov 2017 13:56:30 +0900 Subject: [PATCH 0069/3326] x86/decoder: Add new TEST instruction pattern commit 12a78d43de767eaf8fb272facb7a7b6f2dc6a9df upstream. The kbuild test robot reported this build warning: Warning: arch/x86/tools/test_get_len found difference at :ffffffff8103dd2c Warning: ffffffff8103dd82: f6 09 d8 testb $0xd8,(%rcx) Warning: objdump says 3 bytes, but insn_get_length() says 2 Warning: decoded and checked 1569014 instructions with 1 warnings This sequence seems to be a new instruction not in the opcode map in the Intel SDM. The instruction sequence is "F6 09 d8", means Group3(F6), MOD(00)REG(001)RM(001), and 0xd8. Intel SDM vol2 A.4 Table A-6 said the table index in the group is "Encoding of Bits 5,4,3 of the ModR/M Byte (bits 2,1,0 in parenthesis)" In that table, opcodes listed by the index REG bits as: 000 001 010 011 100 101 110 111 TEST Ib/Iz,(undefined),NOT,NEG,MUL AL/rAX,IMUL AL/rAX,DIV AL/rAX,IDIV AL/rAX So, it seems TEST Ib is assigned to 001. Add the new pattern. Reported-by: kbuild test robot Signed-off-by: Masami Hiramatsu Cc: Greg Kroah-Hartman Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/x86-opcode-map.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 12e377184ee4ad..c4d55919fac19e 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -896,7 +896,7 @@ EndTable GrpTable: Grp3_1 0: TEST Eb,Ib -1: +1: TEST Eb,Ib 2: NOT Eb 3: NEG Eb 4: MUL AL,Eb From c91f3fc2411d9f007fbbaf9b10f772805e01cafe Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Tue, 21 Nov 2017 20:43:56 -0800 Subject: [PATCH 0070/3326] x86/entry/64: Fix entry_SYSCALL_64_after_hwframe() IRQ tracing commit 548c3050ea8d16997ae27f9e080a8338a606fc93 upstream. When I added entry_SYSCALL_64_after_hwframe(), I left TRACE_IRQS_OFF before it. This means that users of entry_SYSCALL_64_after_hwframe() were responsible for invoking TRACE_IRQS_OFF, and the one and only user (Xen, added in the same commit) got it wrong. I think this would manifest as a warning if a Xen PV guest with CONFIG_DEBUG_LOCKDEP=y were used with context tracking. (The context tracking bit is to cause lockdep to get invoked before we turn IRQs back on.) I haven't tested that for real yet because I can't get a kernel configured like that to boot at all on Xen PV. Move TRACE_IRQS_OFF below the label. Signed-off-by: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 8a9949bc71a7 ("x86/xen/64: Rearrange the SYSCALL entries") Link: http://lkml.kernel.org/r/9150aac013b7b95d62c2336751d5b6e91d2722aa.1511325444.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index bcfc5668dcb22f..2e956afe272c09 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -148,8 +148,6 @@ ENTRY(entry_SYSCALL_64) movq %rsp, PER_CPU_VAR(rsp_scratch) movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp - TRACE_IRQS_OFF - /* Construct struct pt_regs on stack */ pushq $__USER_DS /* pt_regs->ss */ pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ @@ -170,6 +168,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */ UNWIND_HINT_REGS extra=0 + TRACE_IRQS_OFF + /* * If we need to do entry work or if we guess we'll need to do * exit work, go straight to the slow path. From f9a64e23a9da528e7d8aa1bd2c7bb92be4ebb724 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 22 Nov 2017 20:39:16 -0800 Subject: [PATCH 0071/3326] x86/entry/64: Add missing irqflags tracing to native_load_gs_index() commit ca37e57bbe0cf1455ea3e84eb89ed04a132d59e1 upstream. Running this code with IRQs enabled (where dummy_lock is a spinlock): static void check_load_gs_index(void) { /* This will fail. */ load_gs_index(0xffff); spin_lock(&dummy_lock); spin_unlock(&dummy_lock); } Will generate a lockdep warning. The issue is that the actual write to %gs would cause an exception with IRQs disabled, and the exception handler would, as an inadvertent side effect, update irqflag tracing to reflect the IRQs-off status. native_load_gs_index() would then turn IRQs back on and return with irqflag tracing still thinking that IRQs were off. 
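The ordering rule here is the same one that applies outside assembly: when a second entry point joins a path part-way through, bookkeeping that both entries rely on must live after the shared point, or the late entry silently skips it. A standalone sketch of that rule in C (hypothetical function names, not the actual entry code):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_traced_off;

static void trace_irqs_off(void)
{
	irqs_traced_off = true;
}

/* Everything both entry points need happens after the shared point. */
static void common_tail(void)
{
	trace_irqs_off();
	printf("tracing thinks irqs are off: %d\n", irqs_traced_off);
}

/* Normal entry: builds its own frame, then falls into the tail. */
static void entry_syscall(void)
{
	/* ... construct the register frame ... */
	common_tail();
}

/* Late entry for a caller that has already built the frame. */
static void entry_syscall_after_hwframe(void)
{
	common_tail();
}

int main(void)
{
	entry_syscall();
	irqs_traced_off = false;
	entry_syscall_after_hwframe();
	return 0;
}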
The dummy lock-and-unlock causes lockdep to notice the error and warn. Fix it by adding the missing tracing. Apparently nothing did this in a context where it mattered. I haven't tried to find a code path that would actually exhibit the warning if appropriately nasty user code were running. I suspect that the security impact of this bug is very, very low -- production systems don't run with lockdep enabled, and the warning is mostly harmless anyway. Found during a quick audit of the entry code to try to track down an unrelated bug that Ingo found in some still-in-development code. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/e1aeb0e6ba8dd430ec36c8a35e63b429698b4132.1511411918.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 2e956afe272c09..518d9286b3d116 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -51,15 +51,19 @@ ENTRY(native_usergs_sysret64) END(native_usergs_sysret64) #endif /* CONFIG_PARAVIRT */ -.macro TRACE_IRQS_IRETQ +.macro TRACE_IRQS_FLAGS flags:req #ifdef CONFIG_TRACE_IRQFLAGS - bt $9, EFLAGS(%rsp) /* interrupts off? */ + bt $9, \flags /* interrupts off? */ jnc 1f TRACE_IRQS_ON 1: #endif .endm +.macro TRACE_IRQS_IRETQ + TRACE_IRQS_FLAGS EFLAGS(%rsp) +.endm + /* * When dynamic function tracer is enabled it will add a breakpoint * to all locations that it is about to modify, sync CPUs, update @@ -923,11 +927,13 @@ ENTRY(native_load_gs_index) FRAME_BEGIN pushfq DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) + TRACE_IRQS_OFF SWAPGS .Lgs_change: movl %edi, %gs 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE SWAPGS + TRACE_IRQS_FLAGS (%rsp) popfq FRAME_END ret From c35be334862c2cc6f4dd58ba638c0e7b341c7862 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 8 Nov 2017 16:07:18 -0800 Subject: [PATCH 0072/3326] perf/x86/intel: Hide TSX events when RTM is not supported commit 58ba4d5a25579e5c7e312bd359c95f3a9a0a242c upstream. 0day testing reported a perf test regression on Haswell systems without RTM. Commit a5df70c35 hides the in_tx/in_tx_cp attributes when RTM is not available, but the TSX events are still available in sysfs. Due to the missing attributes the event parser fails on those files. Don't show the TSX events in sysfs when RTM is not available on Haswell/Broadwell/Skylake. 
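The approach is to keep the feature-dependent entries in their own NULL-terminated list and merge them into the exported list only when the feature is present. A standalone sketch with hypothetical event names (plain userspace C, not the perf attribute machinery):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static const char *base_events[] = { "mem-loads", "mem-stores", NULL };
static const char *tsx_events[]  = { "tx-start", "tx-commit", "tx-abort", NULL };

static size_t count(const char **list)
{
	size_t n = 0;

	while (list[n])
		n++;
	return n;
}

/* Concatenate two NULL-terminated lists into a newly allocated one. */
static const char **merge(const char **a, const char **b)
{
	size_t na = count(a), nb = count(b), i;
	const char **out = calloc(na + nb + 1, sizeof(*out));

	if (!out)
		return NULL;
	for (i = 0; i < na; i++)
		out[i] = a[i];
	for (i = 0; i < nb; i++)
		out[na + i] = b[i];
	return out;
}

static const char **get_events(bool has_rtm)
{
	return has_rtm ? merge(base_events, tsx_events) : base_events;
}

int main(void)
{
	const char **ev = get_events(false);	/* pretend RTM is absent */

	if (!ev)
		return 1;
	for (; *ev; ev++)
		printf("%s\n", *ev);
	return 0;
}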
Fixes: a5df70c354c2 (perf/x86: Only show format attributes when supported) Reported-by: kernel test robot Tested-by: Jin Yao Signed-off-by: Andi Kleen Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Link: https://lkml.kernel.org/r/20171109000718.14137-1-andi@firstfloor.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/core.c | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 9fb9a1f1e47bd0..f94855000d4ed0 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3730,6 +3730,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1"); EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1"); static struct attribute *hsw_events_attrs[] = { + EVENT_PTR(mem_ld_hsw), + EVENT_PTR(mem_st_hsw), + EVENT_PTR(td_slots_issued), + EVENT_PTR(td_slots_retired), + EVENT_PTR(td_fetch_bubbles), + EVENT_PTR(td_total_slots), + EVENT_PTR(td_total_slots_scale), + EVENT_PTR(td_recovery_bubbles), + EVENT_PTR(td_recovery_bubbles_scale), + NULL +}; + +static struct attribute *hsw_tsx_events_attrs[] = { EVENT_PTR(tx_start), EVENT_PTR(tx_commit), EVENT_PTR(tx_abort), @@ -3742,18 +3755,16 @@ static struct attribute *hsw_events_attrs[] = { EVENT_PTR(el_conflict), EVENT_PTR(cycles_t), EVENT_PTR(cycles_ct), - EVENT_PTR(mem_ld_hsw), - EVENT_PTR(mem_st_hsw), - EVENT_PTR(td_slots_issued), - EVENT_PTR(td_slots_retired), - EVENT_PTR(td_fetch_bubbles), - EVENT_PTR(td_total_slots), - EVENT_PTR(td_total_slots_scale), - EVENT_PTR(td_recovery_bubbles), - EVENT_PTR(td_recovery_bubbles_scale), NULL }; +static __init struct attribute **get_hsw_events_attrs(void) +{ + return boot_cpu_has(X86_FEATURE_RTM) ? + merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) : + hsw_events_attrs; +} + static ssize_t freeze_on_smi_show(struct device *cdev, struct device_attribute *attr, char *buf) @@ -4182,7 +4193,7 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.cpu_events = get_hsw_events_attrs(); x86_pmu.lbr_double_abort = true; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; @@ -4221,7 +4232,7 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.cpu_events = get_hsw_events_attrs(); x86_pmu.limit_period = bdw_limit_period; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; @@ -4279,7 +4290,7 @@ __init int intel_pmu_init(void) extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; extra_attr = merge_attr(extra_attr, skl_format_attr); - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.cpu_events = get_hsw_events_attrs(); intel_pmu_pebs_data_source_skl( boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); pr_cont("Skylake events, "); From ecc3c4795bc3c7624c7b727c91ef5328fda6fc79 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 26 Oct 2017 18:36:47 +0100 Subject: [PATCH 0073/3326] arm64: Implement arch-specific pte_access_permitted() commit 6218f96c58dbf44a06aeaf767aab1f54fc397838 upstream. The generic pte_access_permitted() implementation only checks for pte_present() (together with the write permission where applicable). 
However, for both kernel ptes and PROT_NONE mappings pte_present() also returns true on arm64 even though such mappings are not user accessible. Additionally, arm64 now supports execute-only user permission (PROT_EXEC) which is implemented by clearing the PTE_USER bit. With this patch the arm64 implementation of pte_access_permitted() checks for the PTE_VALID and PTE_USER bits together with writable access if applicable. Reported-by: Al Viro Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/pgtable.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b46e54c2399b58..c9530b5b5ca836 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -98,6 +98,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) #define pte_valid_young(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) +#define pte_valid_user(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) /* * Could the pte be present in the TLB? We must check mm_tlb_flush_pending @@ -107,6 +109,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_accessible(mm, pte) \ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte)) +/* + * p??_access_permitted() is true for valid user mappings (subject to the + * write permission check) other than user execute-only which do not have the + * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. + */ +#define pte_access_permitted(pte, write) \ + (pte_valid_user(pte) && (!(write) || pte_write(pte))) +#define pmd_access_permitted(pmd, write) \ + (pte_access_permitted(pmd_pte(pmd), (write))) +#define pud_access_permitted(pud, write) \ + (pte_access_permitted(pud_pte(pud), (write))) + static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) { pte_val(pte) &= ~pgprot_val(prot); From cb7cc998a0e5f0f4572609ccc334de5a6260b406 Mon Sep 17 00:00:00 2001 From: Philip Derrin Date: Tue, 14 Nov 2017 00:55:25 +0100 Subject: [PATCH 0074/3326] ARM: 8722/1: mm: make STRICT_KERNEL_RWX effective for LPAE commit 400eeffaffc7232c0ae1134fe04e14ae4fb48d8c upstream. Currently, for ARM kernels with CONFIG_ARM_LPAE and CONFIG_STRICT_KERNEL_RWX enabled, the 2MiB pages mapping the kernel code and rodata are writable. They are marked read-only in a software bit (L_PMD_SECT_RDONLY) but the hardware read-only bit is not set (PMD_SECT_AP2). For user mappings, the logic that propagates the software bit to the hardware bit is in set_pmd_at(); but for the kernel, section_update() writes the PMDs directly, skipping this logic. The fix is to set PMD_SECT_AP2 for read-only sections in section_update(), at the same time as L_PMD_SECT_RDONLY. 
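As a standalone sketch of what the mask/prot pair is meant to achieve (the bit values and helper name below are invented for the example; the real definitions live in the ARM page-table headers): clearing and setting only the software bit leaves the hardware AP2 bit untouched, so both bits have to appear in the mask and in the protection value.

#include <stdint.h>
#include <stdio.h>

/* Invented bit positions, for illustration only. */
#define SW_RDONLY   (1u << 7)   /* software read-only marker */
#define HW_AP2      (1u << 6)   /* hardware read-only (AP2) */

/* Apply a section permission the way a mask/prot pair is typically used:
 * clear every bit covered by the mask, then set the requested bits. */
static uint32_t apply_section_perm(uint32_t pmd, uint32_t mask, uint32_t prot)
{
    return (pmd & mask) | prot;
}

int main(void)
{
    uint32_t pmd = 0;

    /* Old table entry: only the software bit in mask/prot. */
    pmd = apply_section_perm(pmd, ~SW_RDONLY, SW_RDONLY);
    printf("sw-only: AP2=%d\n", !!(pmd & HW_AP2));   /* 0 -> still writable */

    /* Fixed table entry: software and hardware bits together. */
    pmd = apply_section_perm(pmd, ~(SW_RDONLY | HW_AP2), SW_RDONLY | HW_AP2);
    printf("sw+hw:   AP2=%d\n", !!(pmd & HW_AP2));   /* 1 -> truly read-only */
    return 0;
}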
Fixes: 1e3479225acb ("ARM: 8275/1: mm: fix PMD_SECT_RDONLY undeclared compile error") Signed-off-by: Philip Derrin Reported-by: Neil Dick Tested-by: Neil Dick Tested-by: Laura Abbott Reviewed-by: Kees Cook Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/mm/init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index ad80548325fe9a..0f6d1537f3301d 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -639,8 +639,8 @@ static struct section_perm ro_perms[] = { .start = (unsigned long)_stext, .end = (unsigned long)__init_begin, #ifdef CONFIG_ARM_LPAE - .mask = ~L_PMD_SECT_RDONLY, - .prot = L_PMD_SECT_RDONLY, + .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2), + .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2, #else .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, From 30d3389d80eb0c6d2a49ffa07c8fd1eec5a49873 Mon Sep 17 00:00:00 2001 From: Philip Derrin Date: Tue, 14 Nov 2017 00:55:26 +0100 Subject: [PATCH 0075/3326] ARM: 8721/1: mm: dump: check hardware RO bit for LPAE commit 3b0c0c922ff4be275a8beb87ce5657d16f355b54 upstream. When CONFIG_ARM_LPAE is set, the PMD dump relies on the software read-only bit to determine whether a page is writable. This concealed a bug which left the kernel text section writable (AP2=0) while marked read-only in the software bit. In a kernel with the AP2 bug, the dump looks like this: ---[ Kernel Mapping ]--- 0xc0000000-0xc0200000 2M RW NX SHD 0xc0200000-0xc0600000 4M ro x SHD 0xc0600000-0xc0800000 2M ro NX SHD 0xc0800000-0xc4800000 64M RW NX SHD The fix is to check that the software and hardware bits are both set before displaying "ro". The dump then shows the true perms: ---[ Kernel Mapping ]--- 0xc0000000-0xc0200000 2M RW NX SHD 0xc0200000-0xc0600000 4M RW x SHD 0xc0600000-0xc0800000 2M RW NX SHD 0xc0800000-0xc4800000 64M RW NX SHD Fixes: ded947798469 ("ARM: 8109/1: mm: Modify pte_write and pmd_write logic for LPAE") Signed-off-by: Philip Derrin Tested-by: Neil Dick Reviewed-by: Kees Cook Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/mm/dump.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 35ff45470dbfd5..fc3b44028cfb22 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -129,8 +129,8 @@ static const struct prot_bits section_bits[] = { .val = PMD_SECT_USER, .set = "USR", }, { - .mask = L_PMD_SECT_RDONLY, - .val = L_PMD_SECT_RDONLY, + .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2, + .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2, .set = "ro", .clear = "RW", #elif __LINUX_ARM_ARCH__ >= 6 From 33e58deefa0907ffba4d5a4c69f3641a649358d3 Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Tue, 14 Nov 2017 06:30:11 +0300 Subject: [PATCH 0076/3326] uapi: fix linux/tls.h userspace compilation error commit b9f3eb499d84f8d4adcb2f9212ec655700b28228 upstream. Move inclusion of a private kernel header from uapi/linux/tls.h to its only user - net/tls.h, to fix the following linux/tls.h userspace compilation error: /usr/include/linux/tls.h:41:21: fatal error: net/tcp.h: No such file or directory Up to this point uapi/linux/tls.h was totally unusable for userspace; clean up this header file further by moving other redundant includes to net/tls.h. Fixes: 3c4d7559159b ("tls: kernel TLS support") Signed-off-by: Dmitry V. Levin Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/net/tls.h | 4 ++++ include/uapi/linux/tls.h | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/net/tls.h b/include/net/tls.h index b89d397dd62fc7..c06db1eadac217 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -35,6 +35,10 @@ #define _TLS_OFFLOAD_H #include +#include +#include +#include +#include #include diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index d5e0682ab8371b..293b2cdad88d94 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -35,10 +35,6 @@ #define _UAPI_LINUX_TLS_H #include -#include -#include -#include -#include /* TLS socket options */ #define TLS_TX 1 /* Set transmit parameters */ From 8177aa54c509056c599e7cf1c7e91630814686e6 Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Mon, 13 Nov 2017 03:37:06 +0300 Subject: [PATCH 0077/3326] uapi: fix linux/rxrpc.h userspace compilation errors commit 0eef304bc9f7d079a1165e8cd2f24b078e9e1f2a upstream. Consistently use types provided by to fix the following linux/rxrpc.h userspace compilation errors: /usr/include/linux/rxrpc.h:24:2: error: unknown type name 'u16' u16 srx_service; /* service desired */ /usr/include/linux/rxrpc.h:25:2: error: unknown type name 'u16' u16 transport_type; /* type of transport socket (SOCK_DGRAM) */ /usr/include/linux/rxrpc.h:26:2: error: unknown type name 'u16' u16 transport_len; /* length of transport address */ Use __kernel_sa_family_t instead of sa_family_t the same way as uapi/linux/in.h does, to fix the following linux/rxrpc.h userspace compilation errors: /usr/include/linux/rxrpc.h:23:2: error: unknown type name 'sa_family_t' sa_family_t srx_family; /* address family */ /usr/include/linux/rxrpc.h:28:3: error: unknown type name 'sa_family_t' sa_family_t family; /* transport address family */ Fixes: 727f8914477e ("rxrpc: Expose UAPI definitions to userspace") Signed-off-by: Dmitry V. Levin Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/rxrpc.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h index 9656aad8f8f7b3..9d4afea308a434 100644 --- a/include/uapi/linux/rxrpc.h +++ b/include/uapi/linux/rxrpc.h @@ -20,12 +20,12 @@ * RxRPC socket address */ struct sockaddr_rxrpc { - sa_family_t srx_family; /* address family */ - u16 srx_service; /* service desired */ - u16 transport_type; /* type of transport socket (SOCK_DGRAM) */ - u16 transport_len; /* length of transport address */ + __kernel_sa_family_t srx_family; /* address family */ + __u16 srx_service; /* service desired */ + __u16 transport_type; /* type of transport socket (SOCK_DGRAM) */ + __u16 transport_len; /* length of transport address */ union { - sa_family_t family; /* transport address family */ + __kernel_sa_family_t family; /* transport address family */ struct sockaddr_in sin; /* IPv4 transport address */ struct sockaddr_in6 sin6; /* IPv6 transport address */ } transport; From c792238027157d0fc6a80a8ea1b9d73443459d75 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 4 Oct 2017 03:46:14 +0100 Subject: [PATCH 0078/3326] MIPS: cmpxchg64() and HAVE_VIRT_CPU_ACCOUNTING_GEN don't work for 32-bit SMP commit a3f143106596d739e7fbc4b84c96b1475247d876 upstream. __cmpxchg64_local_generic() is atomic only w.r.t tasks and interrupts on the same CPU (that's what the 'local' means). We can't use it to implement cmpxchg64() in SMP configurations. 
So, for 32-bit SMP configurations: - Don't define cmpxchg64() - Don't enable HAVE_VIRT_CPU_ACCOUNTING_GEN, which requires it Fixes: e2093c7b03c1 ("MIPS: Fall back to generic implementation of ...") Fixes: bb877e96bea1 ("MIPS: Add support for full dynticks CPU time accounting") Signed-off-by: Ben Hutchings Cc: Ralf Baechle Cc: Deng-Cheng Zhu Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/17413/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/Kconfig | 2 +- arch/mips/include/asm/cmpxchg.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 5d3284d20678a3..c3d798b4403072 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -65,7 +65,7 @@ config MIPS select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS - select HAVE_VIRT_CPU_ACCOUNTING_GEN + select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP select IRQ_FORCED_THREADING select MODULES_USE_ELF_RELA if MODULES && 64BIT select MODULES_USE_ELF_REL if MODULES diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 7e25c5cc353a82..89e9fb7976fe61 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -204,8 +204,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, #else #include #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#ifndef CONFIG_SMP #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) #endif +#endif #undef __scbeqz From 8086ac5cdda0199d1231267808acc74cc75d6edf Mon Sep 17 00:00:00 2001 From: Mathias Kresin Date: Thu, 11 May 2017 08:11:14 +0200 Subject: [PATCH 0079/3326] MIPS: ralink: Fix MT7628 pinmux commit 8ef4b43cd3794d63052d85898e42424fd3b14d24 upstream. According to the datasheet the REFCLK pin is shared with GPIO#37 and the PERST pin is shared with GPIO#36. Fixes: 53263a1c6852 ("MIPS: ralink: add mt7628an support") Signed-off-by: Mathias Kresin Acked-by: John Crispin Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16046/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/ralink/mt7620.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index 9be8b08ae46b76..fda073cd7b737d 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -145,8 +145,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = { FUNC("i2c", 0, 4, 2), }; -static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) }; -static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) }; +static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 37, 1) }; +static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) }; static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; From 0d0a61fbc66724b2fdbbc7bf2b34354a4452efcd Mon Sep 17 00:00:00 2001 From: Mathias Kresin Date: Thu, 11 May 2017 08:11:15 +0200 Subject: [PATCH 0080/3326] MIPS: ralink: Fix typo in mt7628 pinmux function commit 05a67cc258e75ac9758e6f13d26337b8be51162a upstream. There is a typo inside the pinmux setup code. The function is called refclk and not reclk. 
Fixes: 53263a1c6852 ("MIPS: ralink: add mt7628an support") Signed-off-by: Mathias Kresin Acked-by: John Crispin Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16047/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/ralink/mt7620.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index fda073cd7b737d..41b71c4352c252 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -145,7 +145,7 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = { FUNC("i2c", 0, 4, 2), }; -static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 37, 1) }; +static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) }; static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) }; static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; From a59e386c4f4fce346161c0a62a5e1fbc1f435d6f Mon Sep 17 00:00:00 2001 From: Simon Guinot Date: Mon, 13 Nov 2017 16:27:02 +0100 Subject: [PATCH 0081/3326] net: mvneta: fix handling of the Tx descriptor counter commit 0d63785c6b94b5d2f095f90755825f90eea791f5 upstream. The mvneta controller provides an 8-bit register to update the pending Tx descriptor counter. Then, a maximum of 255 Tx descriptors can be added at once. In the current code the mvneta_txq_pend_desc_add function assumes the caller takes care of this limit. But that is not the case. In some situations (xmit_more flag), more than 255 descriptors are added. When this happens, the Tx descriptor counter register is updated with a wrong value, which breaks the whole Tx queue management. This patch fixes the issue by allowing the mvneta_txq_pend_desc_add function to process more than 255 Tx descriptors. Fixes: 2a90f7e1d5d0 ("net: mvneta: add xmit_more support") Signed-off-by: Simon Guinot Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/marvell/mvneta.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 64a04975bcf896..bc93b69cfd1edc 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -816,11 +816,14 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, { u32 val; - /* Only 255 descriptors can be added at once ; Assume caller - * process TX desriptors in quanta less than 256 - */ - val = pend_desc + txq->pending; - mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + pend_desc += txq->pending; + + /* Only 255 Tx descriptors can be added at once */ + do { + val = min(pend_desc, 255); + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + pend_desc -= val; + } while (pend_desc > 0); txq->pending = 0; } From f6b7c54c2d0ee8fe4d3b1cdfdb1961a80be60d3f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 6 Nov 2017 16:11:57 -0500 Subject: [PATCH 0082/3326] nbd: wait uninterruptible for the dead timeout commit ff57dc94faec023abc267cdc45766fccff497557 upstream. If we have a pending signal or the user kills their application then it'll bring down the whole device, which is less than awesome. Instead wait uninterruptibly for the dead timeout so we're sure we gave it our best shot. 
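A loose userspace analogue of the behaviour the patch wants (illustrative only; the kernel side simply switches from the interruptible wait helper to wait_event_timeout()): keep waiting until the deadline even if signals arrive, rather than giving up early.

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Sleep until an absolute deadline, restarting after any signal instead of
 * giving up early -- the userspace analogue of waiting "uninterruptible"
 * for the full timeout. */
static void sleep_until(const struct timespec *deadline)
{
    int err;

    do {
        err = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, deadline, NULL);
    } while (err == EINTR);     /* a handled signal arrived: keep waiting */
}

int main(void)
{
    struct timespec deadline;

    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += 2;       /* stand-in for the "dead connection" timeout */
    sleep_until(&deadline);
    puts("timeout fully elapsed, now give up on the connection");
    return 0;
}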
Fixes: 560bc4b39952 ("nbd: handle dead connections") Signed-off-by: Josef Bacik Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/block/nbd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9adfb5445f8dca..fdef8efcdabc40 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -723,9 +723,9 @@ static int wait_for_reconnect(struct nbd_device *nbd) return 0; if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) return 0; - wait_event_interruptible_timeout(config->conn_wait, - atomic_read(&config->live_connections), - config->dead_conn_timeout); + wait_event_timeout(config->conn_wait, + atomic_read(&config->live_connections), + config->dead_conn_timeout); return atomic_read(&config->live_connections); } From 2031e243efeea84f4f121018715e8c7755ee1914 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 6 Nov 2017 16:11:58 -0500 Subject: [PATCH 0083/3326] nbd: don't start req until after the dead connection logic commit 6a468d5990ecd1c2d07dd85f8633bbdd0ba61c40 upstream. We can end up sleeping for a while waiting for the dead timeout, which means we could get the per request timer to fire. We did handle this case, but if the dead timeout happened right after we submitted we'd either tear down the connection or possibly requeue as we're handling an error and race with the endio which can lead to panics and other hilarity. Fixes: 560bc4b39952 ("nbd: handle dead connections") Signed-off-by: Josef Bacik Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/block/nbd.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index fdef8efcdabc40..5f2a4240a204d5 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -288,15 +288,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, cmd->status = BLK_STS_TIMEOUT; return BLK_EH_HANDLED; } - - /* If we are waiting on our dead timer then we could get timeout - * callbacks for our request. For this we just want to reset the timer - * and let the queue side take care of everything. - */ - if (!completion_done(&cmd->send_complete)) { - nbd_config_put(nbd); - return BLK_EH_RESET_TIMER; - } config = nbd->config; if (config->num_connections > 1) { @@ -740,6 +731,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) if (!refcount_inc_not_zero(&nbd->config_refs)) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Socks array is empty\n"); + blk_mq_start_request(req); return -EINVAL; } config = nbd->config; @@ -748,6 +740,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) dev_err_ratelimited(disk_to_dev(nbd->disk), "Attempted send on invalid socket\n"); nbd_config_put(nbd); + blk_mq_start_request(req); return -EINVAL; } cmd->status = BLK_STS_OK; @@ -771,6 +764,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) */ sock_shutdown(nbd); nbd_config_put(nbd); + blk_mq_start_request(req); return -EIO; } goto again; @@ -781,6 +775,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) * here so that it gets put _after_ the request that is already on the * dispatch list. 
*/ + blk_mq_start_request(req); if (unlikely(nsock->pending && nsock->pending != req)) { blk_mq_requeue_request(req, true); ret = 0; @@ -793,10 +788,10 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) ret = nbd_send_cmd(nbd, cmd, index); if (ret == -EAGAIN) { dev_err_ratelimited(disk_to_dev(nbd->disk), - "Request send failed trying another connection\n"); + "Request send failed, requeueing\n"); nbd_mark_nsock_dead(nbd, nsock, 1); - mutex_unlock(&nsock->tx_lock); - goto again; + blk_mq_requeue_request(req, true); + ret = 0; } out: mutex_unlock(&nsock->tx_lock); @@ -820,7 +815,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, * done sending everything over the wire. */ init_completion(&cmd->send_complete); - blk_mq_start_request(bd->rq); /* We can be called directly from the user space process, which means we * could possibly have signals pending so our sendmsg will fail. In From 7b4c6a3b394f86f6b78a688c092d8dcea976a59c Mon Sep 17 00:00:00 2001 From: Tobias Jordan Date: Wed, 4 Oct 2017 11:35:03 +0530 Subject: [PATCH 0084/3326] PM / OPP: Add missing of_node_put(np) commit 7978db344719dab1e56d05e6fc04aaaddcde0a5e upstream. The for_each_available_child_of_node() loop in _of_add_opp_table_v2() doesn't drop the reference to "np" on errors. Fix that. Fixes: 274659029c9d (PM / OPP: Add support to parse "operating-points-v2" bindings) Signed-off-by: Tobias Jordan [ VK: Improved commit log. ] Signed-off-by: Viresh Kumar Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/base/power/opp/of.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 0b718886479bd5..87509cb69f7923 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -397,6 +397,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); _dev_pm_opp_remove_table(opp_table, dev, false); + of_node_put(np); goto put_opp_table; } } From 576bdf9642d97c8aafb0305960c00259d7438b06 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Mon, 13 Nov 2017 08:50:30 -0600 Subject: [PATCH 0085/3326] PCI/ASPM: Account for downstream device's Port Common_Mode_Restore_Time commit 94ac327e043ee40d7fc57b54541da50507ef4e99 upstream. Every Port that supports the L1.2 substate advertises its Port Common_Mode_Restore_Time, i.e., the time the Port requires to re-establish common mode when exiting L1.2 (see PCIe r3.1, sec 7.33.2). Per sec 5.5.3.3.1, when exiting L1.2, the Downstream Port (the device at the upstream end of the link) must send TS1 training sequences for at least T(COMMONMODE) after it detects electrical idle exit on the Link. We want this to be long enough for both ends of the Link, so we should set it to the maximum of the Port Common_Mode_Restore_Time for the upstream and downstream components on the Link. Previously we only looked at the Port Common_Mode_Restore_Time of the upstream device, so if the downstream device required more time, we didn't program the upstream device's T(COMMONMODE) correctly. 
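A minimal standalone sketch of the arithmetic (capability values below are made up for the example): extract the 8-bit T_CommonMode field from both ends of the link and program the larger of the two.

#include <stdint.h>
#include <stdio.h>

/* Port Common_Mode_Restore_Time lives in bits 15:8 of the L1 PM Substates
 * capability register, matching the decode used in the driver above. */
static uint32_t t_common_mode(uint32_t l1ss_cap)
{
    return (l1ss_cap >> 8) & 0xFF;
}

int main(void)
{
    /* Made-up capability values for the two ends of the link. */
    uint32_t upstream_cap   = 0x00003228;  /* T_CommonMode = 0x32 (50 us) */
    uint32_t downstream_cap = 0x00004628;  /* T_CommonMode = 0x46 (70 us) */

    uint32_t up = t_common_mode(upstream_cap);
    uint32_t dw = t_common_mode(downstream_cap);
    uint32_t t  = (up > dw) ? up : dw;     /* program the larger of the two */

    printf("T_CommonMode to program: %u us\n", t);
    return 0;
}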
Fixes: f1f0366dd6be ("PCI/ASPM: Calculate and save the L1.2 timing parameters") Signed-off-by: Bjorn Helgaas Reviewed-by: Vidya Sagar Acked-by: Rajat Jain Signed-off-by: Greg Kroah-Hartman --- drivers/pci/pcie/aspm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 83e4a892b14be7..1196c520255867 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -453,7 +453,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link, /* Choose the greater of the two T_cmn_mode_rstr_time */ val1 = (upreg->l1ss_cap >> 8) & 0xFF; - val2 = (upreg->l1ss_cap >> 8) & 0xFF; + val2 = (dwreg->l1ss_cap >> 8) & 0xFF; if (val1 > val2) link->l1ss.ctl1 |= val1 << 8; else From d42e6a246cd75143e9133f740ac979011f81bc43 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Mon, 13 Nov 2017 15:05:50 -0600 Subject: [PATCH 0086/3326] PCI/ASPM: Use correct capability pointer to program LTR_L1.2_THRESHOLD commit c00054f540bf81e592e1fee709b0bdbf20f478b5 upstream. Previously we programmed the LTR_L1.2_THRESHOLD in the parent (upstream) device using the capability pointer of the *child* (downstream) device, which corrupted some random word of the parent's config space. Use the parent's L1 SS capability pointer to program its LTR_L1.2_THRESHOLD. Fixes: aeda9adebab8 ("PCI/ASPM: Configure L1 substate settings") Signed-off-by: Bjorn Helgaas Reviewed-by: Vidya Sagar CC: Rajat Jain Signed-off-by: Greg Kroah-Hartman --- drivers/pci/pcie/aspm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 1196c520255867..cae54f8320be43 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -658,7 +658,7 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) 0xFF00, link->l1ss.ctl1); /* Program LTR L1.2 threshold in both ports */ - pci_clear_and_set_dword(parent, dw_cap_ptr + PCI_L1SS_CTL1, + pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, 0xE3FF0000, link->l1ss.ctl1); pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, 0xE3FF0000, link->l1ss.ctl1); From c85364c66e7dd03336d70e98dd6fc5881f34dcb8 Mon Sep 17 00:00:00 2001 From: Dexuan Cui Date: Wed, 1 Nov 2017 20:30:53 +0000 Subject: [PATCH 0087/3326] PCI: hv: Use effective affinity mask commit 79aa801e899417a56863d6713f76c4e108856000 upstream. The effective_affinity_mask is always set when an interrupt is assigned in __assign_irq_vector() -> apic->cpu_mask_to_apicid(), e.g. for struct apic apic_physflat: -> default_cpu_mask_to_apicid() -> irq_data_update_effective_affinity(), but it looks d->common->affinity remains all-1's before the user space or the kernel changes it later. In the early allocation/initialization phase of an IRQ, we should use the effective_affinity_mask, otherwise Hyper-V may not deliver the interrupt to the expected CPU. Without the patch, if we assign 7 Mellanox ConnectX-3 VFs to a 32-vCPU VM, one of the VFs may fail to receive interrupts. Tested-by: Adrian Suhov Signed-off-by: Dexuan Cui Signed-off-by: Bjorn Helgaas Reviewed-by: Jake Oshins Cc: Jork Loeser Cc: Stephen Hemminger Cc: K. Y. 
Srinivasan Signed-off-by: Greg Kroah-Hartman --- drivers/pci/host/pci-hyperv.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 0fe3ea164ee533..04dac6a42c9f28 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -879,7 +879,7 @@ static void hv_irq_unmask(struct irq_data *data) int cpu; u64 res; - dest = irq_data_get_affinity_mask(data); + dest = irq_data_get_effective_affinity_mask(data); pdev = msi_desc_to_pci_dev(msi_desc); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); @@ -1042,6 +1042,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct hv_pci_dev *hpdev; struct pci_bus *pbus; struct pci_dev *pdev; + struct cpumask *dest; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; struct { @@ -1056,6 +1057,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) int ret; pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + dest = irq_data_get_effective_affinity_mask(data); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); @@ -1081,14 +1083,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) switch (pci_protocol_version) { case PCI_PROTOCOL_VERSION_1_1: size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, - irq_data_get_affinity_mask(data), + dest, hpdev->desc.win_slot.slot, cfg->vector); break; case PCI_PROTOCOL_VERSION_1_2: size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, - irq_data_get_affinity_mask(data), + dest, hpdev->desc.win_slot.slot, cfg->vector); break; From f97ca60715d43d826657961db82b4704cb2f68c4 Mon Sep 17 00:00:00 2001 From: Vadim Lomovtsev Date: Tue, 17 Oct 2017 05:47:38 -0700 Subject: [PATCH 0088/3326] PCI: Set Cavium ACS capability quirk flags to assert RR/CR/SV/UF commit 7f342678634f16795892677204366e835e450dda upstream. The Cavium ThunderX (CN8XXX) family of PCIe Root Ports does not advertise an ACS capability. However, the RTL internally implements similar protection as if ACS had Request Redirection, Completion Redirection, Source Validation, and Upstream Forwarding features enabled. Change Cavium ACS capabilities quirk flags accordingly. Fixes: b404bcfbf035 ("PCI: Add ACS quirk for all Cavium devices") Signed-off-by: Vadim Lomovtsev [bhelgaas: tidy changelog, comment, stable tag] Signed-off-by: Bjorn Helgaas Signed-off-by: Greg Kroah-Hartman --- drivers/pci/quirks.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 911b3b65c8b2de..6215511ca898a7 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4215,12 +4215,14 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) { /* - * Cavium devices matching this quirk do not perform peer-to-peer - * with other functions, allowing masking out these bits as if they - * were unimplemented in the ACS capability. + * Cavium root ports don't advertise an ACS capability. However, + * the RTL internally implements similar protection as if ACS had + * Request Redirection, Completion Redirection, Source Validation, + * and Upstream Forwarding features enabled. Assert that the + * hardware implements and enables equivalent ACS functionality for + * these flags. 
*/ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | - PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); + acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF); if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff))) return -ENOTTY; From a529422a5cb9c489f47647523779d2ea740d9918 Mon Sep 17 00:00:00 2001 From: Vadim Lomovtsev Date: Tue, 17 Oct 2017 05:47:39 -0700 Subject: [PATCH 0089/3326] PCI: Apply Cavium ThunderX ACS quirk to more Root Ports commit f2ddaf8dfd4a5071ad09074d2f95ab85d35c8a1e upstream. Extend the Cavium ThunderX ACS quirk to cover more device IDs and restrict it to only Root Ports. Signed-off-by: Vadim Lomovtsev [bhelgaas: changelog, stable tag] Signed-off-by: Bjorn Helgaas Signed-off-by: Greg Kroah-Hartman --- drivers/pci/quirks.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6215511ca898a7..f66f9375177c8a 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4212,6 +4212,19 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) #endif } +static bool pci_quirk_cavium_acs_match(struct pci_dev *dev) +{ + /* + * Effectively selects all downstream ports for whole ThunderX 1 + * family by 0xf800 mask (which represents 8 SoCs), while the lower + * bits of device ID are used to indicate which subdevice is used + * within the SoC. + */ + return (pci_is_pcie(dev) && + (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) && + ((dev->device & 0xf800) == 0xa000)); +} + static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) { /* @@ -4224,7 +4237,7 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) */ acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF); - if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff))) + if (!pci_quirk_cavium_acs_match(dev)) return -ENOTTY; return acs_flags ? 0 : 1; From ca90f34e2e80597dee73c2e8d55de364a34c112b Mon Sep 17 00:00:00 2001 From: Vijendar Mukunda Date: Thu, 23 Nov 2017 20:07:00 +0530 Subject: [PATCH 0090/3326] ALSA: hda: Add Raven PCI ID commit 9ceace3c9c18c67676e75141032a65a8e01f9a7a upstream. This commit adds PCI ID for Raven platform Signed-off-by: Vijendar Mukunda Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/hda_intel.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index f958d8d54d159c..c71dcacea807bf 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2463,6 +2463,9 @@ static const struct pci_device_id azx_ids[] = { /* AMD Hudson */ { PCI_DEVICE(0x1022, 0x780d), .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, + /* AMD Raven */ + { PCI_DEVICE(0x1022, 0x15e3), + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, /* ATI HDMI */ { PCI_DEVICE(0x1002, 0x0002), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, From a502cd2dd4689f0092c27c450786731c087a56e6 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 7 Nov 2017 10:40:40 -0500 Subject: [PATCH 0091/3326] dm integrity: allow unaligned bv_offset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 95b1369a9638cfa322ad1c0cde8efbe524059884 upstream. When slub_debug is enabled kmalloc returns unaligned memory. XFS uses this unaligned memory for its buffers (if an unaligned buffer crosses a page, XFS frees it and allocates a full page instead - see the function xfs_buf_allocate_memory). 
dm-integrity checks if bv_offset is aligned on page size and this check fails with slub_debug and XFS. Fix this bug by removing the bv_offset check, leaving only the check for bv_len. Fixes: 7eada909bfd7 ("dm: add integrity target") Reported-by: Bruno Prémont Reviewed-by: Milan Broz Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-integrity.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 096fe9b66c5074..5e6737a44468d6 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1376,7 +1376,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio) struct bvec_iter iter; struct bio_vec bv; bio_for_each_segment(bv, bio, iter) { - if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { + if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary", bv.bv_offset, bv.bv_len, ic->sectors_per_block); return DM_MAPIO_KILL; From ec544ec956bc791f08d66101bc1269c4d944df7f Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Fri, 10 Nov 2017 07:53:31 -0500 Subject: [PATCH 0092/3326] dm cache: fix race condition in the writeback mode overwrite_bio optimisation commit d1260e2a3f85f4c1010510a15f89597001318b1b upstream. When a DM cache in writeback mode moves data between the slow and fast device it can often avoid a copy if the triggering bio either: i) covers the whole block (no point copying if we're about to overwrite it) ii) the migration is a promotion and the origin block is currently discarded Prior to this fix there was a race with case (ii). The discard status was checked with a shared lock held (rather than exclusive). This meant another bio could run in parallel and write data to the origin, removing the discard state. After the promotion the parallel write would have been lost. With this fix the discard status is re-checked once the exclusive lock has been acquired. If the block is no longer discarded it falls back to the slower full copy path. 
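The pattern is the classic "re-check under the stronger lock": the shared-lock check is only a hint, and the decision has to be validated again once the exclusive lock is held. A standalone sketch using a pthread rwlock (all names invented for the example):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t block_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool block_discarded = true;     /* state that a racing writer may clear */

static bool fast_path_still_valid(void)
{
    return block_discarded;
}

static void promote_block(void)
{
    bool use_fast_path;

    /* First check under the shared (read) lock: cheap, but only a hint. */
    pthread_rwlock_rdlock(&block_lock);
    use_fast_path = fast_path_still_valid();
    pthread_rwlock_unlock(&block_lock);

    /* A writer may slip in here and write to the origin block. */

    pthread_rwlock_wrlock(&block_lock);
    if (use_fast_path && !fast_path_still_valid()) {
        /* The hint went stale: fall back to the slower full copy. */
        use_fast_path = false;
    }
    printf("promote using %s path\n", use_fast_path ? "overwrite" : "full copy");
    pthread_rwlock_unlock(&block_lock);
}

int main(void)
{
    promote_block();    /* single-threaded demo: the fast path survives the re-check */
    return 0;
}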
Fixes: b29d4986d ("dm cache: significant rework to leverage dm-bio-prison-v2") Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-cache-target.c | 86 ++++++++++++++++++++++-------------- 1 file changed, 53 insertions(+), 33 deletions(-) diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 8785134c9f1f1a..0b7edfd0b454e4 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -1201,6 +1201,18 @@ static void background_work_end(struct cache *cache) /*----------------------------------------------------------------*/ +static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) +{ + return (bio_data_dir(bio) == WRITE) && + (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); +} + +static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block) +{ + return writeback_mode(&cache->features) && + (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio)); +} + static void quiesce(struct dm_cache_migration *mg, void (*continuation)(struct work_struct *)) { @@ -1474,12 +1486,50 @@ static void mg_upgrade_lock(struct work_struct *ws) } } +static void mg_full_copy(struct work_struct *ws) +{ + struct dm_cache_migration *mg = ws_to_mg(ws); + struct cache *cache = mg->cache; + struct policy_work *op = mg->op; + bool is_policy_promote = (op->op == POLICY_PROMOTE); + + if ((!is_policy_promote && !is_dirty(cache, op->cblock)) || + is_discarded_oblock(cache, op->oblock)) { + mg_upgrade_lock(ws); + return; + } + + init_continuation(&mg->k, mg_upgrade_lock); + + if (copy(mg, is_policy_promote)) { + DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache)); + mg->k.input = BLK_STS_IOERR; + mg_complete(mg, false); + } +} + static void mg_copy(struct work_struct *ws) { - int r; struct dm_cache_migration *mg = ws_to_mg(ws); if (mg->overwrite_bio) { + /* + * No exclusive lock was held when we last checked if the bio + * was optimisable. So we have to check again in case things + * have changed (eg, the block may no longer be discarded). + */ + if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) { + /* + * Fallback to a real full copy after doing some tidying up. + */ + bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio); + BUG_ON(rb); /* An exclussive lock must _not_ be held for this block */ + mg->overwrite_bio = NULL; + inc_io_migrations(mg->cache); + mg_full_copy(ws); + return; + } + /* * It's safe to do this here, even though it's new data * because all IO has been locked out of the block. 
@@ -1489,26 +1539,8 @@ static void mg_copy(struct work_struct *ws) */ overwrite(mg, mg_update_metadata_after_copy); - } else { - struct cache *cache = mg->cache; - struct policy_work *op = mg->op; - bool is_policy_promote = (op->op == POLICY_PROMOTE); - - if ((!is_policy_promote && !is_dirty(cache, op->cblock)) || - is_discarded_oblock(cache, op->oblock)) { - mg_upgrade_lock(ws); - return; - } - - init_continuation(&mg->k, mg_upgrade_lock); - - r = copy(mg, is_policy_promote); - if (r) { - DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache)); - mg->k.input = BLK_STS_IOERR; - mg_complete(mg, false); - } - } + } else + mg_full_copy(ws); } static int mg_lock_writes(struct dm_cache_migration *mg) @@ -1748,18 +1780,6 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio) /*----------------------------------------------------------------*/ -static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) -{ - return (bio_data_dir(bio) == WRITE) && - (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); -} - -static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block) -{ - return writeback_mode(&cache->features) && - (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio)); -} - static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block, bool *commit_needed) { From 8f71f493f46168bf48a02aaba5ae95335d8c7bb3 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 7 Nov 2017 10:35:57 -0500 Subject: [PATCH 0093/3326] dm crypt: allow unaligned bv_offset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 0440d5c0ca9744b92a07aeb6df0a9a75db6f4280 upstream. When slub_debug is enabled kmalloc returns unaligned memory. XFS uses this unaligned memory for its buffers (if an unaligned buffer crosses a page, XFS frees it and allocates a full page instead - see the function xfs_buf_allocate_memory). dm-crypt checks if bv_offset is aligned on page size and these checks fail with slub_debug and XFS. Fix this bug by removing the bv_offset checks. Switch to checking if bv_len is aligned instead of bv_offset (this check should be sufficient to prevent overruns if a bio with too small bv_len is received). Fixes: 8f0009a22517 ("dm crypt: optionally support larger encryption sector size") Reported-by: Bruno Prémont Tested-by: Bruno Prémont Signed-off-by: Mikulas Patocka Reviewed-by: Milan Broz Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-crypt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 96ab46512e1fdc..9fc12f556534b7 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1075,7 +1075,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc, BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size); /* Reject unexpected unaligned bio. */ - if (unlikely(bv_in.bv_offset & (cc->sector_size - 1))) + if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) return -EIO; dmreq = dmreq_of_req(cc, req); @@ -1168,7 +1168,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc, int r = 0; /* Reject unexpected unaligned bio. 
*/ - if (unlikely(bv_in.bv_offset & (cc->sector_size - 1))) + if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) return -EIO; dmreq = dmreq_of_req(cc, req); From ac29afdb459c9a75916edc1ee77e2d1e73c50353 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Sat, 28 Oct 2017 16:39:34 +0900 Subject: [PATCH 0094/3326] dm zoned: ignore last smaller runt zone commit 114e025968b5990ad0b57bf60697ea64ee206aac upstream. The SCSI layer allows ZBC drives to have a smaller last runt zone. For such a device, specifying the entire capacity for a dm-zoned target table entry fails because the specified capacity is not aligned on a device zone size indicated in the request queue structure of the device. Fix this problem by ignoring the last runt zone in the entry length when seting up the dm-zoned target (ctr method) and when iterating table entries of the target (iterate_devices method). This allows dm-zoned users to still easily setup a target using the entire device capacity (as mandated by dm-zoned) or the aligned capacity excluding the last runt zone. While at it, replace direct references to the device queue chunk_sectors limit with calls to the accessor blk_queue_zone_sectors(). Reported-by: Peter Desnoyers Signed-off-by: Damien Le Moal Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-zoned-target.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index b87c1741da4b88..6d7bda6f819058 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path) struct dmz_target *dmz = ti->private; struct request_queue *q; struct dmz_dev *dev; + sector_t aligned_capacity; int ret; /* Get the target device */ @@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path) goto err; } + q = bdev_get_queue(dev->bdev); dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; - if (ti->begin || (ti->len != dev->capacity)) { + aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1); + if (ti->begin || + ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) { ti->error = "Partial mapping not supported"; ret = -EINVAL; goto err; } - q = bdev_get_queue(dev->bdev); - dev->zone_nr_sectors = q->limits.chunk_sectors; + dev->zone_nr_sectors = blk_queue_zone_sectors(q); dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors); dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors); @@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct dmz_target *dmz = ti->private; + struct dmz_dev *dev = dmz->dev; + sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1); - return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data); + return fn(ti, dmz->ddev, 0, capacity, data); } static struct target_type dmz_type = { From 4d7a55f5b8099224e250e5e87882e4d7168af129 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sat, 30 Sep 2017 19:46:48 +0800 Subject: [PATCH 0095/3326] dm mpath: remove annoying message of 'blk_get_request() returned -11' commit 9dc112e2daf87b40607fd8d357e2d7de32290d45 upstream. It is very normal to see allocation failure, especially with blk-mq request_queues, so it's unnecessary to report this error and annoy people. In practice this 'blk_get_request() returned -11' error gets logged quite frequently when a blk-mq DM multipath device sees heavy IO. 
This change is marked for stable@ because the annoying message in question was included in stable@ commit 7083abbbf. Fixes: 7083abbbf ("dm mpath: avoid that path removal can trigger an infinite loop") Signed-off-by: Ming Lei Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-mpath.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 11f273d2f018e7..e8094d8fbe0d5f 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -499,8 +499,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, if (IS_ERR(clone)) { /* EBUSY, ENODEV or EWOULDBLOCK: requeue */ bool queue_dying = blk_queue_dying(q); - DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing", - PTR_ERR(clone), queue_dying ? " (path offline)" : ""); if (queue_dying) { atomic_inc(&m->pg_init_in_progress); activate_or_offline_path(pgpath); From 08720bf98b6ac0a3ebffeb80c6c6cc992c374d90 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 15 Nov 2017 16:38:09 -0800 Subject: [PATCH 0096/3326] dm bufio: fix integer overflow when limiting maximum cache size commit 74d4108d9e681dbbe4a2940ed8fdff1f6868184c upstream. The default max_cache_size_bytes for dm-bufio is meant to be the lesser of 25% of the size of the vmalloc area and 2% of the size of lowmem. However, on 32-bit systems the intermediate result in the expression (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100 overflows, causing the wrong result to be computed. For example, on a 32-bit system where the vmalloc area is 520093696 bytes, the result is 1174405 rather than the expected 130023424, which makes the maximum cache size much too small (far less than 2% of lowmem). This causes severe performance problems for dm-verity users on affected systems. Fix this by using mult_frac() to correctly multiply by a percentage. Do this for all places in dm-bufio that multiply by a percentage. Also replace (VMALLOC_END - VMALLOC_START) with VMALLOC_TOTAL, which contrary to the comment is now defined in include/linux/vmalloc.h. 
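A standalone sketch of the arithmetic, using the numbers from the commit message (MULT_FRAC below mirrors what the kernel's mult_frac() does; the macro name itself is local to the example):

#include <stdint.h>
#include <stdio.h>

/* Overflow-safe "x * num / denom": split x into quotient and remainder so
 * the intermediate product stays small -- the same idea as mult_frac(). */
#define MULT_FRAC(x, num, denom) \
    (((x) / (denom)) * (num) + (((x) % (denom)) * (num)) / (denom))

int main(void)
{
    /* The vmalloc-area size from the commit message, computed in 32-bit
     * arithmetic (uint32_t stands in for a 32-bit unsigned long). */
    uint32_t vmalloc_size = 520093696u;

    uint32_t naive = vmalloc_size * 25 / 100;          /* intermediate product wraps: 1174405 */
    uint32_t safe  = MULT_FRAC(vmalloc_size, 25, 100); /* 130023424 as expected */

    printf("naive: %u  safe: %u\n", naive, safe);
    return 0;
}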
Depends-on: 9993bc635 ("sched/x86: Fix overflow in cyc2ns_offset") Fixes: 95d402f057f2 ("dm: add bufio") Signed-off-by: Eric Biggers Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-bufio.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d216a8f7bc224c..8e3adcb46851e8 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -974,7 +974,8 @@ static void __get_memory_limit(struct dm_bufio_client *c, buffers = c->minimum_buffers; *limit_buffers = buffers; - *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100; + *threshold_buffers = mult_frac(buffers, + DM_BUFIO_WRITEBACK_PERCENT, 100); } /* @@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void) memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); - mem = (__u64)((totalram_pages - totalhigh_pages) * - DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT; + mem = (__u64)mult_frac(totalram_pages - totalhigh_pages, + DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; if (mem > ULONG_MAX) mem = ULONG_MAX; #ifdef CONFIG_MMU - /* - * Get the size of vmalloc space the same way as VMALLOC_TOTAL - * in fs/proc/internal.h - */ - if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100) - mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100; + if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100)) + mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100); #endif dm_bufio_default_cache_size = mem; From 13e6560075607ff354b48556f306bee925328325 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Wed, 1 Nov 2017 15:37:22 -0400 Subject: [PATCH 0097/3326] ovl: Put upperdentry if ovl_check_origin() fails commit 5455f92b54e516995a9ca45bbf790d3629c27a93 upstream. If ovl_check_origin() fails, we should put upperdentry. We have a reference on it by now. So goto out_put_upper instead of out. Fixes: a9d019573e88 ("ovl: lookup non-dir copy-up-origin by file handle") Signed-off-by: Vivek Goyal Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/overlayfs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index a12dc10bf72635..bc6d5c5a344353 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -630,7 +630,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, err = ovl_check_origin(upperdentry, roe->lowerstack, roe->numlower, &stack, &ctr); if (err) - goto out; + goto out_put_upper; } if (d.redirect) { From 2bf483c9a402941b5594e5ac8ad3b2b4a335fd34 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 31 Oct 2017 19:33:02 -0400 Subject: [PATCH 0098/3326] dm: allocate struct mapped_device with kvzalloc commit 856eb0916d181da6d043cc33e03f54d5c5bbe54a upstream. The structure srcu_struct can be very big, its size is proportional to the value CONFIG_NR_CPUS. The Fedora kernel has CONFIG_NR_CPUS 8192, the field io_barrier in the struct mapped_device has 84kB in the debugging kernel and 50kB in the non-debugging kernel. The large size may result in failure of the function kzalloc_node. In order to avoid the allocation failure, we use the function kvzalloc_node, this function falls back to vmalloc if a large contiguous chunk of memory is not available. 
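A simplified sketch of the fallback idea only; the real kvzalloc_node() in mm/util.c is more careful about GFP flags, size limits and reclaim behaviour. Memory obtained this way must be released with kvfree(), which is why the patch also switches kfree() to kvfree().

/* Simplified, kernel-style sketch -- not the actual mm/util.c implementation. */
static void *kvzalloc_node_sketch(size_t size, gfp_t flags, int node)
{
	void *p;

	/* Try a physically contiguous allocation first... */
	p = kzalloc_node(size, flags | __GFP_NOWARN, node);
	if (p)
		return p;

	/* ...and fall back to vmalloc for large or fragmented requests. */
	return vzalloc_node(size, node);
}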
This patch also moves the field io_barrier to the last position of struct mapped_device - the reason is that on many processor architectures, short memory offsets result in smaller code than long memory offsets - on x86-64 it reduces code size by 320 bytes. Note to stable kernel maintainers - the kernels 4.11 and older don't have the function kvzalloc_node, you can use the function vzalloc_node instead. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-core.h | 3 ++- drivers/md/dm.c | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 203144762f3688..6a14f945783c6c 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -29,7 +29,6 @@ struct dm_kobject_holder { * DM targets must _not_ deference a mapped_device to directly access its members! */ struct mapped_device { - struct srcu_struct io_barrier; struct mutex suspend_lock; /* @@ -127,6 +126,8 @@ struct mapped_device { struct blk_mq_tag_set *tag_set; bool use_blk_mq:1; bool init_tio_pdu:1; + + struct srcu_struct io_barrier; }; void dm_init_md_queue(struct mapped_device *md); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4be85324f44dc2..a88c87c0af74a0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1695,7 +1695,7 @@ static struct mapped_device *alloc_dev(int minor) struct mapped_device *md; void *old_md; - md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); + md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); if (!md) { DMWARN("unable to allocate device, out of memory."); return NULL; @@ -1795,7 +1795,7 @@ static struct mapped_device *alloc_dev(int minor) bad_minor: module_put(THIS_MODULE); bad_module_get: - kfree(md); + kvfree(md); return NULL; } @@ -1814,7 +1814,7 @@ static void free_dev(struct mapped_device *md) free_minor(minor); module_put(THIS_MODULE); - kfree(md); + kvfree(md); } static void __bind_mempools(struct mapped_device *md, struct dm_table *t) From f17c786b28a3060a566a170c2cf3bd7441fc30a3 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Fri, 6 Oct 2017 14:05:04 -0400 Subject: [PATCH 0099/3326] sched/rt: Simplify the IPI based RT balancing logic commit 4bdced5c9a2922521e325896a7bbbf0132c94e56 upstream. When a CPU lowers its priority (schedules out a high priority task for a lower priority one), a check is made to see if any other CPU has overloaded RT tasks (more than one). It checks the rto_mask to determine this and if so it will request to pull one of those tasks to itself if the non running RT task is of higher priority than the new priority of the next task to run on the current CPU. When we deal with large number of CPUs, the original pull logic suffered from large lock contention on a single CPU run queue, which caused a huge latency across all CPUs. This was caused by only having one CPU having overloaded RT tasks and a bunch of other CPUs lowering their priority. To solve this issue, commit: b6366f048e0c ("sched/rt: Use IPI to trigger RT task push migration instead of pulling") changed the way to request a pull. Instead of grabbing the lock of the overloaded CPU's runqueue, it simply sent an IPI to that CPU to do the work. Although the IPI logic worked very well in removing the large latency build up, it still could suffer from a large number of IPIs being sent to a single CPU. On a 80 CPU box, I measured over 200us of processing IPIs. 
Worse yet, when I tested this on a 120 CPU box, with a stress test that had lots of RT tasks scheduling on all CPUs, it actually triggered the hard lockup detector! One CPU had so many IPIs sent to it, and due to the restart mechanism that is triggered when the source run queue has a priority status change, the CPU spent minutes! processing the IPIs. Thinking about this further, I realized there's no reason for each run queue to send its own IPI. As all CPUs with overloaded tasks must be scanned regardless if there's one or many CPUs lowering their priority, because there's no current way to find the CPU with the highest priority task that can schedule to one of these CPUs, there really only needs to be one IPI being sent around at a time. This greatly simplifies the code! The new approach is to have each root domain have its own irq work, as the rto_mask is per root domain. The root domain has the following fields attached to it: rto_push_work - the irq work to process each CPU set in rto_mask rto_lock - the lock to protect some of the other rto fields rto_loop_start - an atomic that keeps contention down on rto_lock the first CPU scheduling in a lower priority task is the one to kick off the process. rto_loop_next - an atomic that gets incremented for each CPU that schedules in a lower priority task. rto_loop - a variable protected by rto_lock that is used to compare against rto_loop_next rto_cpu - The cpu to send the next IPI to, also protected by the rto_lock. When a CPU schedules in a lower priority task and wants to make sure overloaded CPUs know about it. It increments the rto_loop_next. Then it atomically sets rto_loop_start with a cmpxchg. If the old value is not "0", then it is done, as another CPU is kicking off the IPI loop. If the old value is "0", then it will take the rto_lock to synchronize with a possible IPI being sent around to the overloaded CPUs. If rto_cpu is greater than or equal to nr_cpu_ids, then there's either no IPI being sent around, or one is about to finish. Then rto_cpu is set to the first CPU in rto_mask and an IPI is sent to that CPU. If there's no CPUs set in rto_mask, then there's nothing to be done. When the CPU receives the IPI, it will first try to push any RT tasks that is queued on the CPU but can't run because a higher priority RT task is currently running on that CPU. Then it takes the rto_lock and looks for the next CPU in the rto_mask. If it finds one, it simply sends an IPI to that CPU and the process continues. If there's no more CPUs in the rto_mask, then rto_loop is compared with rto_loop_next. If they match, everything is done and the process is over. If they do not match, then a CPU scheduled in a lower priority task as the IPI was being passed around, and the process needs to start again. The first CPU in rto_mask is sent the IPI. This change removes this duplication of work in the IPI logic, and greatly lowers the latency caused by the IPIs. This removed the lockup happening on the 120 CPU machine. It also simplifies the code tremendously. What else could anyone ask for? Thanks to Peter Zijlstra for simplifying the rto_loop_start atomic logic and supplying me with the rto_start_trylock() and rto_start_unlock() helper functions. 
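A standalone C11-atomics sketch of the "only the first CPU starts the loop" part described above (variable and function names are shortened or invented for the example; in the real code the start flag is released only when the IPI iterator has finished scanning, not immediately):

#include <stdatomic.h>
#include <stdio.h>

/* One "start" flag and one generation counter per root domain. */
static atomic_int rto_loop_start = 0;
static atomic_int rto_loop_next  = 0;

/* Returns 1 only for the single caller that gets to kick off the IPI loop. */
static int rto_start_trylock(void)
{
    int expected = 0;
    return atomic_compare_exchange_strong(&rto_loop_start, &expected, 1);
}

static void rto_start_unlock(void)
{
    atomic_store_explicit(&rto_loop_start, 0, memory_order_release);
}

/* Called when a CPU schedules in a lower-priority task. */
static void tell_overloaded_cpus(void)
{
    /* Every caller notes that another scan of the overloaded CPUs is needed... */
    atomic_fetch_add(&rto_loop_next, 1);

    /* ...but only the first one actually starts the roaming IPI. */
    if (!rto_start_trylock())
        return;

    printf("kick off the irq-work / IPI iterator here\n");
    rto_start_unlock();   /* in the real code this happens when the loop ends */
}

int main(void)
{
    tell_overloaded_cpus();
    tell_overloaded_cpus();   /* second caller just bumps rto_loop_next */
    return 0;
}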
Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Peter Zijlstra (Intel) Cc: Clark Williams Cc: Daniel Bristot de Oliveira Cc: John Kacur Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Scott Wood Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170424114732.1aac6dc4@gandalf.local.home Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- kernel/sched/rt.c | 316 +++++++++++++++------------------------- kernel/sched/sched.h | 24 ++- kernel/sched/topology.c | 6 + 3 files changed, 138 insertions(+), 208 deletions(-) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 3c96c80e0992af..d8c43d73e07880 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -74,10 +74,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) raw_spin_unlock(&rt_b->rt_runtime_lock); } -#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI) -static void push_irq_work_func(struct irq_work *work); -#endif - void init_rt_rq(struct rt_rq *rt_rq) { struct rt_prio_array *array; @@ -97,13 +93,6 @@ void init_rt_rq(struct rt_rq *rt_rq) rt_rq->rt_nr_migratory = 0; rt_rq->overloaded = 0; plist_head_init(&rt_rq->pushable_tasks); - -#ifdef HAVE_RT_PUSH_IPI - rt_rq->push_flags = 0; - rt_rq->push_cpu = nr_cpu_ids; - raw_spin_lock_init(&rt_rq->push_lock); - init_irq_work(&rt_rq->push_work, push_irq_work_func); -#endif #endif /* CONFIG_SMP */ /* We start is dequeued state, because no RT tasks are queued */ rt_rq->rt_queued = 0; @@ -1876,241 +1865,166 @@ static void push_rt_tasks(struct rq *rq) } #ifdef HAVE_RT_PUSH_IPI + /* - * The search for the next cpu always starts at rq->cpu and ends - * when we reach rq->cpu again. It will never return rq->cpu. - * This returns the next cpu to check, or nr_cpu_ids if the loop - * is complete. + * When a high priority task schedules out from a CPU and a lower priority + * task is scheduled in, a check is made to see if there's any RT tasks + * on other CPUs that are waiting to run because a higher priority RT task + * is currently running on its CPU. In this case, the CPU with multiple RT + * tasks queued on it (overloaded) needs to be notified that a CPU has opened + * up that may be able to run one of its non-running queued RT tasks. + * + * All CPUs with overloaded RT tasks need to be notified as there is currently + * no way to know which of these CPUs have the highest priority task waiting + * to run. Instead of trying to take a spinlock on each of these CPUs, + * which has shown to cause large latency when done on machines with many + * CPUs, sending an IPI to the CPUs to have them push off the overloaded + * RT tasks waiting to run. + * + * Just sending an IPI to each of the CPUs is also an issue, as on large + * count CPU machines, this can cause an IPI storm on a CPU, especially + * if its the only CPU with multiple RT tasks queued, and a large number + * of CPUs scheduling a lower priority task at the same time. + * + * Each root domain has its own irq work function that can iterate over + * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT + * tassk must be checked if there's one or many CPUs that are lowering + * their priority, there's a single irq work iterator that will try to + * push off RT tasks that are waiting to run. + * + * When a CPU schedules a lower priority task, it will kick off the + * irq work iterator that will jump to each CPU with overloaded RT tasks. 
+ * As it only takes the first CPU that schedules a lower priority task + * to start the process, the rto_start variable is incremented and if + * the atomic result is one, then that CPU will try to take the rto_lock. + * This prevents high contention on the lock as the process handles all + * CPUs scheduling lower priority tasks. + * + * All CPUs that are scheduling a lower priority task will increment the + * rt_loop_next variable. This will make sure that the irq work iterator + * checks all RT overloaded CPUs whenever a CPU schedules a new lower + * priority task, even if the iterator is in the middle of a scan. Incrementing + * the rt_loop_next will cause the iterator to perform another scan. * - * rq->rt.push_cpu holds the last cpu returned by this function, - * or if this is the first instance, it must hold rq->cpu. */ static int rto_next_cpu(struct rq *rq) { - int prev_cpu = rq->rt.push_cpu; + struct root_domain *rd = rq->rd; + int next; int cpu; - cpu = cpumask_next(prev_cpu, rq->rd->rto_mask); - /* - * If the previous cpu is less than the rq's CPU, then it already - * passed the end of the mask, and has started from the beginning. - * We end if the next CPU is greater or equal to rq's CPU. + * When starting the IPI RT pushing, the rto_cpu is set to -1, + * rt_next_cpu() will simply return the first CPU found in + * the rto_mask. + * + * If rto_next_cpu() is called with rto_cpu is a valid cpu, it + * will return the next CPU found in the rto_mask. + * + * If there are no more CPUs left in the rto_mask, then a check is made + * against rto_loop and rto_loop_next. rto_loop is only updated with + * the rto_lock held, but any CPU may increment the rto_loop_next + * without any locking. */ - if (prev_cpu < rq->cpu) { - if (cpu >= rq->cpu) - return nr_cpu_ids; + for (;;) { - } else if (cpu >= nr_cpu_ids) { - /* - * We passed the end of the mask, start at the beginning. - * If the result is greater or equal to the rq's CPU, then - * the loop is finished. - */ - cpu = cpumask_first(rq->rd->rto_mask); - if (cpu >= rq->cpu) - return nr_cpu_ids; - } - rq->rt.push_cpu = cpu; + /* When rto_cpu is -1 this acts like cpumask_first() */ + cpu = cpumask_next(rd->rto_cpu, rd->rto_mask); - /* Return cpu to let the caller know if the loop is finished or not */ - return cpu; -} + rd->rto_cpu = cpu; -static int find_next_push_cpu(struct rq *rq) -{ - struct rq *next_rq; - int cpu; + if (cpu < nr_cpu_ids) + return cpu; - while (1) { - cpu = rto_next_cpu(rq); - if (cpu >= nr_cpu_ids) - break; - next_rq = cpu_rq(cpu); + rd->rto_cpu = -1; + + /* + * ACQUIRE ensures we see the @rto_mask changes + * made prior to the @next value observed. + * + * Matches WMB in rt_set_overload(). + */ + next = atomic_read_acquire(&rd->rto_loop_next); - /* Make sure the next rq can push to this rq */ - if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr) + if (rd->rto_loop == next) break; + + rd->rto_loop = next; } - return cpu; + return -1; } -#define RT_PUSH_IPI_EXECUTING 1 -#define RT_PUSH_IPI_RESTART 2 +static inline bool rto_start_trylock(atomic_t *v) +{ + return !atomic_cmpxchg_acquire(v, 0, 1); +} -/* - * When a high priority task schedules out from a CPU and a lower priority - * task is scheduled in, a check is made to see if there's any RT tasks - * on other CPUs that are waiting to run because a higher priority RT task - * is currently running on its CPU. 
In this case, the CPU with multiple RT - * tasks queued on it (overloaded) needs to be notified that a CPU has opened - * up that may be able to run one of its non-running queued RT tasks. - * - * On large CPU boxes, there's the case that several CPUs could schedule - * a lower priority task at the same time, in which case it will look for - * any overloaded CPUs that it could pull a task from. To do this, the runqueue - * lock must be taken from that overloaded CPU. Having 10s of CPUs all fighting - * for a single overloaded CPU's runqueue lock can produce a large latency. - * (This has actually been observed on large boxes running cyclictest). - * Instead of taking the runqueue lock of the overloaded CPU, each of the - * CPUs that scheduled a lower priority task simply sends an IPI to the - * overloaded CPU. An IPI is much cheaper than taking an runqueue lock with - * lots of contention. The overloaded CPU will look to push its non-running - * RT task off, and if it does, it can then ignore the other IPIs coming - * in, and just pass those IPIs off to any other overloaded CPU. - * - * When a CPU schedules a lower priority task, it only sends an IPI to - * the "next" CPU that has overloaded RT tasks. This prevents IPI storms, - * as having 10 CPUs scheduling lower priority tasks and 10 CPUs with - * RT overloaded tasks, would cause 100 IPIs to go out at once. - * - * The overloaded RT CPU, when receiving an IPI, will try to push off its - * overloaded RT tasks and then send an IPI to the next CPU that has - * overloaded RT tasks. This stops when all CPUs with overloaded RT tasks - * have completed. Just because a CPU may have pushed off its own overloaded - * RT task does not mean it should stop sending the IPI around to other - * overloaded CPUs. There may be another RT task waiting to run on one of - * those CPUs that are of higher priority than the one that was just - * pushed. - * - * An optimization that could possibly be made is to make a CPU array similar - * to the cpupri array mask of all running RT tasks, but for the overloaded - * case, then the IPI could be sent to only the CPU with the highest priority - * RT task waiting, and that CPU could send off further IPIs to the CPU with - * the next highest waiting task. Since the overloaded case is much less likely - * to happen, the complexity of this implementation may not be worth it. - * Instead, just send an IPI around to all overloaded CPUs. - * - * The rq->rt.push_flags holds the status of the IPI that is going around. - * A run queue can only send out a single IPI at a time. The possible flags - * for rq->rt.push_flags are: - * - * (None or zero): No IPI is going around for the current rq - * RT_PUSH_IPI_EXECUTING: An IPI for the rq is being passed around - * RT_PUSH_IPI_RESTART: The priority of the running task for the rq - * has changed, and the IPI should restart - * circulating the overloaded CPUs again. - * - * rq->rt.push_cpu contains the CPU that is being sent the IPI. It is updated - * before sending to the next CPU. - * - * Instead of having all CPUs that schedule a lower priority task send - * an IPI to the same "first" CPU in the RT overload mask, they send it - * to the next overloaded CPU after their own CPU. This helps distribute - * the work when there's more than one overloaded CPU and multiple CPUs - * scheduling in lower priority tasks. - * - * When a rq schedules a lower priority task than what was currently - * running, the next CPU with overloaded RT tasks is examined first. 
- * That is, if CPU 1 and 5 are overloaded, and CPU 3 schedules a lower - * priority task, it will send an IPI first to CPU 5, then CPU 5 will - * send to CPU 1 if it is still overloaded. CPU 1 will clear the - * rq->rt.push_flags if RT_PUSH_IPI_RESTART is not set. - * - * The first CPU to notice IPI_RESTART is set, will clear that flag and then - * send an IPI to the next overloaded CPU after the rq->cpu and not the next - * CPU after push_cpu. That is, if CPU 1, 4 and 5 are overloaded when CPU 3 - * schedules a lower priority task, and the IPI_RESTART gets set while the - * handling is being done on CPU 5, it will clear the flag and send it back to - * CPU 4 instead of CPU 1. - * - * Note, the above logic can be disabled by turning off the sched_feature - * RT_PUSH_IPI. Then the rq lock of the overloaded CPU will simply be - * taken by the CPU requesting a pull and the waiting RT task will be pulled - * by that CPU. This may be fine for machines with few CPUs. - */ -static void tell_cpu_to_push(struct rq *rq) +static inline void rto_start_unlock(atomic_t *v) { - int cpu; + atomic_set_release(v, 0); +} - if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) { - raw_spin_lock(&rq->rt.push_lock); - /* Make sure it's still executing */ - if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) { - /* - * Tell the IPI to restart the loop as things have - * changed since it started. - */ - rq->rt.push_flags |= RT_PUSH_IPI_RESTART; - raw_spin_unlock(&rq->rt.push_lock); - return; - } - raw_spin_unlock(&rq->rt.push_lock); - } +static void tell_cpu_to_push(struct rq *rq) +{ + int cpu = -1; - /* When here, there's no IPI going around */ + /* Keep the loop going if the IPI is currently active */ + atomic_inc(&rq->rd->rto_loop_next); - rq->rt.push_cpu = rq->cpu; - cpu = find_next_push_cpu(rq); - if (cpu >= nr_cpu_ids) + /* Only one CPU can initiate a loop at a time */ + if (!rto_start_trylock(&rq->rd->rto_loop_start)) return; - rq->rt.push_flags = RT_PUSH_IPI_EXECUTING; + raw_spin_lock(&rq->rd->rto_lock); + + /* + * The rto_cpu is updated under the lock, if it has a valid cpu + * then the IPI is still running and will continue due to the + * update to loop_next, and nothing needs to be done here. + * Otherwise it is finishing up and an ipi needs to be sent. + */ + if (rq->rd->rto_cpu < 0) + cpu = rto_next_cpu(rq); - irq_work_queue_on(&rq->rt.push_work, cpu); + raw_spin_unlock(&rq->rd->rto_lock); + + rto_start_unlock(&rq->rd->rto_loop_start); + + if (cpu >= 0) + irq_work_queue_on(&rq->rd->rto_push_work, cpu); } /* Called from hardirq context */ -static void try_to_push_tasks(void *arg) +void rto_push_irq_work_func(struct irq_work *work) { - struct rt_rq *rt_rq = arg; - struct rq *rq, *src_rq; - int this_cpu; + struct rq *rq; int cpu; - this_cpu = rt_rq->push_cpu; + rq = this_rq(); - /* Paranoid check */ - BUG_ON(this_cpu != smp_processor_id()); - - rq = cpu_rq(this_cpu); - src_rq = rq_of_rt_rq(rt_rq); - -again: + /* + * We do not need to grab the lock to check for has_pushable_tasks. + * When it gets updated, a check is made if a push is possible. + */ if (has_pushable_tasks(rq)) { raw_spin_lock(&rq->lock); - push_rt_task(rq); + push_rt_tasks(rq); raw_spin_unlock(&rq->lock); } - /* Pass the IPI to the next rt overloaded queue */ - raw_spin_lock(&rt_rq->push_lock); - /* - * If the source queue changed since the IPI went out, - * we need to restart the search from that CPU again. 
- */ - if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) { - rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART; - rt_rq->push_cpu = src_rq->cpu; - } + raw_spin_lock(&rq->rd->rto_lock); - cpu = find_next_push_cpu(src_rq); + /* Pass the IPI to the next rt overloaded queue */ + cpu = rto_next_cpu(rq); - if (cpu >= nr_cpu_ids) - rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING; - raw_spin_unlock(&rt_rq->push_lock); + raw_spin_unlock(&rq->rd->rto_lock); - if (cpu >= nr_cpu_ids) + if (cpu < 0) return; - /* - * It is possible that a restart caused this CPU to be - * chosen again. Don't bother with an IPI, just see if we - * have more to push. - */ - if (unlikely(cpu == rq->cpu)) - goto again; - /* Try the next RT overloaded CPU */ - irq_work_queue_on(&rt_rq->push_work, cpu); -} - -static void push_irq_work_func(struct irq_work *work) -{ - struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work); - - try_to_push_tasks(rt_rq); + irq_work_queue_on(&rq->rd->rto_push_work, cpu); } #endif /* HAVE_RT_PUSH_IPI */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 3b448ba82225df..b732e779fe7d55 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -502,7 +502,7 @@ static inline int rt_bandwidth_enabled(void) } /* RT IPI pull logic requires IRQ_WORK */ -#ifdef CONFIG_IRQ_WORK +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) # define HAVE_RT_PUSH_IPI #endif @@ -524,12 +524,6 @@ struct rt_rq { unsigned long rt_nr_total; int overloaded; struct plist_head pushable_tasks; -#ifdef HAVE_RT_PUSH_IPI - int push_flags; - int push_cpu; - struct irq_work push_work; - raw_spinlock_t push_lock; -#endif #endif /* CONFIG_SMP */ int rt_queued; @@ -638,6 +632,19 @@ struct root_domain { struct dl_bw dl_bw; struct cpudl cpudl; +#ifdef HAVE_RT_PUSH_IPI + /* + * For IPI pull requests, loop across the rto_mask. + */ + struct irq_work rto_push_work; + raw_spinlock_t rto_lock; + /* These are only updated and read within rto_lock */ + int rto_loop; + int rto_cpu; + /* These atomics are updated outside of a lock */ + atomic_t rto_loop_next; + atomic_t rto_loop_start; +#endif /* * The "RT overload" flag: it gets set if a CPU has more than * one runnable RT task. @@ -655,6 +662,9 @@ extern void init_defrootdomain(void); extern int sched_init_domains(const struct cpumask *cpu_map); extern void rq_attach_root(struct rq *rq, struct root_domain *rd); +#ifdef HAVE_RT_PUSH_IPI +extern void rto_push_irq_work_func(struct irq_work *work); +#endif #endif /* CONFIG_SMP */ /* diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 6798276d29af2e..093f2ceba2e241 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -269,6 +269,12 @@ static int init_rootdomain(struct root_domain *rd) if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) goto free_dlo_mask; +#ifdef HAVE_RT_PUSH_IPI + rd->rto_cpu = -1; + raw_spin_lock_init(&rd->rto_lock); + init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); +#endif + init_dl_bw(&rd->dl_bw); if (cpudl_init(&rd->cpudl) != 0) goto free_rto_mask; From 9be341edebf5880a53ff8b27d05c7d229797c4be Mon Sep 17 00:00:00 2001 From: John Crispin Date: Mon, 20 Feb 2017 10:29:43 +0100 Subject: [PATCH 0100/3326] MIPS: pci: Remove KERN_WARN instance inside the mt7620 driver commit 8593b18ad348733b5d5ddfa0c79dcabf51dff308 upstream. Switch the printk() call to the prefered pr_warn() api. 
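For context, pr_warn() is just the log-level wrapper around printk(): it expands to printk(KERN_WARNING pr_fmt(fmt), ...). KERN_WARN, which the driver used before this patch, is not a defined log-level macro at all, so the conversion below is the minimal correct form. A small sketch of the equivalence, outside the driver (the function name is only for illustration):

  #include <linux/printk.h>

  static void pciephy_warn_example(void)
  {
          /* Both lines log the same message at the WARNING level. */
          pr_warn("PCIE-PHY retry failed.\n");
          printk(KERN_WARNING "PCIE-PHY retry failed.\n");
  }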
Fixes: 7e5873d3755c ("MIPS: pci: Add MT7620a PCIE driver") Signed-off-by: John Crispin Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/15321/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/pci/pci-mt7620.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c index 90fba9bf98da71..27ac00c36bc05b 100644 --- a/arch/mips/pci/pci-mt7620.c +++ b/arch/mips/pci/pci-mt7620.c @@ -121,7 +121,7 @@ static int wait_pciephy_busy(void) else break; if (retry++ > WAITRETRY_MAX) { - printk(KERN_WARN "PCIE-PHY retry failed.\n"); + pr_warn("PCIE-PHY retry failed.\n"); return -1; } } From 3bfb87ecb4f7e19c53c31b7beca4ecd9bb906a67 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Wed, 1 Nov 2017 15:42:36 +0800 Subject: [PATCH 0101/3326] dm: fix race between dm_get_from_kobject() and __dm_destroy() commit b9a41d21dceadf8104812626ef85dc56ee8a60ed upstream. The following BUG_ON was hit when testing repeat creation and removal of DM devices: kernel BUG at drivers/md/dm.c:2919! CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44 Call Trace: [] dm_get_from_kobject+0x34/0x3a [] dm_attr_show+0x2b/0x5e [] ? mutex_lock+0x26/0x44 [] sysfs_kf_seq_show+0x83/0xcf [] kernfs_seq_show+0x23/0x25 [] seq_read+0x16f/0x325 [] kernfs_fop_read+0x3a/0x13f [] __vfs_read+0x26/0x9d [] ? security_file_permission+0x3c/0x44 [] ? rw_verify_area+0x83/0xd9 [] vfs_read+0x8f/0xcf [] ? __fdget_pos+0x12/0x41 [] SyS_read+0x4b/0x76 [] system_call_fastpath+0x12/0x71 The bug can be easily triggered, if an extra delay (e.g. 10ms) is added between the test of DMF_FREEING & DMF_DELETING and dm_get() in dm_get_from_kobject(). To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and dm_get() are done in an atomic way, so _minor_lock is used. The other callers of dm_get() have also been checked to be OK: some callers invoke dm_get() under _minor_lock, some callers invoke it under _hash_lock, and dm_start_request() invoke it after increasing md->open_count. Signed-off-by: Hou Tao Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a88c87c0af74a0..804419635cc7d3 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2709,11 +2709,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) md = container_of(kobj, struct mapped_device, kobj_holder.kobj); - if (test_bit(DMF_FREEING, &md->flags) || - dm_deleting_md(md)) - return NULL; - + spin_lock(&_minor_lock); + if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { + md = NULL; + goto out; + } dm_get(md); +out: + spin_unlock(&_minor_lock); + return md; } From e39516d24fb8c3c1bcf1b1b5ba20e0c7093e1512 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Tue, 14 Nov 2017 15:40:52 -0500 Subject: [PATCH 0102/3326] dm: discard support requires all targets in a table support discards commit 8a74d29d541cd86569139c6f3f44b2d210458071 upstream. A DM device with a mix of discard capabilities (due to some underlying devices not having discard support) _should_ just return -EOPNOTSUPP for the region of the device that doesn't support discards (even if only by way of the underlying driver formally not supporting discards). BUT, that does ask the underlying driver to handle something that it never advertised support for. 
In doing so we're exposing users to the potential for a underlying disk driver hanging if/when a discard is issued a the device that is incapable and never claimed to support discards. Fix this by requiring that each DM target in a DM table provide discard support as a prereq for a DM device to advertise support for discards. This may cause some configurations that were happily supporting discards (even in the face of a mix of discard support) to stop supporting discards -- but the risk of users hitting driver hangs, and forced reboots, outweighs supporting those fringe mixed discard configurations. Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-table.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ef7b8f201f73ad..4287fc9f3527f8 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1758,13 +1758,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t) return true; } - -static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && blk_queue_discard(q); + return q && !blk_queue_discard(q); } static bool dm_table_supports_discards(struct dm_table *t) @@ -1772,28 +1771,24 @@ static bool dm_table_supports_discards(struct dm_table *t) struct dm_target *ti; unsigned i; - /* - * Unless any target used by the table set discards_supported, - * require at least one underlying device to support discards. - * t->devices includes internal dm devices such as mirror logs - * so we need to use iterate_devices here, which targets - * supporting discard selectively must provide. - */ for (i = 0; i < dm_table_get_num_targets(t); i++) { ti = dm_table_get_target(t, i); if (!ti->num_discard_bios) - continue; - - if (ti->discards_supported) - return true; + return false; - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, device_discard_capable, NULL)) - return true; + /* + * Either the target provides discard support (as implied by setting + * 'discards_supported') or it relies on _all_ data devices having + * discard support. + */ + if (!ti->discards_supported && + (!ti->type->iterate_devices || + ti->type->iterate_devices(ti, device_not_discard_capable, NULL))) + return false; } - return false; + return true; } void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, From 43292e652771658e976dd6e263172b7043cb98f0 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 10 Nov 2017 11:46:54 +0000 Subject: [PATCH 0103/3326] MIPS: Fix odd fp register warnings with MIPS64r2 commit c7fd89a6407ea3a44a2a2fa12d290162c42499c4 upstream. Building 32-bit MIPS64r2 kernels produces warnings like the following on certain toolchains (such as GNU assembler 2.24.90, but not GNU assembler 2.28.51) since commit 22b8ba765a72 ("MIPS: Fix MIPS64 FP save/restore on 32-bit kernels"), due to the exposure of fpu_save_16odd from fpu_save_double and fpu_restore_16odd from fpu_restore_double: arch/mips/kernel/r4k_fpu.S:47: Warning: float register should be even, was 1 ... arch/mips/kernel/r4k_fpu.S:59: Warning: float register should be even, was 1 ... This appears to be because .set mips64r2 does not change the FPU ABI to 64-bit when -march=mips64r2 (or e.g. 
-march=xlp) is provided on the command line on that toolchain, from the default FPU ABI of 32-bit due to the -mabi=32. This makes access to the odd FPU registers invalid. Fix by explicitly changing the FPU ABI with .set fp=64 directives in fpu_save_16odd and fpu_restore_16odd, and moving the undefine of fp up in asmmacro.h so fp doesn't turn into $30. Fixes: 22b8ba765a72 ("MIPS: Fix MIPS64 FP save/restore on 32-bit kernels") Signed-off-by: James Hogan Cc: Ralf Baechle Cc: Paul Burton Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/17656/ Signed-off-by: Greg Kroah-Hartman --- arch/mips/include/asm/asmmacro.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 83054f79f72aa6..8333ce90b1726e 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -19,6 +19,9 @@ #include #endif +/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ +#undef fp + /* * Helper macros for generating raw instruction encodings. */ @@ -105,6 +108,7 @@ .macro fpu_save_16odd thread .set push .set mips64r2 + .set fp=64 SET_HARDFLOAT sdc1 $f1, THREAD_FPR1(\thread) sdc1 $f3, THREAD_FPR3(\thread) @@ -163,6 +167,7 @@ .macro fpu_restore_16odd thread .set push .set mips64r2 + .set fp=64 SET_HARDFLOAT ldc1 $f1, THREAD_FPR1(\thread) ldc1 $f3, THREAD_FPR3(\thread) @@ -234,9 +239,6 @@ .endm #ifdef TOOLCHAIN_SUPPORTS_MSA -/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ -#undef fp - .macro _cfcmsa rd, cs .set push .set mips32r2 From d63534042a1b279ab50b907a4d9bcd5f9d632e98 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Mon, 3 Jul 2017 23:41:47 +0100 Subject: [PATCH 0104/3326] MIPS: Fix MIPS64 FP save/restore on 32-bit kernels commit 22b8ba765a726d90e9830ff6134c32b04f12c10f upstream. 32-bit kernels can be configured to support MIPS64, in which case neither CONFIG_64BIT or CONFIG_CPU_MIPS32_R* will be set. This causes the CP0_Status.FR checks at the point of floating point register save and restore to be compiled out, which results in odd FP registers not being saved or restored to the task or signal context even when CP0_Status.FR is set. Fix the ifdefs to use CONFIG_CPU_MIPSR2 and CONFIG_CPU_MIPSR6, which are enabled for the relevant revisions of either MIPS32 or MIPS64, along with some other CPUs such as Octeon (r2), Loongson1 (r2), XLP (r2), Loongson 3A R2. The suspect code originates from commit 597ce1723e0f ("MIPS: Support for 64-bit FP with O32 binaries") in v3.14, however the code in __enable_fpu() was consistent and refused to set FR=1, falling back to software FPU emulation. This was suboptimal but should be functionally correct. Commit fcc53b5f6c38 ("MIPS: fpu.h: Allow 64-bit FPU on a 64-bit MIPS R6 CPU") in v4.2 (and stable tagged back to 4.0) later introduced the bug by updating __enable_fpu() to set FR=1 but failing to update the other similar ifdefs to enable FR=1 state handling. 
Fixes: fcc53b5f6c38 ("MIPS: fpu.h: Allow 64-bit FPU on a 64-bit MIPS R6 CPU") Signed-off-by: James Hogan Cc: Ralf Baechle Cc: Paul Burton Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16739/ Signed-off-by: Greg Kroah-Hartman --- arch/mips/include/asm/asmmacro.h | 8 ++++---- arch/mips/kernel/r4k_fpu.S | 20 ++++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 8333ce90b1726e..feb069cbf44e8a 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -130,8 +130,8 @@ .endm .macro fpu_save_double thread status tmp -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) sll \tmp, \status, 5 bgez \tmp, 10f fpu_save_16odd \thread @@ -189,8 +189,8 @@ .endm .macro fpu_restore_double thread status tmp -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) sll \tmp, \status, 5 bgez \tmp, 10f # 16 register mode? diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 0a83b1708b3cbe..8e3a6020c61345 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -40,8 +40,8 @@ */ LEAF(_save_fp) EXPORT_SYMBOL(_save_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) mfc0 t0, CP0_STATUS #endif fpu_save_double a0 t0 t1 # clobbers t1 @@ -52,8 +52,8 @@ EXPORT_SYMBOL(_save_fp) * Restore a thread's fp context. */ LEAF(_restore_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) mfc0 t0, CP0_STATUS #endif fpu_restore_double a0 t0 t1 # clobbers t1 @@ -246,11 +246,11 @@ LEAF(_save_fp_context) cfc1 t1, fcr31 .set pop -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) .set push SET_HARDFLOAT -#ifdef CONFIG_CPU_MIPS32_R2 +#ifdef CONFIG_CPU_MIPSR2 .set mips32r2 .set fp=64 mfc0 t0, CP0_STATUS @@ -314,11 +314,11 @@ LEAF(_save_fp_context) LEAF(_restore_fp_context) EX lw t1, 0(a1) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) .set push SET_HARDFLOAT -#ifdef CONFIG_CPU_MIPS32_R2 +#ifdef CONFIG_CPU_MIPSR2 .set mips32r2 .set fp=64 mfc0 t0, CP0_STATUS From 43bce9f2eb96e6dd87dbb5850138960a6265b11a Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 5 Nov 2017 14:30:52 +0900 Subject: [PATCH 0105/3326] MIPS: dts: remove bogus bcm96358nb4ser.dtb from dtb-y entry commit 3cad14d56adbf7d621fc5a35db42f3acc0a2d6e8 upstream. arch/mips/boot/dts/brcm/bcm96358nb4ser.dts does not exist, so we cannot build bcm96358nb4ser.dtb . 
Signed-off-by: Masahiro Yamada Fixes: 695835511f96 ("MIPS: BMIPS: rename bcm96358nb4ser to bcm6358-neufbox4-sercom") Acked-by: James Hogan Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- arch/mips/boot/dts/brcm/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile index 9e09cc4556b387..39899431236130 100644 --- a/arch/mips/boot/dts/brcm/Makefile +++ b/arch/mips/boot/dts/brcm/Makefile @@ -23,7 +23,6 @@ dtb-$(CONFIG_DT_NONE) += \ bcm63268-comtrend-vr-3032u.dtb \ bcm93384wvg.dtb \ bcm93384wvg_viper.dtb \ - bcm96358nb4ser.dtb \ bcm96368mvwg.dtb \ bcm9ejtagprb.dtb \ bcm97125cbmb.dtb \ From dc3aceed47c35bfc5bcf67efea75c1109c7f6e20 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Tue, 7 Nov 2017 19:09:20 +0000 Subject: [PATCH 0106/3326] MIPS: Fix an n32 core file generation regset support regression commit 547da673173de51f73887377eb275304775064ad upstream. Fix a commit 7aeb753b5353 ("MIPS: Implement task_user_regset_view.") regression, then activated by commit 6a9c001b7ec3 ("MIPS: Switch ELF core dumper to use regsets.)", that caused n32 processes to dump o32 core files by failing to set the EF_MIPS_ABI2 flag in the ELF core file header's `e_flags' member: $ file tls-core tls-core: ELF 32-bit MSB executable, MIPS, N32 MIPS64 rel2 version 1 (SYSV), [...] $ ./tls-core Aborted (core dumped) $ file core core: ELF 32-bit MSB core file MIPS, MIPS-I version 1 (SYSV), SVR4-style $ Previously the flag was set as the result of a: statement placed in arch/mips/kernel/binfmt_elfn32.c, however in the regset case, i.e. when CORE_DUMP_USE_REGSET is set, ELF_CORE_EFLAGS is no longer used by `fill_note_info' in fs/binfmt_elf.c, and instead the `->e_flags' member of the regset view chosen is. We have the views defined in arch/mips/kernel/ptrace.c, however only an o32 and an n64 one, and the latter is used for n32 as well. Consequently an o32 core file is incorrectly dumped from n32 processes (the ELF32 vs ELF64 class is chosen elsewhere, and the 32-bit one is correctly selected for n32). Correct the issue then by defining an n32 regset view and using it as appropriate. Issue discovered in GDB testing. Fixes: 7aeb753b5353 ("MIPS: Implement task_user_regset_view.") Signed-off-by: Maciej W. 
Rozycki Cc: Ralf Baechle Cc: Djordje Todorovic Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/17617/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/ptrace.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 1395654cfc8d83..5a09c2901a7605 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -618,6 +618,19 @@ static const struct user_regset_view user_mips64_view = { .n = ARRAY_SIZE(mips64_regsets), }; +#ifdef CONFIG_MIPS32_N32 + +static const struct user_regset_view user_mipsn32_view = { + .name = "mipsn32", + .e_flags = EF_MIPS_ABI2, + .e_machine = ELF_ARCH, + .ei_osabi = ELF_OSABI, + .regsets = mips64_regsets, + .n = ARRAY_SIZE(mips64_regsets), +}; + +#endif /* CONFIG_MIPS32_N32 */ + #endif /* CONFIG_64BIT */ const struct user_regset_view *task_user_regset_view(struct task_struct *task) @@ -628,6 +641,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) #ifdef CONFIG_MIPS32_O32 if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) return &user_mips_view; +#endif +#ifdef CONFIG_MIPS32_N32 + if (test_tsk_thread_flag(task, TIF_32BIT_ADDR)) + return &user_mipsn32_view; #endif return &user_mips64_view; #endif From 8d187fa8e9b56a8516291df055d956ca32d453ee Mon Sep 17 00:00:00 2001 From: Mirko Parthey Date: Thu, 18 May 2017 21:30:03 +0200 Subject: [PATCH 0107/3326] MIPS: BCM47XX: Fix LED inversion for WRT54GSv1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 56a46acf62af5ba44fca2f3f1c7c25a2d5385b19 upstream. The WLAN LED on the Linksys WRT54GSv1 is active low, but the software treats it as active high. Fix the inverted logic. Fixes: 7bb26b169116 ("MIPS: BCM47xx: Fix LEDs on WRT54GS V1.0") Signed-off-by: Mirko Parthey Looks-ok-by: Rafał Miłecki Cc: Hauke Mehrtens Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16071/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/bcm47xx/leds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c index d4f2407a42c60c..8307a8a0266724 100644 --- a/arch/mips/bcm47xx/leds.c +++ b/arch/mips/bcm47xx/leds.c @@ -331,7 +331,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = { /* Verified on: WRT54GS V1.0 */ static const struct gpio_led bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = { - BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON), BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF), }; From 085d66519c3e643f15b91ed0ece025200a2257a5 Mon Sep 17 00:00:00 2001 From: Aleksandar Markovic Date: Thu, 2 Nov 2017 12:13:58 +0100 Subject: [PATCH 0108/3326] MIPS: math-emu: Fix final emulation phase for certain instructions commit 409fcace9963c1e8d2cb0f7ac62e8b34d47ef979 upstream. Fix final phase of . emulation. Provide proper generation of SIGFPE signal and updating debugfs FP exception stats in cases of any exception flags set in preceding phases of emulation. CLASS. instruction may generate "Unimplemented Operation" FP exception. . instructions may generate "Inexact", "Unimplemented Operation", "Invalid Operation", "Overflow", and "Underflow" FP exceptions. . instructions can generate "Unimplemented Operation" and "Invalid Operation" FP exceptions. 
The proper final processing of the cases when any FP exception flag is set is achieved by replacing "break" statement with "goto copcsr" statement. With such solution, this patch brings the final phase of emulation of the above instructions consistent with the one corresponding to the previously implemented emulation of other related FPU instructions (ADD, SUB, etc.). Fixes: 38db37ba069f ("MIPS: math-emu: Add support for the MIPS R6 CLASS FPU instruction") Fixes: e24c3bec3e8e ("MIPS: math-emu: Add support for the MIPS R6 MADDF FPU instruction") Fixes: 83d43305a1df ("MIPS: math-emu: Add support for the MIPS R6 MSUBF FPU instruction") Fixes: a79f5f9ba508 ("MIPS: math-emu: Add support for the MIPS R6 MAX{, A} FPU instruction") Fixes: 4e9561b20e2f ("MIPS: math-emu: Add support for the MIPS R6 MIN{, A} FPU instruction") Signed-off-by: Aleksandar Markovic Cc: Ralf Baechle Cc: Douglas Leung Cc: Goran Ferenc Cc: "Maciej W. Rozycki" Cc: Miodrag Dinic Cc: Paul Burton Cc: Petar Jovanovic Cc: Raghu Gandham Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/17581/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/math-emu/cp1emu.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 16d9ef5a78c570..6f57212f565945 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -1795,7 +1795,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(fs, MIPSInst_FS(ir)); SPFROMREG(fd, MIPSInst_FD(ir)); rv.s = ieee754sp_maddf(fd, fs, ft); - break; + goto copcsr; } case fmsubf_op: { @@ -1809,7 +1809,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(fs, MIPSInst_FS(ir)); SPFROMREG(fd, MIPSInst_FD(ir)); rv.s = ieee754sp_msubf(fd, fs, ft); - break; + goto copcsr; } case frint_op: { @@ -1834,7 +1834,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(fs, MIPSInst_FS(ir)); rv.w = ieee754sp_2008class(fs); rfmt = w_fmt; - break; + goto copcsr; } case fmin_op: { @@ -1847,7 +1847,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmin(fs, ft); - break; + goto copcsr; } case fmina_op: { @@ -1860,7 +1860,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmina(fs, ft); - break; + goto copcsr; } case fmax_op: { @@ -1873,7 +1873,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmax(fs, ft); - break; + goto copcsr; } case fmaxa_op: { @@ -1886,7 +1886,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmaxa(fs, ft); - break; + goto copcsr; } case fabs_op: @@ -2165,7 +2165,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(fs, MIPSInst_FS(ir)); DPFROMREG(fd, MIPSInst_FD(ir)); rv.d = ieee754dp_maddf(fd, fs, ft); - break; + goto copcsr; } case fmsubf_op: { @@ -2179,7 +2179,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(fs, MIPSInst_FS(ir)); DPFROMREG(fd, MIPSInst_FD(ir)); rv.d = ieee754dp_msubf(fd, fs, ft); - break; + goto copcsr; } case frint_op: { @@ -2204,7 +2204,7 @@ static int 
fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(fs, MIPSInst_FS(ir)); rv.l = ieee754dp_2008class(fs); rfmt = l_fmt; - break; + goto copcsr; } case fmin_op: { @@ -2217,7 +2217,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmin(fs, ft); - break; + goto copcsr; } case fmina_op: { @@ -2230,7 +2230,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmina(fs, ft); - break; + goto copcsr; } case fmax_op: { @@ -2243,7 +2243,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmax(fs, ft); - break; + goto copcsr; } case fmaxa_op: { @@ -2256,7 +2256,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmaxa(fs, ft); - break; + goto copcsr; } case fabs_op: From 769bfea594f5edb92b077b50ee58866a989ca2bb Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Thu, 9 Nov 2017 11:59:24 +0100 Subject: [PATCH 0109/3326] rt2x00usb: mark device removed when get ENOENT usb error commit bfa62a52cad93686bb8d8171ea5288813248a7c6 upstream. ENOENT usb error mean "specified interface or endpoint does not exist or is not enabled". Mark device not present when we encounter this error similar like we do with ENODEV error. Otherwise we can have infinite loop in rt2x00usb_work_rxdone(), because we remove and put again RX entries to the queue infinitely. We can have similar situation when submit urb will fail all the time with other error, so we need consider to limit number of entries processed by rxdone work. But for now, since the patch fixes reproducible soft lockup issue on single processor systems and taken ENOENT error meaning, let apply this fix. Patch adds additional ENOENT check not only in rx kick routine, but also on other places where we check for ENODEV error. Reported-by: Richard Genoud Debugged-by: Richard Genoud Signed-off-by: Stanislaw Gruszka Tested-by: Richard Genoud Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index e2f4f5778267bc..086aad22743dc6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -57,7 +57,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev, if (status >= 0) return 0; - if (status == -ENODEV) { + if (status == -ENODEV || status == -ENOENT) { /* Device has disappeared. 
*/ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); break; @@ -321,7 +321,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data) status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { - if (status == -ENODEV) + if (status == -ENODEV || status == -ENOENT) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); @@ -410,7 +410,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data) status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { - if (status == -ENODEV) + if (status == -ENODEV || status == -ENOENT) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); From c1a14af38a50f714fb32581fcd083b49a28c7d41 Mon Sep 17 00:00:00 2001 From: Vitaly Wool Date: Fri, 17 Nov 2017 15:26:16 -0800 Subject: [PATCH 0110/3326] mm/z3fold.c: use kref to prevent page free/compact race commit 5d03a6613957785e94af7a4a6212ad4af66aa5c2 upstream. There is a race in the current z3fold implementation between do_compact() called in a work queue context and the page release procedure when page's kref goes to 0. do_compact() may be waiting for page lock, which is released by release_z3fold_page_locked right before putting the page onto the "stale" list, and then the page may be freed as do_compact() modifies its contents. The mechanism currently implemented to handle that (checking the PAGE_STALE flag) is not reliable enough. Instead, we'll use page's kref counter to guarantee that the page is not released if its compaction is scheduled. It then becomes compaction function's responsibility to decrease the counter and quit immediately if the page was actually freed. Link: http://lkml.kernel.org/r/20171117092032.00ea56f42affbed19f4fcc6c@gmail.com Signed-off-by: Vitaly Wool Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/z3fold.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/mm/z3fold.c b/mm/z3fold.c index b2ba2ba585f3c0..39e19125d6a019 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -404,8 +404,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked) WARN_ON(z3fold_page_trylock(zhdr)); else z3fold_page_lock(zhdr); - if (test_bit(PAGE_STALE, &page->private) || - !test_and_clear_bit(NEEDS_COMPACTING, &page->private)) { + if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) { z3fold_page_unlock(zhdr); return; } @@ -413,6 +412,11 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked) list_del_init(&zhdr->buddy); spin_unlock(&pool->lock); + if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { + atomic64_dec(&pool->pages_nr); + return; + } + z3fold_compact_page(zhdr); unbuddied = get_cpu_ptr(pool->unbuddied); fchunks = num_free_chunks(zhdr); @@ -753,9 +757,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) list_del_init(&zhdr->buddy); spin_unlock(&pool->lock); zhdr->cpu = -1; + kref_get(&zhdr->refcount); do_compact_page(zhdr, true); return; } + kref_get(&zhdr->refcount); queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); z3fold_page_unlock(zhdr); } From 7b7f543793b95fe1d21d520ea5479e72bd11e7af Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 17 Nov 2017 15:29:13 -0800 Subject: [PATCH 0111/3326] autofs: don't fail mount for transient error commit ecc0c469f27765ed1e2b967be0aa17cee1a60b76 upstream. 
Currently if the autofs kernel module gets an error when writing to the pipe which links to the daemon, then it marks the whole moutpoint as catatonic, and it will stop working. It is possible that the error is transient. This can happen if the daemon is slow and more than 16 requests queue up. If a subsequent process tries to queue a request, and is then signalled, the write to the pipe will return -ERESTARTSYS and autofs will take that as total failure. So change the code to assess -ERESTARTSYS and -ENOMEM as transient failures which only abort the current request, not the whole mountpoint. It isn't a crash or a data corruption, but having autofs mountpoints suddenly stop working is rather inconvenient. Ian said: : And given the problems with a half dozen (or so) user space applications : consuming large amounts of CPU under heavy mount and umount activity this : could happen more easily than we expect. Link: http://lkml.kernel.org/r/87y3norvgp.fsf@notabene.neil.brown.name Signed-off-by: NeilBrown Acked-by: Ian Kent Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/autofs4/waitq.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 4ac49d038bf38f..8fc41705c7cd50 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -81,7 +81,8 @@ static int autofs4_write(struct autofs_sb_info *sbi, spin_unlock_irqrestore(¤t->sighand->siglock, flags); } - return (bytes > 0); + /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */ + return bytes == 0 ? 0 : wr < 0 ? wr : -EIO; } static void autofs4_notify_daemon(struct autofs_sb_info *sbi, @@ -95,6 +96,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, } pkt; struct file *pipe = NULL; size_t pktsz; + int ret; pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n", (unsigned long) wq->wait_queue_token, @@ -169,7 +171,18 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, mutex_unlock(&sbi->wq_mutex); if (autofs4_write(sbi, pipe, &pkt, pktsz)) + switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) { + case 0: + break; + case -ENOMEM: + case -ERESTARTSYS: + /* Just fail this one */ + autofs4_wait_release(sbi, wq->wait_queue_token, ret); + break; + default: autofs4_catatonic_mode(sbi); + break; + } fput(pipe); } From f94782668b5ae6ee496aadfdea2055dad610b0d5 Mon Sep 17 00:00:00 2001 From: Andreas Rohner Date: Fri, 17 Nov 2017 15:29:35 -0800 Subject: [PATCH 0112/3326] nilfs2: fix race condition that causes file system corruption commit 31ccb1f7ba3cfe29631587d451cf5bb8ab593550 upstream. There is a race condition between nilfs_dirty_inode() and nilfs_set_file_dirty(). When a file is opened, nilfs_dirty_inode() is called to update the access timestamp in the inode. It calls __nilfs_mark_inode_dirty() in a separate transaction. __nilfs_mark_inode_dirty() caches the ifile buffer_head in the i_bh field of the inode info structure and marks it as dirty. After some data was written to the file in another transaction, the function nilfs_set_file_dirty() is called, which adds the inode to the ns_dirty_files list. Then the segment construction calls nilfs_segctor_collect_dirty_files(), which goes through the ns_dirty_files list and checks the i_bh field. If there is a cached buffer_head in i_bh it is not marked as dirty again. Since nilfs_dirty_inode() and nilfs_set_file_dirty() use separate transactions, it is possible that a segment construction that writes out the ifile occurs in-between the two. 
If this happens the inode is not on the ns_dirty_files list, but its ifile block is still marked as dirty and written out. In the next segment construction, the data for the file is written out and nilfs_bmap_propagate() updates the b-tree. Eventually the bmap root is written into the i_bh block, which is not dirty, because it was written out in another segment construction. As a result the bmap update can be lost, which leads to file system corruption. Either the virtual block address points to an unallocated DAT block, or the DAT entry will be reused for something different. The error can remain undetected for a long time. A typical error message would be one of the "bad btree" errors or a warning that a DAT entry could not be found. This bug can be reproduced reliably by a simple benchmark that creates and overwrites millions of 4k files. Link: http://lkml.kernel.org/r/1509367935-3086-2-git-send-email-konishi.ryusuke@lab.ntt.co.jp Signed-off-by: Andreas Rohner Signed-off-by: Ryusuke Konishi Tested-by: Andreas Rohner Tested-by: Ryusuke Konishi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/nilfs2/segment.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 70ded52dc1dd02..50e12956c7377b 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1958,8 +1958,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, err, ii->vfs_inode.i_ino); return err; } - mark_buffer_dirty(ibh); - nilfs_mdt_mark_dirty(ifile); spin_lock(&nilfs->ns_inode_lock); if (likely(!ii->i_bh)) ii->i_bh = ibh; @@ -1968,6 +1966,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, goto retry; } + // Always redirty the buffer to avoid race condition + mark_buffer_dirty(ii->i_bh); + nilfs_mdt_mark_dirty(ifile); + clear_bit(NILFS_I_QUEUED, &ii->i_state); set_bit(NILFS_I_BUSY, &ii->i_state); list_move_tail(&ii->i_dirty, &sci->sc_dirty_files); From ddf1264ec523c7ac7ae0dcbcaccd03a0a034b1b6 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 29 Oct 2017 06:30:19 -0400 Subject: [PATCH 0113/3326] fscrypt: lock mutex before checking for bounce page pool commit a0b3bc855374c50b5ea85273553485af48caf2f7 upstream. fscrypt_initialize(), which allocates the global bounce page pool when an encrypted file is first accessed, uses "double-checked locking" to try to avoid locking fscrypt_init_mutex. However, it doesn't use any memory barriers, so it's theoretically possible for a thread to observe a bounce page pool which has not been fully initialized. This is a classic bug with "double-checked locking". While "only a theoretical issue" in the latest kernel, in pre-4.8 kernels the pointer that was checked was not even the last to be initialized, so it was easily possible for a crash (NULL pointer dereference) to happen. This was changed only incidentally by the large refactor to use fs/crypto/. Solve both problems in a trivial way that can easily be backported: just always take the mutex. It's theoretically less efficient, but it shouldn't be noticeable in practice as the mutex is only acquired very briefly once per encrypted file. Later I'd like to make this use a helper macro like DO_ONCE(). However, DO_ONCE() runs in atomic context, so we'd need to add a new macro that allows blocking. 
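The difference is the textbook one between broken double-checked locking and plain locking; a generic sketch with placeholder names (not the actual fscrypt code):

  static DEFINE_MUTEX(init_mutex);        /* placeholder names for illustration */
  static struct page **bounce_pool;

  static int init_bounce_pool(void)
  {
          int err = 0;

          /*
           * The old fast path did "if (bounce_pool) return 0;" with no memory
           * barrier, so a reader could observe a non-NULL pointer before the
           * pool's contents were visible. Always taking the mutex avoids that,
           * and the cost is negligible: this runs once per encrypted file.
           */
          mutex_lock(&init_mutex);
          if (!bounce_pool)
                  err = alloc_bounce_pool(&bounce_pool);  /* hypothetical allocator */
          mutex_unlock(&init_mutex);

          return err;
  }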
Signed-off-by: Eric Biggers Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/crypto/crypto.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index c7835df7e7b847..d262a93d9b31cb 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -410,11 +410,8 @@ int fscrypt_initialize(unsigned int cop_flags) { int i, res = -ENOMEM; - /* - * No need to allocate a bounce page pool if there already is one or - * this FS won't use it. - */ - if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool) + /* No need to allocate a bounce page pool if this FS won't use it. */ + if (cop_flags & FS_CFLG_OWN_PAGES) return 0; mutex_lock(&fscrypt_init_mutex); From bc6e896836242678cd85252336e33d77039cdff3 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 22 Aug 2017 23:41:28 +0300 Subject: [PATCH 0114/3326] eCryptfs: use after free in ecryptfs_release_messaging() commit db86be3a12d0b6e5c5b51c2ab2a48f06329cb590 upstream. We're freeing the list iterator so we should be using the _safe() version of hlist_for_each_entry(). Fixes: 88b4a07e6610 ("[PATCH] eCryptfs: Public key transport mechanism") Signed-off-by: Dan Carpenter Signed-off-by: Tyler Hicks Signed-off-by: Greg Kroah-Hartman --- fs/ecryptfs/messaging.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index 286f10b0363b15..4f457d5c49331c 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -442,15 +442,16 @@ void ecryptfs_release_messaging(void) } if (ecryptfs_daemon_hash) { struct ecryptfs_daemon *daemon; + struct hlist_node *n; int i; mutex_lock(&ecryptfs_daemon_hash_mux); for (i = 0; i < (1 << ecryptfs_hash_bits); i++) { int rc; - hlist_for_each_entry(daemon, - &ecryptfs_daemon_hash[i], - euid_chain) { + hlist_for_each_entry_safe(daemon, n, + &ecryptfs_daemon_hash[i], + euid_chain) { rc = ecryptfs_exorcise_daemon(daemon); if (rc) printk(KERN_ERR "%s: Error whilst " From bcae2363e26309c8063384706dc1e546caed8a36 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 6 Nov 2017 21:57:26 -0800 Subject: [PATCH 0115/3326] libceph: don't WARN() if user tries to add invalid key commit b11270853fa3654f08d4a6a03b23ddb220512d8d upstream. The WARN_ON(!key->len) in set_secret() in net/ceph/crypto.c is hit if a user tries to add a key of type "ceph" with an invalid payload as follows (assuming CONFIG_CEPH_LIB=y): echo -e -n '\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \ | keyctl padd ceph desc @s This can be hit by fuzzers. As this is merely bad input and not a kernel bug, replace the WARN_ON() with return -EINVAL. 
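The change itself is the usual guard for untrusted input, shown here out of context as a sketch: key->len is decoded from the user-supplied payload, so an empty secret is bad input to be rejected, not a kernel bug worth a warning.

  if (!key->len)
          return -EINVAL;         /* reject empty secrets instead of WARN_ON() */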
Fixes: 7af3ea189a9a ("libceph: stop allocating a new cipher on every crypto request") Signed-off-by: Eric Biggers Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov Signed-off-by: Greg Kroah-Hartman --- net/ceph/crypto.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index 489610ac1cddad..bf9d079cbafd6e 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -37,7 +37,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf) return -ENOTSUPP; } - WARN_ON(!key->len); + if (!key->len) + return -EINVAL; + key->key = kmemdup(buf, key->len, GFP_NOIO); if (!key->key) { ret = -ENOMEM; From e9c80881b383df54cd519e10314dce4c47df8b8a Mon Sep 17 00:00:00 2001 From: Coly Li Date: Fri, 13 Oct 2017 16:35:29 -0700 Subject: [PATCH 0116/3326] bcache: check ca->alloc_thread initialized before wake up it commit 91af8300d9c1d7c6b6a2fd754109e08d4798b8d8 upstream. In bcache code, sysfs entries are created before all resources get allocated, e.g. allocation thread of a cache set. There is posibility for NULL pointer deference if a resource is accessed but which is not initialized yet. Indeed Jorg Bornschein catches one on cache set allocation thread and gets a kernel oops. The reason for this bug is, when bch_bucket_alloc() is called during cache set registration and attaching, ca->alloc_thread is not properly allocated and initialized yet, call wake_up_process() on ca->alloc_thread triggers NULL pointer deference failure. A simple and fast fix is, before waking up ca->alloc_thread, checking whether it is allocated, and only wake up ca->alloc_thread when it is not NULL. Signed-off-by: Coly Li Reported-by: Jorg Bornschein Cc: Kent Overstreet Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/alloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 08035634795c12..c9934139d6096d 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -407,7 +407,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) finish_wait(&ca->set->bucket_wait, &w); out: - wake_up_process(ca->alloc_thread); + if (ca->alloc_thread) + wake_up_process(ca->alloc_thread); trace_bcache_alloc(ca, reserve); From 5c21c3dde403cdf4b9c777e9dbd9399e6eb8e9b6 Mon Sep 17 00:00:00 2001 From: Greg Edwards Date: Tue, 24 Oct 2017 11:21:48 -0600 Subject: [PATCH 0117/3326] fs: guard_bio_eod() needs to consider partitions commit 67f2519fe2903c4041c0e94394d14d372fe51399 upstream. guard_bio_eod() needs to look at the partition capacity, not just the capacity of the whole device, when determining if truncation is necessary. 
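Condensed, the change resolves the bio's target partition under RCU and falls back to the whole-disk capacity only when the bio is not aimed at a partition (the log below shows the symptom of checking the wrong limit); a sketch of the lookup used in the hunk further down:

  struct hd_struct *part;
  sector_t maxsector;

  rcu_read_lock();
  part = __disk_get_part(bio->bi_disk, bio->bi_partno);
  maxsector = part ? part_nr_sects_read(part)     /* partition size */
                   : get_capacity(bio->bi_disk);  /* whole disk     */
  rcu_read_unlock();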
[ 60.268688] attempt to access beyond end of device [ 60.268690] unknown-block(9,1): rw=0, want=67103509, limit=67103506 [ 60.268693] buffer_io_error: 2 callbacks suppressed [ 60.268696] Buffer I/O error on dev md1p7, logical block 4524305, async page read Fixes: 74d46992e0d9 ("block: replace bi_bdev with a gendisk pointer and partitions index") Reviewed-by: Christoph Hellwig Signed-off-by: Greg Edwards Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- fs/buffer.c | 10 +++++++++- include/linux/genhd.h | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/fs/buffer.c b/fs/buffer.c index 170df856bdb997..b96f3b98a6ef9f 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3055,8 +3055,16 @@ void guard_bio_eod(int op, struct bio *bio) sector_t maxsector; struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; unsigned truncated_bytes; + struct hd_struct *part; + + rcu_read_lock(); + part = __disk_get_part(bio->bi_disk, bio->bi_partno); + if (part) + maxsector = part_nr_sects_read(part); + else + maxsector = get_capacity(bio->bi_disk); + rcu_read_unlock(); - maxsector = get_capacity(bio->bi_disk); if (!maxsector) return; diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 44790523057f03..5ade8f2a698764 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -243,6 +243,7 @@ static inline dev_t part_devt(struct hd_struct *part) return part_to_dev(part)->devt; } +extern struct hd_struct *__disk_get_part(struct gendisk *disk, int partno); extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno); static inline void disk_put_part(struct hd_struct *part) From 1dd7dd07e875428e5f05ec55d90472ed6089b08e Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Mon, 30 Oct 2017 21:14:56 +0100 Subject: [PATCH 0118/3326] fanotify: fix fsnotify_prepare_user_wait() failure commit f37650f1c7c71cf5180b43229d13b421d81e7170 upstream. If fsnotify_prepare_user_wait() fails, we leave the event on the notification list. Which will result in a warning in fsnotify_destroy_event() and later use-after-free. Instead of adding a new helper to remove the event from the list in this case, I opted to move the prepare/finish up into fanotify_handle_event(). This will allow these to be moved further out into the generic code later, and perhaps let us move to non-sleeping RCU. Reviewed-by: Amir Goldstein Signed-off-by: Miklos Szeredi Fixes: 05f0e38724e8 ("fanotify: Release SRCU lock when waiting for userspace response") Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/notify/fanotify/fanotify.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 09640b54636386..3c7053207297f9 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -65,19 +65,8 @@ static int fanotify_get_response(struct fsnotify_group *group, pr_debug("%s: group=%p event=%p\n", __func__, group, event); - /* - * fsnotify_prepare_user_wait() fails if we race with mark deletion. - * Just let the operation pass in that case. 
- */ - if (!fsnotify_prepare_user_wait(iter_info)) { - event->response = FAN_ALLOW; - goto out; - } - wait_event(group->fanotify_data.access_waitq, event->response); - fsnotify_finish_user_wait(iter_info); -out: /* userspace responded, convert to something usable */ switch (event->response) { case FAN_ALLOW: @@ -212,9 +201,21 @@ static int fanotify_handle_event(struct fsnotify_group *group, pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, mask); +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS + if (mask & FAN_ALL_PERM_EVENTS) { + /* + * fsnotify_prepare_user_wait() fails if we race with mark + * deletion. Just let the operation pass in that case. + */ + if (!fsnotify_prepare_user_wait(iter_info)) + return 0; + } +#endif + event = fanotify_alloc_event(inode, mask, data); + ret = -ENOMEM; if (unlikely(!event)) - return -ENOMEM; + goto finish; fsn_event = &event->fse; ret = fsnotify_add_event(group, fsn_event, fanotify_merge); @@ -224,7 +225,8 @@ static int fanotify_handle_event(struct fsnotify_group *group, /* Our event wasn't used in the end. Free it. */ fsnotify_destroy_event(group, fsn_event); - return 0; + ret = 0; + goto finish; } #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS @@ -233,6 +235,11 @@ static int fanotify_handle_event(struct fsnotify_group *group, iter_info); fsnotify_destroy_event(group, fsn_event); } +finish: + if (mask & FAN_ALL_PERM_EVENTS) + fsnotify_finish_user_wait(iter_info); +#else +finish: #endif return ret; } From f2122d66edb0a2df551eabce64ed364c8eb2d5da Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 19 Oct 2017 16:47:48 +0200 Subject: [PATCH 0119/3326] isofs: fix timestamps beyond 2027 commit 34be4dbf87fc3e474a842305394534216d428f5d upstream. isofs uses a 'char' variable to load the number of years since 1900 for an inode timestamp. On architectures that use a signed char type by default, this results in an invalid date for anything beyond 2027. This changes the function argument to a 'u8' array, which is defined the same way on all architectures, and unambiguously lets us use years until 2155. This should be backported to all kernels that might still be in use by that date. Signed-off-by: Arnd Bergmann Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/isofs/isofs.h | 2 +- fs/isofs/rock.h | 2 +- fs/isofs/util.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index 57d4c3e2e94a4d..8e42b4fbefdc35 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h @@ -107,7 +107,7 @@ static inline unsigned int isonum_733(char *p) /* Ignore bigendian datum due to broken mastering programs */ return get_unaligned_le32(p); } -extern int iso_date(char *, int); +extern int iso_date(u8 *, int); struct inode; /* To make gcc happy */ diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h index ef03625431bbff..ac5cc587d718fe 100644 --- a/fs/isofs/rock.h +++ b/fs/isofs/rock.h @@ -66,7 +66,7 @@ struct RR_PL_s { }; struct stamp { - char time[7]; + __u8 time[7]; /* actually 6 unsigned, 1 signed */ } __attribute__ ((packed)); struct RR_TF_s { diff --git a/fs/isofs/util.c b/fs/isofs/util.c index 42544bf0e2223b..e88dba72166187 100644 --- a/fs/isofs/util.c +++ b/fs/isofs/util.c @@ -16,7 +16,7 @@ * to GMT. Thus we should always be correct. 
*/ -int iso_date(char * p, int flag) +int iso_date(u8 *p, int flag) { int year, month, day, hour, minute, second, tz; int crtime; From f1117628310079bbdd4a0fa951602419f258bead Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 22 Aug 2017 16:00:39 -0400 Subject: [PATCH 0120/3326] btrfs: change how we decide to commit transactions during flushing commit 996478ca9c460886ac147eb0d00e99841b71d31b upstream. Nikolay reported that generic/273 was failing currently with ENOSPC. Turns out this is because we get to the point where the outstanding reservations are greater than the pinned space on the fs. This is a mistake, previously we used the current reservation amount in may_commit_transaction, not the entire outstanding reservation amount. Fix this to find the minimum byte size needed to make progress in flushing, and pass that into may_commit_transaction. From there we can make a smarter decision on whether to commit the transaction or not. This fixes the failure in generic/273. From Nikolai, IOW: when we go to the final stage of deciding whether to do trans commit, instead of passing all the reservations from all tickets we just pass the reservation for the current ticket. Otherwise, in case all reservations exceed pinned space, then we don't commit transaction and fail prematurely. Before we passed num_bytes from flush_space, where num_bytes was the sum of all pending reserverations, but now all we do is take the first ticket and commit the trans if we can satisfy that. Fixes: 957780eb2788 ("Btrfs: introduce ticketed enospc infrastructure") Reported-by: Nikolay Borisov Signed-off-by: Josef Bacik Reviewed-by: Nikolay Borisov Tested-by: Nikolay Borisov [ added Nikolai's comment ] Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/extent-tree.c | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e2d7e86b51d1b2..08698105fa4a86 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4919,6 +4919,13 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim, } } +struct reserve_ticket { + u64 bytes; + int error; + struct list_head list; + wait_queue_head_t wait; +}; + /** * maybe_commit_transaction - possibly commit the transaction if its ok to * @root - the root we're allocating for @@ -4930,18 +4937,29 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim, * will return -ENOSPC. */ static int may_commit_transaction(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - u64 bytes, int force) + struct btrfs_space_info *space_info) { + struct reserve_ticket *ticket = NULL; struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; struct btrfs_trans_handle *trans; + u64 bytes; trans = (struct btrfs_trans_handle *)current->journal_info; if (trans) return -EAGAIN; - if (force) - goto commit; + spin_lock(&space_info->lock); + if (!list_empty(&space_info->priority_tickets)) + ticket = list_first_entry(&space_info->priority_tickets, + struct reserve_ticket, list); + else if (!list_empty(&space_info->tickets)) + ticket = list_first_entry(&space_info->tickets, + struct reserve_ticket, list); + bytes = (ticket) ? 
ticket->bytes : 0; + spin_unlock(&space_info->lock); + + if (!bytes) + return 0; /* See if there is enough pinned space to make this reservation */ if (percpu_counter_compare(&space_info->total_bytes_pinned, @@ -4956,8 +4974,12 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, return -ENOSPC; spin_lock(&delayed_rsv->lock); + if (delayed_rsv->size > bytes) + bytes = 0; + else + bytes -= delayed_rsv->size; if (percpu_counter_compare(&space_info->total_bytes_pinned, - bytes - delayed_rsv->size) < 0) { + bytes) < 0) { spin_unlock(&delayed_rsv->lock); return -ENOSPC; } @@ -4971,13 +4993,6 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, return btrfs_commit_transaction(trans); } -struct reserve_ticket { - u64 bytes; - int error; - struct list_head list; - wait_queue_head_t wait; -}; - /* * Try to flush some data based on policy set by @state. This is only advisory * and may fail for various reasons. The caller is supposed to examine the @@ -5027,8 +5042,7 @@ static void flush_space(struct btrfs_fs_info *fs_info, ret = 0; break; case COMMIT_TRANS: - ret = may_commit_transaction(fs_info, space_info, - num_bytes, 0); + ret = may_commit_transaction(fs_info, space_info); break; default: ret = -ENOSPC; From d628ac8abdfc0d3631cb76d6fc1f0b80d988b34f Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 13 Oct 2017 10:27:45 -0700 Subject: [PATCH 0121/3326] f2fs: expose some sectors to user in inline data or dentry case commit 5b4267d195dd887c4412e34b5a7365baa741b679 upstream. If there's some data written through inline data or dentry, we need to shouw st_blocks. This fixes reporting zero blocks even though there is small written data. Reviewed-by: Chao Yu [Jaegeuk Kim: avoid link file for quotacheck] Signed-off-by: Jaegeuk Kim Signed-off-by: Greg Kroah-Hartman --- fs/f2fs/file.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 517e112c8a9a94..6ce4678723762a 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -683,6 +683,12 @@ int f2fs_getattr(const struct path *path, struct kstat *stat, STATX_ATTR_NODUMP); generic_fillattr(inode, stat); + + /* we need to show initial sectors used for inline_data/dentries */ + if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) || + f2fs_has_inline_dentry(inode)) + stat->blocks += (stat->size + 511) >> 9; + return 0; } From afaacc000e07b084e37f5b0a0f79ce62a399f0c9 Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Tue, 7 Nov 2017 16:25:47 -0600 Subject: [PATCH 0122/3326] NFS: Fix typo in nomigration mount option commit f02fee227e5f21981152850744a6084ff3fa94ee upstream. The option was incorrectly masking off all other options. Signed-off-by: Joshua Watt Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- fs/nfs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/super.c b/fs/nfs/super.c index c9d24bae302514..216f67d628b3c9 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1332,7 +1332,7 @@ static int nfs_parse_mount_options(char *raw, mnt->options |= NFS_OPTION_MIGRATION; break; case Opt_nomigration: - mnt->options &= NFS_OPTION_MIGRATION; + mnt->options &= ~NFS_OPTION_MIGRATION; break; /* From 57f3c05d030d38355daa001ac28a4c37238e2eb8 Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Fri, 10 Nov 2017 06:27:49 -0500 Subject: [PATCH 0123/3326] NFS: Revert "NFS: Move the flock open mode check into nfs_flock()" commit fcfa447062b2061e11f68b846d61cbfe60d0d604 upstream. 
Commit e12937279c8b "NFS: Move the flock open mode check into nfs_flock()" changed NFSv3 behavior for flock() such that the open mode must match the lock type, however that requirement shouldn't be enforced for flock(). Signed-off-by: Benjamin Coddington Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- fs/nfs/file.c | 18 ++---------------- fs/nfs/nfs4proc.c | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 0214dd1e10602b..81cca49a837506 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -829,23 +829,9 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl) if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK) is_local = 1; - /* - * VFS doesn't require the open mode to match a flock() lock's type. - * NFS, however, may simulate flock() locking with posix locking which - * requires the open mode to match the lock type. - */ - switch (fl->fl_type) { - case F_UNLCK: + /* We're simulating flock() locks using posix locks on the server */ + if (fl->fl_type == F_UNLCK) return do_unlk(filp, cmd, fl, is_local); - case F_RDLCK: - if (!(filp->f_mode & FMODE_READ)) - return -EBADF; - break; - case F_WRLCK: - if (!(filp->f_mode & FMODE_WRITE)) - return -EBADF; - } - return do_setlk(filp, cmd, fl, is_local); } EXPORT_SYMBOL_GPL(nfs_flock); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f90090e8c959b1..f4224eabe7bbef 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6568,6 +6568,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) return -ENOLCK; + /* + * Don't rely on the VFS having checked the file open mode, + * since it won't do this for flock() locks. + */ + switch (request->fl_type) { + case F_RDLCK: + if (!(filp->f_mode & FMODE_READ)) + return -EBADF; + break; + case F_WRLCK: + if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; + } + status = nfs4_set_lock_state(state, request); if (status != 0) return status; From aed1a43399c78d7c329ec39b9a4d352216c40a16 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 5 Nov 2017 15:45:22 -0500 Subject: [PATCH 0124/3326] nfs: Fix ugly referral attributes commit c05cefcc72416a37eba5a2b35f0704ed758a9145 upstream. Before traversing a referral and performing a mount, the mounted-on directory looks strange: dr-xr-xr-x. 2 4294967294 4294967294 0 Dec 31 1969 dir.0 nfs4_get_referral is wiping out any cached attributes with what was returned via GETATTR(fs_locations), but the bit mask for that operation does not request any file attributes. Retrieve owner and timestamp information so that the memcpy in nfs4_get_referral fills in more attributes. 
Changes since v1: - Don't request attributes that the client unconditionally replaces - Request only MOUNTED_ON_FILEID or FILEID attribute, not both - encode_fs_locations() doesn't use the third bitmask word Fixes: 6b97fd3da1ea ("NFSv4: Follow a referral") Suggested-by: Pradeep Thomas Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- fs/nfs/nfs4proc.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f4224eabe7bbef..2241d52710f7b0 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -254,15 +254,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE }; const u32 nfs4_fs_locations_bitmap[3] = { - FATTR4_WORD0_TYPE - | FATTR4_WORD0_CHANGE + FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID | FATTR4_WORD0_FILEID | FATTR4_WORD0_FS_LOCATIONS, - FATTR4_WORD1_MODE - | FATTR4_WORD1_NUMLINKS - | FATTR4_WORD1_OWNER + FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV | FATTR4_WORD1_SPACE_USED @@ -6777,9 +6774,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, struct page *page) { struct nfs_server *server = NFS_SERVER(dir); - u32 bitmask[3] = { - [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, - }; + u32 bitmask[3]; struct nfs4_fs_locations_arg args = { .dir_fh = NFS_FH(dir), .name = name, @@ -6798,12 +6793,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, dprintk("%s: start\n", __func__); + bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; + bitmask[1] = nfs4_fattr_bitmap[1]; + /* Ask for the fileid of the absent filesystem if mounted_on_fileid * is not supported */ if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) - bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; + bitmask[0] &= ~FATTR4_WORD0_FILEID; else - bitmask[0] |= FATTR4_WORD0_FILEID; + bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; nfs_fattr_init(&fs_locations->fattr); fs_locations->server = server; From 2deb89453f74c576e145f4fd21664d175daf6bee Mon Sep 17 00:00:00 2001 From: Anna Schumaker Date: Wed, 1 Nov 2017 15:48:43 -0400 Subject: [PATCH 0125/3326] NFS: Avoid RCU usage in tracepoints commit 3944369db701f075092357b511fd9f5755771585 upstream. There isn't an obvious way to acquire and release the RCU lock during a tracepoint, so we can't use the rpc_peeraddr2str() function here. Instead, rely on the client's cl_hostname, which should have similar enough information without needing an rcu_dereference(). Reported-by: Dave Jones Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- fs/nfs/nfs4trace.h | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index e7c6275519b09d..71d2ca04a9f87d 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -202,17 +202,13 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event, TP_ARGS(clp, error), TP_STRUCT__entry( - __string(dstaddr, - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR)) + __string(dstaddr, clp->cl_hostname) __field(int, error) ), TP_fast_assign( __entry->error = error; - __assign_str(dstaddr, - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR)); + __assign_str(dstaddr, clp->cl_hostname); ), TP_printk( @@ -1133,9 +1129,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event, __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) - __string(dstaddr, clp ? 
- rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR) : "unknown") + __string(dstaddr, clp ? clp->cl_hostname : "unknown") ), TP_fast_assign( @@ -1148,9 +1142,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event, __entry->fileid = 0; __entry->dev = 0; } - __assign_str(dstaddr, clp ? - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR) : "unknown") + __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown") ), TP_printk( @@ -1192,9 +1184,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event, __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) - __string(dstaddr, clp ? - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR) : "unknown") + __string(dstaddr, clp ? clp->cl_hostname : "unknown") __field(int, stateid_seq) __field(u32, stateid_hash) ), @@ -1209,9 +1199,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event, __entry->fileid = 0; __entry->dev = 0; } - __assign_str(dstaddr, clp ? - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR) : "unknown") + __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown") __entry->stateid_seq = be32_to_cpu(stateid->seqid); __entry->stateid_hash = From 5756707370240737fc8fca120c6e2b81c6b348c1 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 25 Aug 2017 17:34:41 +1000 Subject: [PATCH 0126/3326] NFS: revalidate "." etc correctly on "open". commit b688741cb06695312f18b730653d6611e1bad28d upstream. For correct close-to-open semantics, NFS must validate the change attribute of a directory (or file) on open. Since commit ecf3d1f1aa74 ("vfs: kill FS_REVAL_DOT by adding a d_weak_revalidate dentry op"), open() of "." or a path ending ".." is not revalidated reliably (except when that direct is a mount point). Prior to that commit, "." was revalidated using nfs_lookup_revalidate() which checks the LOOKUP_OPEN flag and forces revalidation if the flag is set. Since that commit, nfs_weak_revalidate() is used for NFSv3 (which ignores the flags) and nothing is used for NFSv4. This is fixed by using nfs_lookup_verify_inode() in nfs_weak_revalidate(). This does the revalidation exactly when needed. Also, add a definition of .d_weak_revalidate for NFSv4. The incorrect behavior is easily demonstrated by running "echo *" in some non-mountpoint NFS directory while watching network traffic. Without this patch, "echo *" sometimes doesn't produce any traffic. With the patch it always does. Fixes: ecf3d1f1aa74 ("vfs: kill FS_REVAL_DOT by adding a d_weak_revalidate dentry op") Signed-off-by: NeilBrown Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- fs/nfs/dir.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 5ceaeb1f6fb69d..b03b3bc05f9615 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1241,8 +1241,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags) return 0; } - if (nfs_mapping_need_revalidate_inode(inode)) - error = __nfs_revalidate_inode(NFS_SERVER(inode), inode); + error = nfs_lookup_verify_inode(inode, flags); dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n", __func__, inode->i_ino, error ? 
"invalid" : "valid"); return !error; @@ -1393,6 +1392,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int); const struct dentry_operations nfs4_dentry_operations = { .d_revalidate = nfs4_lookup_revalidate, + .d_weak_revalidate = nfs_weak_revalidate, .d_delete = nfs_dentry_delete, .d_iput = nfs_dentry_iput, .d_automount = nfs_d_automount, From 584f0bb5681a1ed7e05b30aabab2764d55cbc3cb Mon Sep 17 00:00:00 2001 From: Andrew Elble Date: Fri, 3 Nov 2017 14:06:31 -0400 Subject: [PATCH 0127/3326] nfsd: deal with revoked delegations appropriately commit 95da1b3a5aded124dd1bda1e3cdb876184813140 upstream. If a delegation has been revoked by the server, operations using that delegation should error out with NFS4ERR_DELEG_REVOKED in the >4.1 case, and NFS4ERR_BAD_STATEID otherwise. The server needs NFSv4.1 clients to explicitly free revoked delegations. If the server returns NFS4ERR_DELEG_REVOKED, the client will do that; otherwise it may just forget about the delegation and be unable to recover when it later sees SEQ4_STATUS_RECALLABLE_STATE_REVOKED set on a SEQUENCE reply. That can cause the Linux 4.1 client to loop in its stage manager. Signed-off-by: Andrew Elble Reviewed-by: Trond Myklebust Signed-off-by: J. Bruce Fields Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/nfs4state.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 0c04f81aa63b22..d386d569edbc41 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3966,7 +3966,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei { struct nfs4_stid *ret; - ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); + ret = find_stateid_by_type(cl, s, + NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID); if (!ret) return NULL; return delegstateid(ret); @@ -3989,6 +3990,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); if (deleg == NULL) goto out; + if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) { + nfs4_put_stid(&deleg->dl_stid); + if (cl->cl_minorversion) + status = nfserr_deleg_revoked; + goto out; + } flags = share_access_to_flags(open->op_share_access); status = nfs4_check_delegmode(deleg, flags); if (status) { @@ -4858,6 +4865,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, struct nfs4_stid **s, struct nfsd_net *nn) { __be32 status; + bool return_revoked = false; + + /* + * only return revoked delegations if explicitly asked. + * otherwise we report revoked or bad_stateid status. + */ + if (typemask & NFS4_REVOKED_DELEG_STID) + return_revoked = true; + else if (typemask & NFS4_DELEG_STID) + typemask |= NFS4_REVOKED_DELEG_STID; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) return nfserr_bad_stateid; @@ -4872,6 +4889,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, *s = find_stateid_by_type(cstate->clp, stateid, typemask); if (!*s) return nfserr_bad_stateid; + if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) { + nfs4_put_stid(*s); + if (cstate->minorversion) + return nfserr_deleg_revoked; + return nfserr_bad_stateid; + } return nfs_ok; } From 9f724960c581097e6a84a2f4b9c77a9f17a372cc Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 14 Sep 2017 13:17:44 -0500 Subject: [PATCH 0128/3326] rtlwifi: rtl8192ee: Fix memory leak when loading firmware commit 519ce2f933fa14acf69d5c8cabcc18711943d629 upstream. 
In routine rtl92ee_set_fw_rsvdpagepkt(), the driver allocates an skb, but never calls rtl_cmd_send_packet(), which will free the buffer. All other rtlwifi drivers perform this operation correctly. This problem has been in the driver since it was included in the kernel. Fortunately, each firmware load only leaks 4 buffers, which likely explains why it has not previously been detected. Signed-off-by: Larry Finger Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 7eae27f8e173fd..f9563ae301ad28 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -682,7 +682,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct sk_buff *skb = NULL; - + bool rtstatus; u32 totalpacketlen; u8 u1rsvdpageloc[5] = { 0 }; bool b_dlok = false; @@ -768,7 +768,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) skb = dev_alloc_skb(totalpacketlen); skb_put_data(skb, &reserved_page_packet, totalpacketlen); - b_dlok = true; + rtstatus = rtl_cmd_send_packet(hw, skb); + if (rtstatus) + b_dlok = true; if (b_dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD , From 404dcc55b001e2d56d75fa0ea65635963f16fe68 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 6 Nov 2017 14:55:35 +0100 Subject: [PATCH 0129/3326] rtlwifi: fix uninitialized rtlhal->last_suspend_sec time commit 3f2a162fab15aee243178b5308bb5d1206fc4043 upstream. We set rtlhal->last_suspend_sec to an uninitialized stack variable, but unfortunately gcc never warned about this, I only found it while working on another patch. I opened a gcc bug for this. Presumably the value of rtlhal->last_suspend_sec is not all that important, but it does get used, so we probably want the patch backported to stable kernels. Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82839 Signed-off-by: Arnd Bergmann Acked-by: Larry Finger Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 1d431d4bf6d261..9ac1511de7ba2f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -1372,6 +1372,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw) ppsc->wakeup_reason = 0; + do_gettimeofday(&ts); rtlhal->last_suspend_sec = ts.tv_sec; switch (fw_reason) { From 55e357bc316bc3f46e96bc55415cb7d512c8db6f Mon Sep 17 00:00:00 2001 From: Thomas Backlund Date: Tue, 14 Nov 2017 12:37:51 +0200 Subject: [PATCH 0130/3326] iwlwifi: fix firmware names for 9000 and A000 series hw commit c2c48ddfc8b03b9ecb51d2832b586497b37531bc upstream. 
iwlwifi 9000 and a0000 series hw contains an extra dash in firmware file name as seeen in modinfo output for kernel 4.14: firmware: iwlwifi-9260-th-b0-jf-b0--34.ucode firmware: iwlwifi-9260-th-a0-jf-a0--34.ucode firmware: iwlwifi-9000-pu-a0-jf-b0--34.ucode firmware: iwlwifi-9000-pu-a0-jf-a0--34.ucode firmware: iwlwifi-QuQnj-a0-hr-a0--34.ucode firmware: iwlwifi-QuQnj-a0-jf-b0--34.ucode firmware: iwlwifi-QuQnj-f0-hr-a0--34.ucode firmware: iwlwifi-Qu-a0-jf-b0--34.ucode firmware: iwlwifi-Qu-a0-hr-a0--34.ucode Fix that by dropping the extra adding of '"-"'. Signed-off-by: Thomas Backlund Signed-off-by: Luca Coelho Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 6 +++--- drivers/net/wireless/intel/iwlwifi/cfg/a000.c | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index e8b5ff42f5a8f3..6f43faab491098 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -79,11 +79,11 @@ #define IWL9000_MODULE_FIRMWARE(api) \ IWL9000_FW_PRE "-" __stringify(api) ".ucode" #define IWL9000RFB_MODULE_FIRMWARE(api) \ - IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode" + IWL9000RFB_FW_PRE __stringify(api) ".ucode" #define IWL9260A_MODULE_FIRMWARE(api) \ - IWL9260A_FW_PRE "-" __stringify(api) ".ucode" + IWL9260A_FW_PRE __stringify(api) ".ucode" #define IWL9260B_MODULE_FIRMWARE(api) \ - IWL9260B_FW_PRE "-" __stringify(api) ".ucode" + IWL9260B_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_9000 10 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c index a440140ed8dda3..7eade165b7472c 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c @@ -80,15 +80,15 @@ #define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_A000_HR_MODULE_FIRMWARE(api) \ - IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_FW_PRE __stringify(api) ".ucode" #define IWL_A000_JF_MODULE_FIRMWARE(api) \ - IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_JF_FW_PRE __stringify(api) ".ucode" #define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode" #define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode" #define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_A000 10 From 459aad50a56bfca28cc41408448d08abdd7e17ae Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 5 Oct 2017 16:23:16 +1100 Subject: [PATCH 0131/3326] md: fix deadlock error in recent patch. commit d47c8ad261f787af22a220ffcc2d07afba809223 upstream. A recent patch aimed to cause md_write_start() to fail (rather than block) when the mddev was suspending, so as to avoid deadlocks. Unfortunately the test in wait_event() was wrong, and it didn't change behaviour at all. We wait_event() must wait until the metadata is written OR the array is suspending. 
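For clarity, the bug is only in the wait predicate (sketch below; the real hunk is in the diff that follows): the old condition required the metadata write to have completed AND the array not to be suspended, so a suspending array with a pending change could never be woken. The intended condition is:

        /* old, never true while suspending with MD_SB_CHANGE_PENDING set:
         *      !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended
         * new: wake as soon as the change is written OR the array is suspending
         */
        wait_event(mddev->sb_wait,
                   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
                   mddev->suspended);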
Fixes: cc27b0c78c79 ("md: fix deadlock between mddev_suspend() and md_write_start()") Reported-by: Xiao Ni Signed-off-by: NeilBrown Signed-off-by: Shaohua Li Signed-off-by: Greg Kroah-Hartman --- drivers/md/md.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index 0ff1bbf6c90e5c..8b2eb0f4122fb7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8039,7 +8039,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || + mddev->suspended); if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { percpu_ref_put(&mddev->writes_pending); return false; From 7cd7a7aa01203bc4aebf6b89e09d39148f9b0bce Mon Sep 17 00:00:00 2001 From: Artur Paszkiewicz Date: Thu, 26 Oct 2017 15:56:54 +0200 Subject: [PATCH 0132/3326] md: don't check MD_SB_CHANGE_CLEAN in md_allow_write commit b90f6ff080c52e2f05364210733df120e3c4e597 upstream. Only MD_SB_CHANGE_PENDING should be used to wait for transition from clean to dirty. Checking also MD_SB_CHANGE_CLEAN is unnecessary and can race with e.g. md_do_sync(). This sporadically causes a hang when changing consistency policy during resync: INFO: task mdadm:6183 blocked for more than 30 seconds. Not tainted 4.14.0-rc3+ #391 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. mdadm D12752 6183 6022 0x00000000 Call Trace: __schedule+0x93f/0x990 schedule+0x6b/0x90 md_allow_write+0x100/0x130 [md_mod] ? do_wait_intr_irq+0x90/0x90 resize_stripes+0x3a/0x5b0 [raid456] ? kernfs_fop_write+0xbe/0x180 raid5_change_consistency_policy+0xa6/0x200 [raid456] consistency_policy_store+0x2e/0x70 [md_mod] md_attr_store+0x90/0xc0 [md_mod] sysfs_kf_write+0x42/0x50 kernfs_fop_write+0x119/0x180 __vfs_write+0x28/0x110 ? rcu_sync_lockdep_assert+0x12/0x60 ? __sb_start_write+0x15a/0x1c0 ? vfs_write+0xa3/0x1a0 vfs_write+0xb4/0x1a0 SyS_write+0x49/0xa0 entry_SYSCALL_64_fastpath+0x18/0xad Fixes: 2214c260c72b ("md: don't return -EAGAIN in md_allow_write for external metadata arrays") Signed-off-by: Artur Paszkiewicz Signed-off-by: Shaohua Li Signed-off-by: Greg Kroah-Hartman --- drivers/md/md.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index 8b2eb0f4122fb7..e019cf8c0d131a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8111,7 +8111,6 @@ void md_allow_write(struct mddev *mddev) sysfs_notify_dirent_safe(mddev->sysfs_state); /* wait for the dirty state to be recorded in the metadata */ wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) && !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); } else spin_unlock(&mddev->lock); From 5912d9ca14f93b7f10bdf75067d1d9cef4c161a6 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Tue, 5 Sep 2017 12:26:03 +0200 Subject: [PATCH 0133/3326] Bluetooth: btqcomsmd: Add support for BD address setup commit 6e518111060c2290427d79c43d4add9600ad852b upstream. This patch implements the hdev setup function since wcnss-bt does not have persistent memory to store an allocated BD address. The device is therefore marked as unconfigured if no BD address has been previously retrieved. 
Signed-off-by: Loic Poulain Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/btqcomsmd.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index d00c4fdae9243e..bd810d01538abb 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -26,6 +26,7 @@ struct btqcomsmd { struct hci_dev *hdev; + bdaddr_t bdaddr; struct rpmsg_endpoint *acl_channel; struct rpmsg_endpoint *cmd_channel; }; @@ -100,6 +101,38 @@ static int btqcomsmd_close(struct hci_dev *hdev) return 0; } +static int btqcomsmd_setup(struct hci_dev *hdev) +{ + struct btqcomsmd *btq = hci_get_drvdata(hdev); + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + + /* Devices do not have persistent storage for BD address. If no + * BD address has been retrieved during probe, mark the device + * as having an invalid BD address. + */ + if (!bacmp(&btq->bdaddr, BDADDR_ANY)) { + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + return 0; + } + + /* When setting a configured BD address fails, mark the device + * as having an invalid BD address. + */ + err = qca_set_bdaddr_rome(hdev, &btq->bdaddr); + if (err) { + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + return 0; + } + + return 0; +} + static int btqcomsmd_probe(struct platform_device *pdev) { struct btqcomsmd *btq; @@ -135,6 +168,7 @@ static int btqcomsmd_probe(struct platform_device *pdev) hdev->open = btqcomsmd_open; hdev->close = btqcomsmd_close; hdev->send = btqcomsmd_send; + hdev->setup = btqcomsmd_setup; hdev->set_bdaddr = qca_set_bdaddr_rome; ret = hci_register_dev(hdev); From a6ff2fb417898b57e5f81349731855763260b266 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 16 Oct 2017 19:03:44 -0700 Subject: [PATCH 0134/3326] md/bitmap: revert a patch commit 938b533d479e7428b7fa1b8179283646d2e2c53d upstream. This reverts commit 8031c3ddc70a. That patches doesn't work well if PAGE_SIZE > 4k. We will fix the original problem with a different approach. Fix: 8031c3ddc70a(md/bitmap: copy correct data for bitmap super) Reported-by: Joshua Kinard Suggested-by: Neil Brown Signed-off-by: Shaohua Li Signed-off-by: Greg Kroah-Hartman --- drivers/md/bitmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index d2121637b4abc2..cae57b5be817a2 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -625,7 +625,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) err = read_sb_page(bitmap->mddev, offset, sb_page, - 0, PAGE_SIZE); + 0, sizeof(bitmap_super_t)); } if (err) return err; @@ -2123,7 +2123,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (store.sb_page && bitmap->storage.sb_page) memcpy(page_address(store.sb_page), page_address(bitmap->storage.sb_page), - PAGE_SIZE); + sizeof(bitmap_super_t)); bitmap_file_unmap(&bitmap->storage); bitmap->storage = store; From 47b02dcac666c310705621187557a84f0176d0f9 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Mon, 30 Oct 2017 21:14:55 +0100 Subject: [PATCH 0135/3326] fsnotify: clean up fsnotify_prepare/finish_user_wait() commit 24c20305c7fc8959836211cb8c50aab93ae0e54f upstream. This patch doesn't actually fix any bug, just paves the way for fixing mark and group pinning. 
Reviewed-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/notify/mark.c | 96 ++++++++++++++++++++++++------------------------ 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 9991f88267342f..0ba789e1cc21a7 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -109,16 +109,6 @@ void fsnotify_get_mark(struct fsnotify_mark *mark) atomic_inc(&mark->refcnt); } -/* - * Get mark reference when we found the mark via lockless traversal of object - * list. Mark can be already removed from the list by now and on its way to be - * destroyed once SRCU period ends. - */ -static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark) -{ - return atomic_inc_not_zero(&mark->refcnt); -} - static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) { u32 new_mask = 0; @@ -256,32 +246,63 @@ void fsnotify_put_mark(struct fsnotify_mark *mark) FSNOTIFY_REAPER_DELAY); } -bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info) +/* + * Get mark reference when we found the mark via lockless traversal of object + * list. Mark can be already removed from the list by now and on its way to be + * destroyed once SRCU period ends. + * + * Also pin the group so it doesn't disappear under us. + */ +static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark) { struct fsnotify_group *group; - if (WARN_ON_ONCE(!iter_info->inode_mark && !iter_info->vfsmount_mark)) - return false; - - if (iter_info->inode_mark) - group = iter_info->inode_mark->group; - else - group = iter_info->vfsmount_mark->group; + if (!mark) + return true; + group = mark->group; /* * Since acquisition of mark reference is an atomic op as well, we can * be sure this inc is seen before any effect of refcount increment. */ atomic_inc(&group->user_waits); + if (atomic_inc_not_zero(&mark->refcnt)) + return true; + + if (atomic_dec_and_test(&group->user_waits) && group->shutdown) + wake_up(&group->notification_waitq); + + return false; +} + +/* + * Puts marks and wakes up group destruction if necessary. + * + * Pairs with fsnotify_get_mark_safe() + */ +static void fsnotify_put_mark_wake(struct fsnotify_mark *mark) +{ + if (mark) { + struct fsnotify_group *group = mark->group; - if (iter_info->inode_mark) { - /* This can fail if mark is being removed */ - if (!fsnotify_get_mark_safe(iter_info->inode_mark)) - goto out_wait; + fsnotify_put_mark(mark); + /* + * We abuse notification_waitq on group shutdown for waiting for + * all marks pinned when waiting for userspace. 
+ */ + if (atomic_dec_and_test(&group->user_waits) && group->shutdown) + wake_up(&group->notification_waitq); } - if (iter_info->vfsmount_mark) { - if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark)) - goto out_inode; +} + +bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info) +{ + /* This can fail if mark is being removed */ + if (!fsnotify_get_mark_safe(iter_info->inode_mark)) + return false; + if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark)) { + fsnotify_put_mark_wake(iter_info->inode_mark); + return false; } /* @@ -292,34 +313,13 @@ bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info) srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx); return true; -out_inode: - if (iter_info->inode_mark) - fsnotify_put_mark(iter_info->inode_mark); -out_wait: - if (atomic_dec_and_test(&group->user_waits) && group->shutdown) - wake_up(&group->notification_waitq); - return false; } void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info) { - struct fsnotify_group *group = NULL; - iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); - if (iter_info->inode_mark) { - group = iter_info->inode_mark->group; - fsnotify_put_mark(iter_info->inode_mark); - } - if (iter_info->vfsmount_mark) { - group = iter_info->vfsmount_mark->group; - fsnotify_put_mark(iter_info->vfsmount_mark); - } - /* - * We abuse notification_waitq on group shutdown for waiting for all - * marks pinned when waiting for userspace. - */ - if (atomic_dec_and_test(&group->user_waits) && group->shutdown) - wake_up(&group->notification_waitq); + fsnotify_put_mark_wake(iter_info->inode_mark); + fsnotify_put_mark_wake(iter_info->vfsmount_mark); } /* From 9e9569f05ea699e2453f25fed562b3cb969b8aef Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Mon, 30 Oct 2017 21:14:55 +0100 Subject: [PATCH 0136/3326] fsnotify: pin both inode and vfsmount mark commit 0d6ec079d6aaa098b978d6395973bb027c752a03 upstream. We may fail to pin one of the marks in fsnotify_prepare_user_wait() when dropping the srcu read lock, resulting in use after free at the next iteration. Solution is to store both marks in iter_info instead of just the one we'll be sending the event for. Reviewed-by: Amir Goldstein Signed-off-by: Miklos Szeredi Fixes: 9385a84d7e1f ("fsnotify: Pass fsnotify_iter_info into handle_event handler") Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/notify/fsnotify.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 0c4583b6171764..0747162938296d 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -335,6 +335,13 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, struct fsnotify_mark, obj_list); vfsmount_group = vfsmount_mark->group; } + /* + * Need to protect both marks against freeing so that we can + * continue iteration from this place, regardless of which mark + * we actually happen to send an event for. 
+ */ + iter_info.inode_mark = inode_mark; + iter_info.vfsmount_mark = vfsmount_mark; if (inode_group && vfsmount_group) { int cmp = fsnotify_compare_groups(inode_group, @@ -348,9 +355,6 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, } } - iter_info.inode_mark = inode_mark; - iter_info.vfsmount_mark = vfsmount_mark; - ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask, data, data_is, cookie, file_name, &iter_info); From 2f5be981622190fa85ba3eed31e974d73a2cae71 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Mon, 30 Oct 2017 21:14:56 +0100 Subject: [PATCH 0137/3326] fsnotify: fix pinning group in fsnotify_prepare_user_wait() commit 9a31d7ad997f55768c687974ce36b759065b49e5 upstream. Blind increment of group's user_waits is not enough, we could be far enough in the group's destruction that it isn't taken into account (i.e. grabbing the mark ref afterwards doesn't guarantee that it was the ref coming from the _group_ that was grabbed). Instead we need to check (under lock) that the mark is still attached to the group after having obtained a ref to the mark. If not, skip it. Reviewed-by: Amir Goldstein Signed-off-by: Miklos Szeredi Fixes: 9385a84d7e1f ("fsnotify: Pass fsnotify_iter_info into handle_event handler") Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/notify/mark.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 0ba789e1cc21a7..258d99087183df 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -255,23 +255,20 @@ void fsnotify_put_mark(struct fsnotify_mark *mark) */ static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark) { - struct fsnotify_group *group; - if (!mark) return true; - group = mark->group; - /* - * Since acquisition of mark reference is an atomic op as well, we can - * be sure this inc is seen before any effect of refcount increment. - */ - atomic_inc(&group->user_waits); - if (atomic_inc_not_zero(&mark->refcnt)) - return true; - - if (atomic_dec_and_test(&group->user_waits) && group->shutdown) - wake_up(&group->notification_waitq); - + if (atomic_inc_not_zero(&mark->refcnt)) { + spin_lock(&mark->lock); + if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) { + /* mark is attached, group is still alive then */ + atomic_inc(&mark->group->user_waits); + spin_unlock(&mark->lock); + return true; + } + spin_unlock(&mark->lock); + fsnotify_put_mark(mark); + } return false; } From 496e65f64903a3ad420a07466c5d008432136985 Mon Sep 17 00:00:00 2001 From: Rameshwar Prasad Sahu Date: Thu, 2 Nov 2017 16:31:07 +0530 Subject: [PATCH 0138/3326] ata: fixes kernel crash while tracing ata_eh_link_autopsy event commit f1601113ddc0339a745e702f4fb1ca37d4875e65 upstream. When tracing ata link error event, the kernel crashes when the disk is removed due to NULL pointer access by trace_ata_eh_link_autopsy API. This occurs as the dev is NULL when the disk disappeared. This patch fixes this crash by calling trace_ata_eh_link_autopsy only if "dev" is not NULL. v2 changes: Removed direct passing "link" pointer instead of "dev" in trace API. 
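Conceptually the fix is a NULL guard (sketch below); the actual hunk that follows gets the same effect by moving the tracepoint inside the per-device loop, where dev is known to be valid:

        if (dev)
                trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);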
Signed-off-by: Rameshwar Prasad Sahu Signed-off-by: Tejun Heo Fixes: 255c03d15a29 ("libata: Add tracepoints") Signed-off-by: Greg Kroah-Hartman --- drivers/ata/libata-eh.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index e4effef0c83f2b..ea20e0eb4d5ac3 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2264,8 +2264,8 @@ static void ata_eh_link_autopsy(struct ata_link *link) if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) eflags |= ATA_EFLAG_DUBIOUS_XFER; ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); + trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); } - trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); DPRINTK("EXIT\n"); } From b891e716b8f89b87e1da4976047810aeaf3bd59f Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 6 Oct 2017 23:09:55 -0400 Subject: [PATCH 0139/3326] ext4: fix interaction between i_size, fallocate, and delalloc after a crash commit 51e3ae81ec58e95f10a98ef3dd6d7bce5d8e35a2 upstream. If there are pending writes subject to delayed allocation, then i_size will show size after the writes have completed, while i_disksize contains the value of i_size on the disk (since the writes have not been persisted to disk). If fallocate(2) is called with the FALLOC_FL_KEEP_SIZE flag, either with or without the FALLOC_FL_ZERO_RANGE flag set, and the new size after the fallocate(2) is between i_size and i_disksize, then after a crash, if a journal commit has resulted in the changes made by the fallocate() call to be persisted after a crash, but the delayed allocation write has not resolved itself, i_size would not be updated, and this would cause the following e2fsck complaint: Inode 12, end of extent exceeds allowed value (logical block 33, physical block 33441, len 7) This can only take place on a sparse file, where the fallocate(2) call is allocating blocks in a range which is before a pending delayed allocation write which is extending i_size. Since this situation is quite rare, and the window in which the crash must take place is typically < 30 seconds, in practice this condition will rarely happen. Nevertheless, it can be triggered in testing, and in particular by xfstests generic/456. Signed-off-by: Theodore Ts'o Reported-by: Amir Goldstein Signed-off-by: Greg Kroah-Hartman --- fs/ext4/extents.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 97f0fd06728d7b..07bca11749d406 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4794,7 +4794,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, } if (!(mode & FALLOC_FL_KEEP_SIZE) && - offset + len > i_size_read(inode)) { + (offset + len > i_size_read(inode) || + offset + len > EXT4_I(inode)->i_disksize)) { new_size = offset + len; ret = inode_newsize_ok(inode, new_size); if (ret) @@ -4965,7 +4966,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) } if (!(mode & FALLOC_FL_KEEP_SIZE) && - offset + len > i_size_read(inode)) { + (offset + len > i_size_read(inode) || + offset + len > EXT4_I(inode)->i_disksize)) { new_size = offset + len; ret = inode_newsize_ok(inode, new_size); if (ret) From 713cb65003f1d1038d211606871b12ff555fb3f3 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Thu, 12 Oct 2017 11:52:34 -0400 Subject: [PATCH 0140/3326] ext4: prevent data corruption with inline data + DAX commit 559db4c6d784ceedc2a5418ced4d357cb843e221 upstream. 
If an inode has inline data it is currently prevented from using DAX by a check in ext4_set_inode_flags(). When the inode grows inline data via ext4_create_inline_data() or removes its inline data via ext4_destroy_inline_data_nolock(), the value of S_DAX can change. Currently these changes are unsafe because we don't hold off page faults and I/O, write back dirty radix tree entries and invalidate all mappings. There are also issues with mm-level races when changing the value of S_DAX, as well as issues with the VM_MIXEDMAP flag: https://www.spinics.net/lists/linux-xfs/msg09859.html The unsafe transition of S_DAX can reliably cause data corruption, as shown by the following fstest: https://patchwork.kernel.org/patch/9948381/ Fix this issue by preventing the DAX mount option from being used on filesystems that were created to support inline data. Inline data is an option given to mkfs.ext4. Signed-off-by: Ross Zwisler Signed-off-by: Theodore Ts'o Reviewed-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/ext4/inline.c | 10 ---------- fs/ext4/super.c | 5 +++++ 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 28c5c3abddb305..fd9501977f1c7a 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -302,11 +302,6 @@ static int ext4_create_inline_data(handle_t *handle, EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE; ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA); - /* - * Propagate changes to inode->i_flags as well - e.g. S_DAX may - * get cleared - */ - ext4_set_inode_flags(inode); get_bh(is.iloc.bh); error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); @@ -451,11 +446,6 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle, } } ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA); - /* - * Propagate changes to inode->i_flags as well - e.g. S_DAX may - * get set. - */ - ext4_set_inode_flags(inode); get_bh(is.iloc.bh); error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b0915b734a3817..f29351c6661097 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3708,6 +3708,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { + if (ext4_has_feature_inline_data(sb)) { + ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem" + " that may contain inline data"); + goto failed_mount; + } err = bdev_dax_supported(sb, blocksize); if (err) goto failed_mount; From 22e5190899e8d159fe59e0c0e3b9a1085ee5d4e8 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Thu, 12 Oct 2017 11:54:08 -0400 Subject: [PATCH 0141/3326] ext4: prevent data corruption with journaling + DAX commit e9072d859df3e0f2c3ba450f0d1739595c2d5d13 upstream. The current code has the potential for data corruption when changing an inode's journaling mode, as that can result in a subsequent unsafe change in S_DAX. I've captured an instance of this data corruption in the following fstest: https://patchwork.kernel.org/patch/9948377/ Prevent this data corruption from happening by disallowing changes to the journaling mode if the '-o dax' mount option was used. This means that for a given filesystem we could have a mix of inodes using either DAX or data journaling, but whatever state the inodes are in will be held for the duration of the mount. 
Signed-off-by: Ross Zwisler Signed-off-by: Theodore Ts'o Reviewed-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/ext4/inode.c | 5 ----- fs/ext4/ioctl.c | 16 +++++++++++++--- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 90afeb7293a6b9..38eb621edd80c9 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5967,11 +5967,6 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); } ext4_set_aops(inode); - /* - * Update inode->i_flags after EXT4_INODE_JOURNAL_DATA was updated. - * E.g. S_DAX may get cleared / set. - */ - ext4_set_inode_flags(inode); jbd2_journal_unlock_updates(journal); percpu_up_write(&sbi->s_journal_flag_rwsem); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 75d83471f65c40..d97f40396765f9 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -291,10 +291,20 @@ static int ext4_ioctl_setflags(struct inode *inode, if (err) goto flags_out; - if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) + if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { + /* + * Changes to the journaling mode can cause unsafe changes to + * S_DAX if we are using the DAX mount option. + */ + if (test_opt(inode->i_sb, DAX)) { + err = -EBUSY; + goto flags_out; + } + err = ext4_change_inode_journal_flag(inode, jflag); - if (err) - goto flags_out; + if (err) + goto flags_out; + } if (migrate) { if (flags & EXT4_EXTENTS_FL) err = ext4_ext_migrate(inode); From 27180144e50dccd03979469c3d714cad4815aa04 Mon Sep 17 00:00:00 2001 From: Henrik Eriksson Date: Tue, 21 Nov 2017 09:29:28 +0100 Subject: [PATCH 0142/3326] ALSA: pcm: update tstamp only if audio_tstamp changed commit 20e3f985bb875fea4f86b04eba4b6cc29bfd6b71 upstream. commit 3179f6200188 ("ALSA: core: add .get_time_info") had a side effect of changing the behaviour of the PCM runtime tstamp. Prior to this change tstamp was not updated by snd_pcm_update_hw_ptr0() unless the hw_ptr had moved, after this change tstamp was always updated. For an application using alsa-lib, doing snd_pcm_readi() followed by snd_pcm_status() to estimate the age of the read samples by subtracting status->avail * [sample rate] from status->tstamp this change degraded the accuracy of the estimate on devices where the pcm hw does not provide a granular hw_ptr, e.g., devices using soc-generic-dmaengine-pcm.c and a dma-engine with residue_granularity DMA_RESIDUE_GRANULARITY_DESCRIPTOR. The accuracy of the estimate depended on the latency between the PCM hw completing a period and the driver called snd_pcm_period_elapsed() to notify ALSA core, typically determined by interrupt handling latency. After the change the accuracy of the estimate depended on the latency between the PCM hw completing a period and the application calling snd_pcm_status(), determined by the scheduling of the application process. The maximum error of the estimate is one period length in both cases, but the error average and variance is smaller when it depends on interrupt latency. Instead of always updating tstamp, update it only if audio_tstamp changed. 
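For context, the user-space estimate described above looks roughly like the sketch below (illustration only; the function and parameter names are made up, error handling is omitted, and the alsa-lib calls are the standard status API):

        #include <alsa/asoundlib.h>

        /* Capture time of the oldest of the 'avail' queued frames: the status
         * tstamp minus avail / rate, as described in the text above. */
        static void oldest_sample_time(snd_pcm_t *pcm, unsigned int rate,
                                       struct timespec *res)
        {
                snd_pcm_status_t *status;
                snd_htimestamp_t ts;            /* struct timespec */
                snd_pcm_uframes_t avail;
                long long ns;

                snd_pcm_status_alloca(&status);
                snd_pcm_status(pcm, status);    /* error handling omitted */
                snd_pcm_status_get_htstamp(status, &ts);
                avail = snd_pcm_status_get_avail(status);

                ns  = (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
                ns -= (long long)avail * 1000000000LL / rate;
                res->tv_sec  = ns / 1000000000LL;
                res->tv_nsec = ns % 1000000000LL;
        }

The accuracy of that estimate is what the patch preserves: tstamp now only moves together with audio_tstamp, i.e. with the hw_ptr, so the subtraction stays anchored to the interrupt that completed the period rather than to whenever the application happened to call snd_pcm_status().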
Fixes: 3179f6200188 ("ALSA: core: add .get_time_info") Suggested-by: Pierre-Louis Bossart Signed-off-by: Henrik Eriksson Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/pcm_lib.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index a93a4235a33287..10e7ef7a8804b1 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -248,8 +248,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream, runtime->rate); *audio_tstamp = ns_to_timespec(audio_nsecs); } - runtime->status->audio_tstamp = *audio_tstamp; - runtime->status->tstamp = *curr_tstamp; + if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) { + runtime->status->audio_tstamp = *audio_tstamp; + runtime->status->tstamp = *curr_tstamp; + } /* * re-take a driver timestamp to let apps detect if the reference tstamp From 7a0e3e553e0dfcabd7b5928f33352c0559afeb75 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 21 Nov 2017 16:55:51 +0100 Subject: [PATCH 0143/3326] ALSA: usb-audio: Add sanity checks to FE parser commit d937cd6790a2bef2d07b500487646bd794c039bb upstream. When the usb-audio descriptor contains the malformed feature unit description with a too short length, the driver may access out-of-bounds. Add a sanity check of the header size at the beginning of parse_audio_feature_unit(). Fixes: 23caaf19b11e ("ALSA: usb-mixer: Add support for Audio Class v2.0") Reported-by: Andrey Konovalov Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/mixer.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 91bc8f18791e46..9bc34cb8a3ba34 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1469,6 +1469,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, __u8 *bmaControls; if (state->mixer->protocol == UAC_VERSION_1) { + if (hdr->bLength < 7) { + usb_audio_err(state->chip, + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } csize = hdr->bControlSize; if (!csize) { usb_audio_dbg(state->chip, @@ -1486,6 +1492,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, } } else { struct uac2_feature_unit_descriptor *ftr = _ftr; + if (hdr->bLength < 6) { + usb_audio_err(state->chip, + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } csize = 4; channels = (hdr->bLength - 6) / 4 - 1; bmaControls = ftr->bmaControls; From 5f7873c9945dd026e20d006dd18b75b8f05cdb5a Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 21 Nov 2017 17:00:32 +0100 Subject: [PATCH 0144/3326] ALSA: usb-audio: Fix potential out-of-bound access at parsing SU commit f658f17b5e0e339935dca23e77e0f3cad591926b upstream. The usb-audio driver may trigger an out-of-bound access at parsing a malformed selector unit, as it checks the header length only after evaluating bNrInPins field, which can be already above the given length. Fix it by adding the length check beforehand. 
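These two mixer fixes follow the same defensive pattern: validate the descriptor's advertised bLength before reading any field that lies beyond the guaranteed minimum, and never trust it to fit the buffer. A minimal userspace sketch of that pattern, using an invented layout that only loosely resembles a UAC1 feature unit:

#include <stdint.h>
#include <stdio.h>

struct fake_feature_unit {
        uint8_t bLength;
        uint8_t bDescriptorType;
        uint8_t bDescriptorSubtype;
        uint8_t bUnitID;
        uint8_t bSourceID;
        uint8_t bControlSize;   /* only safe to read once bLength >= 7 */
        uint8_t bmaControls[];  /* bControlSize bytes per channel */
};

static int parse_feature_unit(const uint8_t *buf, size_t len)
{
        const struct fake_feature_unit *hdr = (const void *)buf;

        /* Reject anything too short to contain the fields we dereference. */
        if (len < 2 || hdr->bLength < 7 || hdr->bLength > len) {
                fprintf(stderr, "invalid feature unit descriptor\n");
                return -1;
        }
        if (!hdr->bControlSize) {
                fprintf(stderr, "descriptor read error\n");
                return -1;
        }
        printf("unit %u: control size %u\n",
               (unsigned)hdr->bUnitID, (unsigned)hdr->bControlSize);
        return 0;
}

int main(void)
{
        /* bLength=8, unit 2, source 1, 1-byte controls */
        uint8_t good[8] = { 8, 0x24, 0x06, 2, 1, 1, 0x03, 0 };
        uint8_t bad[4]  = { 4, 0x24, 0x06, 2 };  /* too short: rejected */

        parse_feature_unit(good, sizeof(good));
        parse_feature_unit(bad, sizeof(bad));
        return 0;
}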
Fixes: 99fc86450c43 ("ALSA: usb-mixer: parse descriptors with structs") Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/mixer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 9bc34cb8a3ba34..2b835cca41b16a 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2098,7 +2098,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, const struct usbmix_name_map *map; char **namelist; - if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) { + if (desc->bLength < 5 || !desc->bNrInPins || + desc->bLength < 5 + desc->bNrInPins) { usb_audio_err(state->chip, "invalid SELECTOR UNIT descriptor %d\n", unitid); return -EINVAL; From 47f25441ffcfb262b83ec8acc7d94eddc4727014 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 21 Nov 2017 17:28:06 +0100 Subject: [PATCH 0145/3326] ALSA: usb-audio: Add sanity checks in v2 clock parsers commit 0a62d6c966956d77397c32836a5bbfe3af786fc1 upstream. The helper functions to parse and look for the clock source, selector and multiplier unit may return the descriptor with a too short length than required, while there is no sanity check in the caller side. Add some sanity checks in the parsers, at least, to guarantee the given descriptor size, for avoiding the potential crashes. Fixes: 79f920fbff56 ("ALSA: usb-audio: parse clock topology of UAC2 devices") Reported-by: Andrey Konovalov Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/clock.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 26dd5f20f14943..eb3396ffba4c4c 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_SOURCE))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) return cs; } @@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_SELECTOR))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) { + if (cs->bLength < 5 + cs->bNrInPins) + return NULL; return cs; + } } return NULL; @@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_MULTIPLIER))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) return cs; } From 6352ec905f914143ca80ea68b8cbf30f6ae27aa3 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 21 Nov 2017 16:36:11 +0100 Subject: [PATCH 0146/3326] ALSA: timer: Remove kernel warning at compat ioctl error paths commit 3d4e8303f2c747c8540a0a0126d0151514f6468b upstream. Some timer compat ioctls have NULL checks of timer instance with snd_BUG_ON() that bring up WARN_ON() when the debug option is set. Actually the condition can be met in the normal situation and it's confusing and bad to spew kernel warnings with stack trace there. Let's remove snd_BUG_ON() invocation and replace with the simple checks. Also, correct the error code to EBADFD to follow the native ioctl error handling. 
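A small sketch, with invented names, of the principle behind this change: a state that user space can reach legitimately (an ioctl issued before a timer was selected) is answered with an error code instead of an assertion, since WARN-style checks are reserved for genuine kernel bugs.

#include <errno.h>
#include <stdio.h>

struct timer_user_sketch {
        void *timeri;   /* NULL until the user attaches a timer */
};

static int timer_info_sketch(struct timer_user_sketch *tu)
{
        /*
         * Not a kernel bug: the caller simply has not selected a timer
         * yet, so report -EBADFD quietly instead of dumping a stack trace.
         */
        if (!tu->timeri)
                return -EBADFD;
        return 0;
}

int main(void)
{
        struct timer_user_sketch tu = { .timeri = NULL };

        printf("info before select -> %d (expected %d)\n",
               timer_info_sketch(&tu), -EBADFD);
        return 0;
}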
Reported-by: syzbot Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/timer_compat.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c index 59127b6ef39ee9..e00f7e399e462a 100644 --- a/sound/core/timer_compat.c +++ b/sound/core/timer_compat.c @@ -66,11 +66,11 @@ static int snd_timer_user_info_compat(struct file *file, struct snd_timer *t; tu = file->private_data; - if (snd_BUG_ON(!tu->timeri)) - return -ENXIO; + if (!tu->timeri) + return -EBADFD; t = tu->timeri->timer; - if (snd_BUG_ON(!t)) - return -ENXIO; + if (!t) + return -EBADFD; memset(&info, 0, sizeof(info)); info.card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) @@ -99,8 +99,8 @@ static int snd_timer_user_status_compat(struct file *file, struct snd_timer_status32 status; tu = file->private_data; - if (snd_BUG_ON(!tu->timeri)) - return -ENXIO; + if (!tu->timeri) + return -EBADFD; memset(&status, 0, sizeof(status)); status.tstamp.tv_sec = tu->tstamp.tv_sec; status.tstamp.tv_nsec = tu->tstamp.tv_nsec; From 072c5925f867ecf3b1567f898abe7ace6b283079 Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Wed, 8 Nov 2017 15:28:33 +0800 Subject: [PATCH 0147/3326] ALSA: hda/realtek - Fix ALC275 no sound issue commit 3aabf94c2d95fe465d5fa8590113d1c1f7d8333d upstream. Sound works after a cold boot but not after a reboot from windows. This patch will solve this issue. This is relation with Class-D power control. [ The bug was reported in Bugzilla below for Sony VAIO SVS13A1C5E -- tiwai] Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=197737 Signed-off-by: Kailang Yang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index dce0682c500191..804a5707662ab2 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -341,6 +341,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0299: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; + case 0x10ec0275: + alc_update_coef_idx(codec, 0xe, 0, 1<<0); + break; case 0x10ec0293: alc_update_coef_idx(codec, 0xa, 1<<13, 0); break; From 2ee1a8cf61714a960f7286d07890499553ea28e0 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 17 Nov 2017 12:08:40 +0100 Subject: [PATCH 0148/3326] ALSA: hda: Fix too short HDMI/DP chmap reporting commit c2432466f583cb719b35a41e757da587d9ab1d00 upstream. We got a regression report about the HD-audio HDMI chmap, where some surround channels are reported as UNKNOWN. The git bisection pointed the culprit at the commit 9b3dc8aa3fb1 ("ALSA: hda - Register chmap obj as priv data instead of codec"). The story behind scene is like this: - While moving the code out of the legacy HDA to the HDA common place, the patch modifies the code to obtain the chmap array indirectly in a byte array, and it expands it to kctl value array. - At the latter operation, the size of the array is wrongly passed by sizeof() to the pointer. - It can be 4 on 32bit arch, thus too short for 6+ channels. (And that's the reason why it didn't hit other persons; it's 8 on 64bit arch, thus it's usually enough.) The code was further changed meanwhile, but the problem persisted. Let's fix it by correctly evaluating the array size. 
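A standalone demonstration, independent of the HDA code, of the bug class fixed here: sizeof() applied to a pointer yields the pointer size (4 or 8 bytes), not the element count of the array it points to, so a copy loop bounded by sizeof(ptr) silently truncates; bounding by the real array size restores all entries.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void copy_wrong(long *dst, const unsigned char *map)
{
        /* BUG: sizeof(map) is 4 or 8, never the 16 channels we need. */
        for (size_t i = 0; i < sizeof(map); i++)
                dst[i] = map[i];
}

static void copy_right(long *dst, const unsigned char map[16])
{
        /* Bound the loop by the real element count of the source array. */
        for (size_t i = 0; i < 16; i++)
                dst[i] = map[i];
}

int main(void)
{
        unsigned char pcm_chmap[16];
        long out_wrong[16] = { 0 }, out_right[16] = { 0 };

        for (size_t i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
                pcm_chmap[i] = (unsigned char)(i + 1);

        copy_wrong(out_wrong, pcm_chmap);
        copy_right(out_right, pcm_chmap);

        /* With 8-byte pointers only 8 of 16 entries survive the bad loop. */
        printf("sizeof(pointer)=%zu  wrong[12]=%ld  right[12]=%ld\n",
               sizeof(void *), out_wrong[12], out_right[12]);
        return 0;
}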
Fixes: 9b3dc8aa3fb1 ("ALSA: hda - Register chmap obj as priv data instead of codec") Reported-by: VDR User Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/hda/hdmi_chmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c index 81acc20c253587..f21633cd9b38ea 100644 --- a/sound/hda/hdmi_chmap.c +++ b/sound/hda/hdmi_chmap.c @@ -746,7 +746,7 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol, memset(pcm_chmap, 0, sizeof(pcm_chmap)); chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap); - for (i = 0; i < sizeof(chmap); i++) + for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++) ucontrol->value.integer.value[i] = pcm_chmap[i]; return 0; From 665000d9507ac332b7dede78fb3f5dd7b6f77177 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 22 Nov 2017 12:34:56 +0100 Subject: [PATCH 0149/3326] ALSA: hda - Fix yet remaining issue with vmaster 0dB initialization commit d6c0615f510bc1ee26cfb2b9a3343ac99b9c46fb upstream. The previous fix for addressing the breakage in vmaster slave initialization, commit a91d66129fb9 ("ALSA: hda - Fix incorrect TLV callback check introduced during set_fs() removal"), introduced a new helper to process over each slave kctl. However, this helper passes only the original kctl, not the virtual slave kctl. As a result, HD-audio driver (which is the only user so far) couldn't initialize the slave correctly because it's trying to update the value directly with the original kctl, not with the mapped kctl. This patch fixes the situation again by passing both the mapped slaved and original slave kctls to the function. Luckily there is a single caller as of now, so changing the call signature is no big matter. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=197959 Fixes: a91d66129fb9 ("ALSA: hda - Fix incorrect TLV callback check introduced during set_fs() removal") Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- include/sound/control.h | 4 +++- sound/core/vmaster.c | 6 ++++-- sound/pci/hda/hda_codec.c | 10 +++++++--- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/include/sound/control.h b/include/sound/control.h index a1f1152bc68761..ca13a44ae9d44e 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -249,7 +249,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl, void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only); #define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true) int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl, - int (*func)(struct snd_kcontrol *, void *), + int (*func)(struct snd_kcontrol *vslave, + struct snd_kcontrol *slave, + void *arg), void *arg); /* diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c index e43af18d438363..8632301489fa66 100644 --- a/sound/core/vmaster.c +++ b/sound/core/vmaster.c @@ -495,7 +495,9 @@ EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster); * Returns 0 if successful, or a negative error code. 
*/ int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl, - int (*func)(struct snd_kcontrol *, void *), + int (*func)(struct snd_kcontrol *vslave, + struct snd_kcontrol *slave, + void *arg), void *arg) { struct link_master *master; @@ -507,7 +509,7 @@ int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl, if (err < 0) return err; list_for_each_entry(slave, &master->slaves, list) { - err = func(&slave->slave, arg); + err = func(slave->kctl, &slave->slave, arg); if (err < 0) return err; } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index a0989d231fd00f..417abbb1f72caf 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -1823,7 +1823,9 @@ struct slave_init_arg { }; /* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */ -static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg) +static int init_slave_0dB(struct snd_kcontrol *slave, + struct snd_kcontrol *kctl, + void *_arg) { struct slave_init_arg *arg = _arg; int _tlv[4]; @@ -1860,7 +1862,7 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg) arg->step = step; val = -tlv[2] / step; if (val > 0) { - put_kctl_with_value(kctl, val); + put_kctl_with_value(slave, val); return val; } @@ -1868,7 +1870,9 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg) } /* unmute the slave via snd_ctl_apply_vmaster_slaves() */ -static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg) +static int init_slave_unmute(struct snd_kcontrol *slave, + struct snd_kcontrol *kctl, + void *_arg) { return put_kctl_with_value(slave, 1); } From b177556318a731878a1e9e8a9bb28b98f7bc832c Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Wed, 22 Nov 2017 15:21:32 +0800 Subject: [PATCH 0150/3326] ALSA: hda/realtek - Fix ALC700 family no sound issue commit 2d7fe6185722b0817bb345f62ab06b76a7b26542 upstream. It maybe the typo for ALC700 support patch. To fix the bit value on this patch. Fixes: 6fbae35a3170 ("ALSA: hda/realtek - Add support for new codecs ALC700/ALC701/ALC703") Signed-off-by: Kailang Yang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 804a5707662ab2..7c39114d124fee 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6866,7 +6866,7 @@ static int patch_alc269(struct hda_codec *codec) case 0x10ec0703: spec->codec_variant = ALC269_TYPE_ALC700; spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ - alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */ + alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ break; } From 0bc4abfb872532fd567397ed414b3336f6df7971 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 8 Nov 2017 16:47:08 +0100 Subject: [PATCH 0151/3326] ASoC: sun8i-codec: Invert Master / Slave condition commit 560bfe774f058e97596f30ff71cffdac52b72914 upstream. The current code had the condition backward when checking if the codec should be running in slave or master mode. Fix it, and make the comment a bit more readable. 
Fixes: 36c684936fae ("ASoC: Add sun8i digital audio codec") Signed-off-by: Maxime Ripard Reviewed-by: Chen-Yu Tsai Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/sunxi/sun8i-codec.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c index abfb710df7cbce..038107baf41497 100644 --- a/sound/soc/sunxi/sun8i-codec.c +++ b/sound/soc/sunxi/sun8i-codec.c @@ -170,11 +170,11 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) /* clock masters */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { - case SND_SOC_DAIFMT_CBS_CFS: /* DAI Slave */ - value = 0x0; /* Codec Master */ + case SND_SOC_DAIFMT_CBS_CFS: /* Codec slave, DAI master */ + value = 0x1; break; - case SND_SOC_DAIFMT_CBM_CFM: /* DAI Master */ - value = 0x1; /* Codec Slave */ + case SND_SOC_DAIFMT_CBM_CFM: /* Codec Master, DAI slave */ + value = 0x0; break; default: return -EINVAL; From eca496d8fe4bcad503e61dd4510907478ce00118 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 8 Nov 2017 16:47:10 +0100 Subject: [PATCH 0152/3326] ASoC: sun8i-codec: Fix left and right channels inversion commit 18c1bf35c1c09bca05cf70bc984a4764e0b0372b upstream. Since its introduction, the codec had an inversion of the left and right channels. It turned out to be pretty simple as it appears that the codec doesn't have the same polarity on the LRCK signal than the I2S block. Fix this by inverting our bit value for the LRCK inversion. Fixes: 36c684936fae ("ASoC: Add sun8i digital audio codec") Signed-off-by: Maxime Ripard Reviewed-by: Chen-Yu Tsai Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/sunxi/sun8i-codec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c index 038107baf41497..c2ceca485d6a0f 100644 --- a/sound/soc/sunxi/sun8i-codec.c +++ b/sound/soc/sunxi/sun8i-codec.c @@ -199,7 +199,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) value << SUN8I_AIF1CLK_CTRL_AIF1_BCLK_INV); regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL, BIT(SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV), - value << SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV); + !value << SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV); /* DAI format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { From 79e019f2a539a4ff25327a3831c7c74d0c73c196 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 9 Nov 2017 10:39:24 +0100 Subject: [PATCH 0153/3326] ASoC: sun8i-codec: Set the BCLK divider commit 316b7758c998fb13371d14bb6c9e45ab129c19a7 upstream. While the current code was reporting to be able to work in master mode, it failed to do so because the BCLK divider wasn't programmed, meaning that the BCLK would run at the PLL's frequency no matter the sample rate. It was obviously a bit too fast. Add support to retrieve the divider to use, and set it. Since our PLL is not always able to generate a perfect multiple of the sample rate, we'll have to choose the closest divider that matches our setup. 
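A standalone sketch of the divider selection this patch adds: derive the ideal BCLK divider from the module clock, sample rate and slot width, then walk a table of supported dividers and keep the closest match. The table mirrors the one in the diff below; the example clock numbers (22.5792 MHz, 44.1 kHz, 16-bit slots) are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct clk_div {
        unsigned int div;
        unsigned char val;      /* register encoding of the divider */
};

static const struct clk_div bclk_div_table[] = {
        { 1, 0 },  { 2, 1 },  { 4, 2 },  { 6, 3 },  { 8, 4 },  { 12, 5 },
        { 16, 6 }, { 24, 7 }, { 32, 8 }, { 48, 9 }, { 64, 10 }, { 96, 11 },
        { 128, 12 }, { 192, 13 },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned char pick_bclk_div(unsigned long clk_rate,
                                   unsigned int rate, unsigned int word_size)
{
        /* BCLK = rate * word_size * 2 channels, so the wanted divider is: */
        unsigned int want = clk_rate / rate / word_size / 2;
        unsigned int best_diff = ~0u;
        unsigned char best_val = 0;

        for (size_t i = 0; i < ARRAY_SIZE(bclk_div_table); i++) {
                unsigned int diff =
                        abs((int)bclk_div_table[i].div - (int)want);

                if (diff < best_diff) {
                        best_diff = diff;
                        best_val = bclk_div_table[i].val;
                }
        }
        return best_val;
}

int main(void)
{
        /* 22579200 / 44100 / 16 / 2 = 16, so the expected encoding is 6. */
        printf("bclk div register value: %u\n",
               pick_bclk_div(22579200UL, 44100, 16));
        return 0;
}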
Fixes: 36c684936fae ("ASoC: Add sun8i digital audio codec") Reviewed-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/sunxi/sun8i-codec.c | 51 +++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c index c2ceca485d6a0f..7a312168f8647a 100644 --- a/sound/soc/sunxi/sun8i-codec.c +++ b/sound/soc/sunxi/sun8i-codec.c @@ -73,6 +73,7 @@ #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8) #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4) #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6) +#define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9) struct sun8i_codec { struct device *dev; @@ -226,12 +227,57 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) return 0; } +struct sun8i_codec_clk_div { + u8 div; + u8 val; +}; + +static const struct sun8i_codec_clk_div sun8i_codec_bclk_div[] = { + { .div = 1, .val = 0 }, + { .div = 2, .val = 1 }, + { .div = 4, .val = 2 }, + { .div = 6, .val = 3 }, + { .div = 8, .val = 4 }, + { .div = 12, .val = 5 }, + { .div = 16, .val = 6 }, + { .div = 24, .val = 7 }, + { .div = 32, .val = 8 }, + { .div = 48, .val = 9 }, + { .div = 64, .val = 10 }, + { .div = 96, .val = 11 }, + { .div = 128, .val = 12 }, + { .div = 192, .val = 13 }, +}; + +static u8 sun8i_codec_get_bclk_div(struct sun8i_codec *scodec, + unsigned int rate, + unsigned int word_size) +{ + unsigned long clk_rate = clk_get_rate(scodec->clk_module); + unsigned int div = clk_rate / rate / word_size / 2; + unsigned int best_val = 0, best_diff = ~0; + int i; + + for (i = 0; i < ARRAY_SIZE(sun8i_codec_bclk_div); i++) { + const struct sun8i_codec_clk_div *bdiv = &sun8i_codec_bclk_div[i]; + unsigned int diff = abs(bdiv->div - div); + + if (diff < best_diff) { + best_diff = diff; + best_val = bdiv->val; + } + } + + return best_val; +} + static int sun8i_codec_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct sun8i_codec *scodec = snd_soc_codec_get_drvdata(dai->codec); int sample_rate; + u8 bclk_div; /* * The CPU DAI handles only a sample of 16 bits. Configure the @@ -241,6 +287,11 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream, SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK, SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_16); + bclk_div = sun8i_codec_get_bclk_div(scodec, params_rate(params), 16); + regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL, + SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK, + bclk_div << SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV); + regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL, SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK, SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_16); From 04dd27af8ea24db24d0777a9ef3259621f32d6f5 Mon Sep 17 00:00:00 2001 From: Joakim Tjernlund Date: Wed, 11 Oct 2017 12:40:55 +0200 Subject: [PATCH 0154/3326] mfd: lpc_ich: Avoton/Rangeley uses SPI_BYT method commit 07d70913dce59f3c8e5d0ca76250861158a9ca6c upstream. Avoton/Rangeley are based on Silvermount micro-architecture, like Bay Trail, and uses the INTEL_SPI_BYT method to drive SPI. 
Signed-off-by: Joakim Tjernlund Acked-by: Mika Westerberg Signed-off-by: Lee Jones Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/lpc_ich.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index 450ae36645aa29..cf1120abbf521f 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -522,6 +522,7 @@ static struct lpc_ich_info lpc_chipset_info[] = { .name = "Avoton SoC", .iTCO_version = 3, .gpio_version = AVOTON_GPIO, + .spi_type = INTEL_SPI_BYT, }, [LPC_BAYTRAIL] = { .name = "Bay Trail SoC", From 357621bca2cb346513d7f4d8f57edb53bd7bbe44 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 24 Sep 2017 18:36:44 -0400 Subject: [PATCH 0155/3326] fix a page leak in vhost_scsi_iov_to_sgl() error recovery commit 11d49e9d089ccec81be87c2386dfdd010d7f7f6e upstream. we are advancing sg as we go, so the pages we need to drop in case of error are *before* the current sg. Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- drivers/vhost/scsi.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 046f6d280af577..e47c5bc3ddcadf 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -688,6 +688,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, struct scatterlist *sg, int sg_count) { size_t off = iter->iov_offset; + struct scatterlist *p = sg; int i, ret; for (i = 0; i < iter->nr_segs; i++) { @@ -696,8 +697,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write); if (ret < 0) { - for (i = 0; i < sg_count; i++) { - struct page *page = sg_page(&sg[i]); + while (p < sg) { + struct page *page = sg_page(p++); if (page) put_page(page); } From d8319b3bbc296db662736b5c00ff79dccafa19a0 Mon Sep 17 00:00:00 2001 From: Tuomas Tynkkynen Date: Sun, 19 Nov 2017 11:28:43 +0200 Subject: [PATCH 0156/3326] 9p: Fix missing commas in mount options commit 61b272c3aa170b3e461b8df636407b29f35f98eb upstream. Since commit c4fac9100456 ("9p: Implement show_options"), the mount options of 9p filesystems are printed out with some missing commas between the individual options: p9-scratch on /mnt/scratch type 9p (rw,dirsync,loose,access=clienttrans=virtio) Add them back. 
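A small userspace illustration, not the 9p code, of the convention this patch restores: every option appended after the first must carry its own leading comma, otherwise neighbouring options fuse into one unparseable token such as "access=clienttrans=virtio".

#include <stdio.h>

static void show_options(char *buf, size_t len,
                         unsigned int msize, const char *trans)
{
        size_t off = 0;

        /* Each append supplies its own leading comma, as seq_printf does. */
        if (msize != 8192)
                off += snprintf(buf + off, len - off, ",msize=%u", msize);
        off += snprintf(buf + off, len - off, ",trans=%s", trans);
}

int main(void)
{
        char buf[64];

        show_options(buf, sizeof(buf), 65536, "virtio");
        printf("mount opts: rw,loose%s\n", buf);
        return 0;
}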
Fixes: c4fac9100456 ("9p: Implement show_options") Signed-off-by: Tuomas Tynkkynen Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- net/9p/client.c | 2 +- net/9p/trans_fd.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/net/9p/client.c b/net/9p/client.c index 4674235b0d9b1f..b2583ee2fbe9ce 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -82,7 +82,7 @@ int p9_show_client_options(struct seq_file *m, struct p9_client *clnt) { if (clnt->msize != 8192) seq_printf(m, ",msize=%u", clnt->msize); - seq_printf(m, "trans=%s", clnt->trans_mod->name); + seq_printf(m, ",trans=%s", clnt->trans_mod->name); switch (clnt->proto_version) { case p9_proto_legacy: diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 903a190319b94d..985046ae42312e 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -724,12 +724,12 @@ static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt) { if (clnt->trans_mod == &p9_tcp_trans) { if (clnt->trans_opts.tcp.port != P9_PORT) - seq_printf(m, "port=%u", clnt->trans_opts.tcp.port); + seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port); } else if (clnt->trans_mod == &p9_fd_trans) { if (clnt->trans_opts.fd.rfd != ~0) - seq_printf(m, "rfd=%u", clnt->trans_opts.fd.rfd); + seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd); if (clnt->trans_opts.fd.wfd != ~0) - seq_printf(m, "wfd=%u", clnt->trans_opts.fd.wfd); + seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd); } return 0; } From b178968f557219ae359cbd6c12d84f159038a38d Mon Sep 17 00:00:00 2001 From: Tuomas Tynkkynen Date: Wed, 6 Sep 2017 17:59:07 +0300 Subject: [PATCH 0157/3326] fs/9p: Compare qid.path in v9fs_test_inode commit 8ee031631546cf2f7859cc69593bd60bbdd70b46 upstream. Commit fd2421f54423 ("fs/9p: When doing inode lookup compare qid details and inode mode bits.") transformed v9fs_qid_iget() to use iget5_locked() instead of iget_locked(). However, the test() callback is not checking fid.path at all, which means that a lookup in the inode cache can now accidentally locate a completely wrong inode from the same inode hash bucket if the other fields (qid.type and qid.version) match. Fixes: fd2421f54423 ("fs/9p: When doing inode lookup compare qid details and inode mode bits.") Reviewed-by: Latchesar Ionkov Signed-off-by: Tuomas Tynkkynen Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/9p/vfs_inode.c | 3 +++ fs/9p/vfs_inode_dotl.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 2a5de610dd8fd5..bdabb2765d1b3e 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data) if (v9inode->qid.type != st->qid.type) return 0; + + if (v9inode->qid.path != st->qid.path) + return 0; return 1; } diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 70f9887c59a90f..7f6ae21a27b3ca 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data) if (v9inode->qid.type != st->qid.type) return 0; + + if (v9inode->qid.path != st->qid.path) + return 0; return 1; } From b5c87f23a703ac257134ac77f7902d0cc10d0a09 Mon Sep 17 00:00:00 2001 From: Tuomas Tynkkynen Date: Wed, 6 Sep 2017 17:59:08 +0300 Subject: [PATCH 0158/3326] net/9p: Switch to wait_event_killable() commit 9523feac272ccad2ad8186ba4fcc89103754de52 upstream. Because userspace gets Very Unhappy when calls like stat() and execve() return -EINTR on 9p filesystem mounts. 
For instance, when bash is looking in PATH for things to execute and some SIGCHLD interrupts stat(), bash can throw a spurious 'command not found' since it doesn't retry the stat(). In practice, hitting the problem is rare and needs a really slow/bogged down 9p server. Signed-off-by: Tuomas Tynkkynen Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- net/9p/client.c | 3 +-- net/9p/trans_virtio.c | 13 ++++++------- net/9p/trans_xen.c | 4 ++-- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/net/9p/client.c b/net/9p/client.c index b2583ee2fbe9ce..b433aff5ff13ce 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -773,8 +773,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) } again: /* Wait for the response */ - err = wait_event_interruptible(*req->wq, - req->status >= REQ_STATUS_RCVD); + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD); /* * Make sure our req is coherent with regard to updates in other diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index f24b25c25106fb..f3a4efcf145642 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -286,8 +286,8 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req) if (err == -ENOSPC) { chan->ring_bufs_avail = 0; spin_unlock_irqrestore(&chan->lock, flags); - err = wait_event_interruptible(*chan->vc_wq, - chan->ring_bufs_avail); + err = wait_event_killable(*chan->vc_wq, + chan->ring_bufs_avail); if (err == -ERESTARTSYS) return err; @@ -327,7 +327,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan, * Other zc request to finish here */ if (atomic_read(&vp_pinned) >= chan->p9_max_pages) { - err = wait_event_interruptible(vp_wq, + err = wait_event_killable(vp_wq, (atomic_read(&vp_pinned) < chan->p9_max_pages)); if (err == -ERESTARTSYS) return err; @@ -471,8 +471,8 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, if (err == -ENOSPC) { chan->ring_bufs_avail = 0; spin_unlock_irqrestore(&chan->lock, flags); - err = wait_event_interruptible(*chan->vc_wq, - chan->ring_bufs_avail); + err = wait_event_killable(*chan->vc_wq, + chan->ring_bufs_avail); if (err == -ERESTARTSYS) goto err_out; @@ -489,8 +489,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, virtqueue_kick(chan->vq); spin_unlock_irqrestore(&chan->lock, flags); p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); - err = wait_event_interruptible(*req->wq, - req->status >= REQ_STATUS_RCVD); + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD); /* * Non kernel buffers are pinned, unpin them */ diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 6ad3e043c6174a..325c56043007d8 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -156,8 +156,8 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req) ring = &priv->rings[num]; again: - while (wait_event_interruptible(ring->wq, - p9_xen_write_todo(ring, size)) != 0) + while (wait_event_killable(ring->wq, + p9_xen_write_todo(ring, size)) != 0) ; spin_lock_irqsave(&ring->lock, flags); From 645fbe74f18409fcafca73763812190e077d2174 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 6 Nov 2017 11:59:05 -0800 Subject: [PATCH 0159/3326] scsi: qla2xxx: Suppress a kernel complaint in qla_init_base_qpair() commit 8653188763b56e0bcbdcab30cc7b059672c900ac upstream. 
Avoid that the following is reported while loading the qla2xxx kernel module: BUG: using smp_processor_id() in preemptible [00000000] code: modprobe/783 caller is debug_smp_processor_id+0x17/0x20 CPU: 7 PID: 783 Comm: modprobe Not tainted 4.14.0-rc8-dbg+ #2 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 Call Trace: dump_stack+0x8e/0xce check_preemption_disabled+0xe3/0xf0 debug_smp_processor_id+0x17/0x20 qla2x00_probe_one+0xf43/0x26c0 [qla2xxx] pci_device_probe+0xca/0x140 driver_probe_device+0x2e2/0x440 __driver_attach+0xa3/0xe0 bus_for_each_dev+0x5f/0x90 driver_attach+0x19/0x20 bus_add_driver+0x1c0/0x260 driver_register+0x5b/0xd0 __pci_register_driver+0x63/0x70 qla2x00_module_init+0x1d6/0x222 [qla2xxx] do_one_initcall+0x3c/0x163 do_init_module+0x55/0x1eb load_module+0x20a2/0x2890 SYSC_finit_module+0xd7/0xf0 SyS_finit_module+0x9/0x10 entry_SYSCALL_64_fastpath+0x23/0xc2 Fixes: commit 8abfa9e22683 ("scsi: qla2xxx: Add function call to qpair for door bell") Signed-off-by: Bart Van Assche Cc: Quinn Tran Cc: Himanshu Madhani Acked-by: Himanshu Madhani Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/qla2xxx/qla_os.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index dce42a41687657..6eaaa326e508f8 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -388,7 +388,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list); ha->base_qpair->enable_class_2 = ql2xenableclass2; /* init qpair to this cpu. Will adjust at run time. */ - qla_cpu_update(rsp->qpair, smp_processor_id()); + qla_cpu_update(rsp->qpair, raw_smp_processor_id()); ha->base_qpair->pdev = ha->pdev; if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) From 49414387c4ac273cb7e62632269ea3bef2c7f422 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Wed, 11 Oct 2017 05:54:25 +0900 Subject: [PATCH 0160/3326] scsi: sd_zbc: Fix sd_zbc_read_zoned_characteristics() commit 4a109032e3941413d8a029f619543fc5aec1d26d upstream. The three values starting at byte 8 of the Zoned Block Device Characteristics VPD page B6h are 32 bits values, not 64bits. So use get_unaligned_be32() to retrieve the values and not get_unaligned_be64() Fixes: 89d947561077 ("sd: Implement support for ZBC devices") Signed-off-by: Damien Le Moal Reviewed-by: Bart Van Assche Reviewed-by: Johannes Thumshirn Reviewed-by: Christoph Hellwig Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/sd_zbc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 8aa54779aac1b4..2eb61d54bbb48f 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -375,15 +375,15 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp, if (sdkp->device->type != TYPE_ZBC) { /* Host-aware */ sdkp->urswrz = 1; - sdkp->zones_optimal_open = get_unaligned_be64(&buf[8]); - sdkp->zones_optimal_nonseq = get_unaligned_be64(&buf[12]); + sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]); + sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]); sdkp->zones_max_open = 0; } else { /* Host-managed */ sdkp->urswrz = buf[4] & 1; sdkp->zones_optimal_open = 0; sdkp->zones_optimal_nonseq = 0; - sdkp->zones_max_open = get_unaligned_be64(&buf[16]); + sdkp->zones_max_open = get_unaligned_be32(&buf[16]); } return 0; From 890d9fcd787e31d746070f6e4d5cdb674f8f1efc Mon Sep 17 00:00:00 2001 From: Dick Kennedy Date: Fri, 29 Sep 2017 17:34:27 -0700 Subject: [PATCH 0161/3326] scsi: lpfc: fix pci hot plug crash in timer management routines commit 1901762f2ca2747ed269239ca5332a8023ce4e3d upstream. During pci hot plug, the kernel crashes in timer management code. The sli4 remove_one handler is not stoping the timers as it starts to remove the port so that it can be swapped. Fix: Stop the timers early in the handler routine. Note: Fix in SLI-4 only. SLI-3 already stopped the timers properly. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_init.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 100bc4c8798d76..101ff3ddf11157 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -11420,6 +11420,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) lpfc_debugfs_terminate(vport); lpfc_sli4_hba_unset(phba); + lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->hbalock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->hbalock); From 6d697650498c0e540d2c1a23b07395ca7997055e Mon Sep 17 00:00:00 2001 From: Dick Kennedy Date: Fri, 29 Sep 2017 17:34:28 -0700 Subject: [PATCH 0162/3326] scsi: lpfc: fix pci hot plug crash in list_add call commit 401bb4169da655f3e5d28d0b208182e1ab60bf2a upstream. During pci hot plug, the kernel crashes in a list_add_call The lookup by tag function will return null if the IOCB is out of range or does not have the on txcmplq flag set. Fix: Check for null return from lookup by tag. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_sli.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8b119f87b51d67..b8513c1adcef46 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -12507,19 +12507,21 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, /* Look up the ELS command IOCB and create pseudo response IOCB */ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); - /* Put the iocb back on the txcmplq */ - lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); - spin_unlock_irqrestore(&pring->ring_lock, iflags); - if (unlikely(!cmdiocbq)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0386 ELS complete with no corresponding " - "cmdiocb: iotag (%d)\n", - bf_get(lpfc_wcqe_c_request_tag, wcqe)); + "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", + wcqe->word0, wcqe->total_data_placed, + wcqe->parameter, wcqe->word3); lpfc_sli_release_iocbq(phba, irspiocbq); return NULL; } + /* Put the iocb back on the txcmplq */ + lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + /* Fake the irspiocbq and copy necessary response information */ lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); @@ -17137,7 +17139,8 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, if (pcmd && pcmd->virt) dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); kfree(pcmd); - lpfc_sli_release_iocbq(phba, iocbq); + if (iocbq) + lpfc_sli_release_iocbq(phba, iocbq); lpfc_in_buf_free(phba, &dmabuf->dbuf); } From 27b1313ce913feeef151d167eabc4a8f175d3ad3 Mon Sep 17 00:00:00 2001 From: Dick Kennedy Date: Fri, 29 Sep 2017 17:34:29 -0700 Subject: [PATCH 0163/3326] scsi: lpfc: Fix crash receiving ELS while detaching driver commit 1234a6d54fed8a00091968c4eb2fb52e1cbb8e2e upstream. The driver crashes when attempting to use a freed ndpl pointer. The pci_remove_one handler runs on a separate kernel thread. The order of the removal is starting by freeing all of the ndlps and then disabling interrupts. In between these two events the driver can still receive an ELS and process it. When it tries to use the ndlp pointer will be NULL Change the order of the pci_remove_one vs disable interrupts so that interrupts are disabled before the ndlp's are freed. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_attr.c | 6 ++++-- drivers/scsi/lpfc/lpfc_bsg.c | 4 +++- drivers/scsi/lpfc/lpfc_els.c | 7 ++++++- drivers/scsi/lpfc/lpfc_hbadisc.c | 5 ++++- drivers/scsi/lpfc/lpfc_init.c | 14 +++++++------- drivers/scsi/lpfc/lpfc_nportdisc.c | 2 +- drivers/scsi/lpfc/lpfc_sli.c | 12 ++++++++++++ 7 files changed, 37 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c17677f494afe7..dc6519b2c53ad4 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3134,7 +3134,8 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max); + return snprintf(buf, PAGE_SIZE, "%d\n", + pring ? 
pring->txq_max : 0); } static DEVICE_ATTR(txq_hw, S_IRUGO, @@ -3147,7 +3148,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max); + return snprintf(buf, PAGE_SIZE, "%d\n", + pring ? pring->txcmplq_max : 0); } static DEVICE_ATTR(txcmplq_hw, S_IRUGO, diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index fe9e1c079c20fc..d89816222b2305 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2911,7 +2911,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, } } - if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { + if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) { ret_val = -ENOMEM; goto err_post_rxbufs_exit; } @@ -5421,6 +5421,8 @@ lpfc_bsg_timeout(struct bsg_job *job) struct lpfc_iocbq *check_iocb, *next_iocb; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return -EIO; /* if job's driver data is NULL, the command completed or is in the * the process of completing. In this case, return status to request diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 468a66371de9d0..3ebf6ccba6e63c 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -7430,6 +7430,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) timeout = (uint32_t)(phba->fc_ratov << 1); pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; if ((phba->pport->load_flag & FC_UNLOADING)) return; @@ -9310,6 +9312,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, list) { @@ -9416,7 +9421,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, rxid, 1); /* Check if TXQ queue needs to be serviced */ - if (!(list_empty(&pring->txq))) + if (pring && !list_empty(&pring->txq)) lpfc_worker_wake_up(phba); return; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 20808349a80e99..499df9d1733913 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -3324,7 +3324,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Unblock ELS traffic */ pring = lpfc_phba_elsring(phba); - pring->flag &= ~LPFC_STOP_IOCB_EVENT; + if (pring) + pring->flag &= ~LPFC_STOP_IOCB_EVENT; /* Check for error */ if (mb->mbxStatus) { @@ -5430,6 +5431,8 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) psli = &phba->sli; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; /* Error matching iocb on txq or txcmplq * First check the txq. diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 101ff3ddf11157..6acf1bb1d32001 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -11404,6 +11404,13 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) /* Remove FC host and then SCSI host with the physical port */ fc_remove_host(shost); scsi_remove_host(shost); + /* + * Bring down the SLI Layer. This step disables all interrupts, + * clears the rings, discards all mailbox commands, and resets + * the HBA FCoE function. + */ + lpfc_debugfs_terminate(vport); + lpfc_sli4_hba_unset(phba); /* Perform ndlp cleanup on the physical port. 
The nvme and nvmet * localports are destroyed after to cleanup all transport memory. @@ -11412,13 +11419,6 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) lpfc_nvmet_destroy_targetport(phba); lpfc_nvme_destroy_localport(vport); - /* - * Bring down the SLI Layer. This step disables all interrupts, - * clears the rings, discards all mailbox commands, and resets - * the HBA FCoE function. - */ - lpfc_debugfs_terminate(vport); - lpfc_sli4_hba_unset(phba); lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->hbalock); diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index f3ad7cac355d3e..b6957d944b9ac4 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -216,7 +216,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) pring = lpfc_phba_elsring(phba); /* In case of error recovery path, we might have a NULL pring here */ - if (!pring) + if (unlikely(!pring)) return; /* Abort outstanding I/O on NPort */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index b8513c1adcef46..9024521b97c723 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -10632,6 +10632,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) return 0; + if (!pring) { + if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) + cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; + else + cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; + goto abort_iotag_exit; + } + /* * If we're unloading, don't abort iocb on the ELS ring, but change * the callback so that nothing happens when it finishes. @@ -12500,6 +12508,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, unsigned long iflags; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return NULL; wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; spin_lock_irqsave(&pring->ring_lock, iflags); @@ -18694,6 +18704,8 @@ lpfc_drain_txq(struct lpfc_hba *phba) uint32_t txq_cnt = 0; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return 0; spin_lock_irqsave(&pring->ring_lock, iflags); list_for_each_entry(piocbq, &pring->txq, list) { From 55d8b65043e7726d907c99d0034ad29669f98167 Mon Sep 17 00:00:00 2001 From: Dick Kennedy Date: Fri, 29 Sep 2017 17:34:35 -0700 Subject: [PATCH 0164/3326] scsi: lpfc: Fix FCP hba_wqidx assignment commit 8e036a9497c5d565baafda4c648f2f372999a547 upstream. The driver is encountering oops in lpfc_sli_calc_ring. The driver is setting hba_wqidx for FCP based on the policy in use for NVME. The two may not be the same. Change to set the wqidx based on the FCP policy. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_sli.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 9024521b97c723..455f3ce9fda941 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -9396,10 +9396,13 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) * for abort iocb hba_wqidx should already * be setup based on what work queue we used. 
*/ - if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) + if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { piocb->hba_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb->context1); + piocb->hba_wqidx = piocb->hba_wqidx % + phba->cfg_fcp_io_channel; + } return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; } else { if (unlikely(!phba->sli4_hba.oas_wq)) From f5b7da1110a481f64f3a0530862a1ac19fc4d48d Mon Sep 17 00:00:00 2001 From: Dick Kennedy Date: Fri, 29 Sep 2017 17:34:39 -0700 Subject: [PATCH 0165/3326] scsi: lpfc: Fix oops if nvmet_fc_register_targetport fails commit e7981a2c725f8e237f749fa1358997707d57e32c upstream. if nvmet targetport registration fails, the driver encounters a NULL pointer oops in lpfc_hb_timeout_handler. To fix: if registration fails, ensure nvmet_support is cleared on the port structure. Also enhanced the log message on failure. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_nvmet.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 0b7c1a49e203f8..3c5b054a56ac44 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1138,9 +1138,14 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) #endif if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, - "6025 Cannot register NVME targetport " - "x%x\n", error); + "6025 Cannot register NVME targetport x%x: " + "portnm %llx nodenm %llx segs %d qs %d\n", + error, + pinfo.port_name, pinfo.node_name, + lpfc_tgttemplate.max_sgl_segments, + lpfc_tgttemplate.max_hw_queues); phba->targetport = NULL; + phba->nvmet_support = 0; lpfc_nvmet_cleanup_io_context(phba); @@ -1152,9 +1157,11 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6026 Registered NVME " "targetport: %p, private %p " - "portnm %llx nodenm %llx\n", + "portnm %llx nodenm %llx segs %d qs %d\n", phba->targetport, tgtp, - pinfo.port_name, pinfo.node_name); + pinfo.port_name, pinfo.node_name, + lpfc_tgttemplate.max_sgl_segments, + lpfc_tgttemplate.max_hw_queues); atomic_set(&tgtp->rcv_ls_req_in, 0); atomic_set(&tgtp->rcv_ls_req_out, 0); From 84ab17cf4f30740eff6247d2bf657f8ee527b034 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 27 Oct 2017 12:32:59 -0700 Subject: [PATCH 0166/3326] iscsi-target: Make TASK_REASSIGN use proper se_cmd->cmd_kref commit ae072726f6109bb1c94841d6fb3a82dde298ea85 upstream. Since commit 59b6986dbf fixed a potential NULL pointer dereference by allocating a se_tmr_req for ISCSI_TM_FUNC_TASK_REASSIGN, the se_tmr_req is currently leaked by iscsit_free_cmd() because no iscsi_cmd->se_cmd.se_tfo was associated. To address this, treat ISCSI_TM_FUNC_TASK_REASSIGN like any other TMR and call transport_init_se_cmd() + target_get_sess_cmd() to setup iscsi_cmd->se_cmd.se_tfo with se_cmd->cmd_kref of 2. This will ensure normal release operation once se_cmd->cmd_kref reaches zero and target_release_cmd_kref() is invoked, se_tmr_req will be released via existing target_free_cmd_mem() and core_tmr_release_req() code. 
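A standalone sketch, with invented names and a plain integer instead of a real kref, of the reference scheme described above: the command starts with two references (submission path plus fabric response path) and the TMR request memory is only torn down when the final put drops the count to zero, which is what closes the leak.

#include <stdio.h>
#include <stdlib.h>

struct cmd_sketch {
        int kref;               /* plain int: single-threaded illustration */
        void *tmr_req;          /* the resource that used to leak */
};

static struct cmd_sketch *cmd_get_new(void)
{
        struct cmd_sketch *cmd = calloc(1, sizeof(*cmd));

        cmd->tmr_req = malloc(32);
        cmd->kref = 2;          /* submission path + fabric response path */
        return cmd;
}

static void cmd_put(struct cmd_sketch *cmd)
{
        if (--cmd->kref > 0)
                return;
        /* Last reference: release everything, including the TMR request. */
        free(cmd->tmr_req);
        free(cmd);
        printf("command fully released\n");
}

int main(void)
{
        struct cmd_sketch *cmd = cmd_get_new();

        cmd_put(cmd);   /* response queued to the fabric driver */
        cmd_put(cmd);   /* connection/session teardown drops the last ref */
        return 0;
}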
Reported-by: Donald White Cc: Donald White Cc: Mike Christie Cc: Hannes Reinecke Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 5001261f5d69d7..c32d43d6ccdfa6 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1960,7 +1960,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct iscsi_tmr_req *tmr_req; struct iscsi_tm *hdr; int out_of_order_cmdsn = 0, ret; - bool sess_ref = false; u8 function, tcm_function = TMR_UNKNOWN; hdr = (struct iscsi_tm *) buf; @@ -1993,22 +1992,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, cmd->data_direction = DMA_NONE; cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL); - if (!cmd->tmr_req) + if (!cmd->tmr_req) { return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + + transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, + conn->sess->se_sess, 0, DMA_NONE, + TCM_SIMPLE_TAG, cmd->sense_buffer + 2); + + target_get_sess_cmd(&cmd->se_cmd, true); /* * TASK_REASSIGN for ERL=2 / connection stays inside of * LIO-Target $FABRIC_MOD */ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, - conn->sess->se_sess, 0, DMA_NONE, - TCM_SIMPLE_TAG, cmd->sense_buffer + 2); - - target_get_sess_cmd(&cmd->se_cmd, true); - sess_ref = true; tcm_function = iscsit_convert_tmf(function); if (tcm_function == TMR_UNKNOWN) { pr_err("Unknown iSCSI TMR Function:" @@ -2124,12 +2124,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * For connection recovery, this is also the default action for * TMR TASK_REASSIGN. */ - if (sess_ref) { - pr_debug("Handle TMR, using sess_ref=true check\n"); - target_put_sess_cmd(&cmd->se_cmd); - } - iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + target_put_sess_cmd(&cmd->se_cmd); return 0; } EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd); From 8009209a65167c899729e9a9d40fd3b809474c9d Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 27 Oct 2017 20:52:56 -0700 Subject: [PATCH 0167/3326] iscsi-target: Fix non-immediate TMR reference leak commit 3fc9fb13a4b2576aeab86c62fd64eb29ab68659c upstream. This patch fixes a se_cmd->cmd_kref reference leak that can occur when a non immediate TMR is proceeded our of command sequence number order, and CMDSN_LOWER_THAN_EXP is returned by iscsit_sequence_cmd(). To address this bug, call target_put_sess_cmd() during this special case following what iscsit_process_scsi_cmd() does upon CMDSN_LOWER_THAN_EXP. 
Cc: Mike Christie Cc: Hannes Reinecke Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index c32d43d6ccdfa6..d9ba4ee2c62b4b 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -2099,12 +2099,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); - if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) + if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) { out_of_order_cmdsn = 1; - else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) + } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(&cmd->se_cmd); return 0; - else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { return -1; + } } iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); From 31c913191db3c680e1b71547e8d46713103ee805 Mon Sep 17 00:00:00 2001 From: tangwenji Date: Wed, 16 Aug 2017 16:39:00 +0800 Subject: [PATCH 0168/3326] target: fix null pointer regression in core_tmr_drain_tmr_list commit 88fb2fa7db7510bf1078226ab48d162d9854f3d4 upstream. The target system kernel crash when the initiator executes the sg_persist -A command,because of the second argument to be set to NULL when core_tmr_lun_reset is called in core_scsi3_pro_preempt function. This fixes a regression originally introduced by: commit 51ec502a32665fed66c7f03799ede4023b212536 Author: Bart Van Assche Date: Tue Feb 14 16:25:54 2017 -0800 target: Delete tmr from list before processing Signed-off-by: tangwenji Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_tmr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index e22847bd79b95b..61909b23e9591b 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -217,7 +217,8 @@ static void core_tmr_drain_tmr_list( * LUN_RESET tmr.. */ spin_lock_irqsave(&dev->se_tmr_lock, flags); - list_del_init(&tmr->tmr_list); + if (tmr) + list_del_init(&tmr->tmr_list); list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { cmd = tmr_p->task_cmd; if (!cmd) { From 2ecb83e55b71c2831b96140aa564e6d72e956f0c Mon Sep 17 00:00:00 2001 From: tangwenji Date: Thu, 17 Aug 2017 19:51:54 +0800 Subject: [PATCH 0169/3326] target: fix buffer offset in core_scsi3_pri_read_full_status commit c58a252beb04cf0e02d6a746b2ed7ea89b6deb71 upstream. When at least two initiators register pr on the same LUN, the target returns the exception data due to buffer offset error, therefore the initiator executes command 'sg_persist -s' may cause the initiator to appear segfault error. 
This fixes a regression originally introduced by: commit a85d667e58bddf73be84d1981b41eaac985ed216 Author: Bart Van Assche Date: Tue May 23 16:48:27 2017 -0700 target: Use {get,put}_unaligned_be*() instead of open coding these functions Signed-off-by: tangwenji Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_pr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index dd2cd8048582ce..9f25c9c6f67da5 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) * Set the ADDITIONAL DESCRIPTOR LENGTH */ put_unaligned_be32(desc_len, &buf[off]); + off += 4; /* * Size of full desctipor header minus TransportID * containing $FABRIC_MOD specific) initiator device/port From 66abe4fc44689abe5c8b23c22ea5a4092f46d7eb Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 22 Sep 2017 16:48:28 -0700 Subject: [PATCH 0170/3326] target: Fix QUEUE_FULL + SCSI task attribute handling commit 1c79df1f349fb6050016cea4ef1dfbc3853a5685 upstream. This patch fixes a bug during QUEUE_FULL where transport_complete_qf() calls transport_complete_task_attr() after it's already been invoked by target_complete_ok_work() or transport_generic_request_failure() during initial completion, preceeding QUEUE_FULL. This will result in se_device->simple_cmds, se_device->dev_cur_ordered_id and/or se_device->dev_ordered_sync being updated multiple times for a single se_cmd. To address this bug, clear SCF_TASK_ATTR_SET after the first call to transport_complete_task_attr(), and avoid updating SCSI task attribute related counters for any subsequent calls. Also, when a se_cmd is deferred due to ordered tags and executed via target_restart_delayed_cmds(), set CMD_T_SENT before execution matching what target_execute_cmd() does. Cc: Michael Cyr Cc: Bryant G. Ly Cc: Mike Christie Cc: Hannes Reinecke Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_transport.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 836d552b0385e9..576d081325f4c1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2010,6 +2010,8 @@ static void target_restart_delayed_cmds(struct se_device *dev) list_del(&cmd->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); + cmd->transport_state |= CMD_T_SENT; + __target_execute_cmd(cmd, true); if (cmd->sam_task_attr == TCM_ORDERED_TAG) @@ -2045,6 +2047,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd) pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", dev->dev_cur_ordered_id); } + cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; + restart: target_restart_delayed_cmds(dev); } From 16870b7ba2855204af532c23fa973d530f4959b6 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 29 Sep 2017 16:03:24 -0700 Subject: [PATCH 0171/3326] target: Fix caw_sem leak in transport_generic_request_failure commit fd2f928b0ddd2fe8876d4f1344df2ace2b715a4d upstream. With the recent addition of transport_check_aborted_status() within transport_generic_request_failure() to avoid sending a SCSI status exception after CMD_T_ABORTED w/ TAS=1 has occured, it introduced a COMPARE_AND_WRITE early failure regression. 
Namely when COMPARE_AND_WRITE fails and se_device->caw_sem has been taken by sbc_compare_and_write(), if the new check for transport_check_aborted_status() returns true and exits, cmd->transport_complete_callback() -> compare_and_write_post() is skipped never releasing se_device->caw_sem. This regression was originally introduced by: commit e3b88ee95b4e4bf3e9729a4695d695b9c7c296c8 Author: Bart Van Assche Date: Tue Feb 14 16:25:45 2017 -0800 target: Fix handling of aborted failed commands To address this bug, move the transport_check_aborted_status() call after transport_complete_task_attr() and cmd->transport_complete_callback(). Cc: Mike Christie Cc: Hannes Reinecke Cc: Bart Van Assche Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_transport.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 576d081325f4c1..953b99d8654c95 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1730,9 +1730,6 @@ void transport_generic_request_failure(struct se_cmd *cmd, { int ret = 0, post_ret = 0; - if (transport_check_aborted_status(cmd, 1)) - return; - pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", sense_reason); target_show_cmd("-----[ ", cmd); @@ -1741,6 +1738,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, * For SAM Task Attribute emulation for failed struct se_cmd */ transport_complete_task_attr(cmd); + /* * Handle special case for COMPARE_AND_WRITE failure, where the * callback is expected to drop the per device ->caw_sem. @@ -1749,6 +1747,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, cmd->transport_complete_callback) cmd->transport_complete_callback(cmd, false, &post_ret); + if (transport_check_aborted_status(cmd, 1)) + return; + switch (sense_reason) { case TCM_NON_EXISTENT_LUN: case TCM_UNSUPPORTED_SCSI_OPCODE: From 3c68944bda81603075e1205f066c9ff855d3ffe0 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 29 Sep 2017 16:43:11 -0700 Subject: [PATCH 0172/3326] target: Fix quiese during transport_write_pending_qf endless loop commit 9574a497df2bbc0a676b609ce0dd24d237cee3a6 upstream. This patch fixes a potential end-less loop during QUEUE_FULL, where cmd->se_tfo->write_pending() callback fails repeatedly but __transport_wait_for_tasks() has already been invoked to quiese the outstanding se_cmd descriptor. To address this bug, this patch adds a CMD_T_STOP|CMD_T_ABORTED check within transport_write_pending_qf() and invokes the existing se_cmd->t_transport_stop_comp to signal quiese completion back to __transport_wait_for_tasks(). Cc: Mike Christie Cc: Hannes Reinecke Cc: Bryant G. 
Ly Cc: Michael Cyr Cc: Potnuri Bharat Teja Cc: Sagi Grimberg Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_transport.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 953b99d8654c95..cbf0ff75be35d6 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2575,7 +2575,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd); static void transport_write_pending_qf(struct se_cmd *cmd) { + unsigned long flags; int ret; + bool stop; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (stop) { + pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + complete_all(&cmd->t_transport_stop_comp); + return; + } ret = cmd->se_tfo->write_pending(cmd); if (ret) { From fe1325027b4de33159853e0cf942abee58d01b74 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 27 Oct 2017 22:19:26 -0800 Subject: [PATCH 0173/3326] target: Avoid early CMD_T_PRE_EXECUTE failures during ABORT_TASK commit 1c21a48055a67ceb693e9c2587824a8de60a217c upstream. This patch fixes bug where early se_cmd exceptions that occur before backend execution can result in use-after-free if/when a subsequent ABORT_TASK occurs for the same tag. Since an early se_cmd exception will have had se_cmd added to se_session->sess_cmd_list via target_get_sess_cmd(), it will not have CMD_T_COMPLETE set by the usual target_complete_cmd() backend completion path. This causes a subsequent ABORT_TASK + __target_check_io_state() to signal ABORT_TASK should proceed. As core_tmr_abort_task() executes, it will bring the outstanding se_cmd->cmd_kref count down to zero releasing se_cmd, after se_cmd has already been queued with error status into fabric driver response path code. To address this bug, introduce a CMD_T_PRE_EXECUTE bit that is set at target_get_sess_cmd() time, and cleared immediately before backend driver dispatch in target_execute_cmd() once CMD_T_ACTIVE is set. Then, check CMD_T_PRE_EXECUTE within __target_check_io_state() to determine when an early exception has occured, and avoid aborting this se_cmd since it will have already been queued into fabric driver response path code. 
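For illustration, the abort-time check described above boils down to the condensed sketch below (not a drop-in replacement for __target_check_io_state(); locking is omitted and the real hunk follows):

	/* Skip aborting a command that failed before ever reaching the
	 * backend: it has already been queued into the fabric response path.
	 * CMD_T_PRE_EXECUTE is set in target_get_sess_cmd() and cleared in
	 * target_execute_cmd() just before CMD_T_ACTIVE | CMD_T_SENT are set.
	 */
	if ((se_cmd->transport_state & CMD_T_PRE_EXECUTE) && se_cmd->scsi_status) {
		pr_debug("Attempted to abort io tag: %llu early failure status: 0x%02x\n",
			 se_cmd->tag, se_cmd->scsi_status);
		return false;	/* the real code also drops se_cmd->t_state_lock here */
	}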
Reported-by: Donald White Cc: Donald White Cc: Mike Christie Cc: Hannes Reinecke Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_tmr.c | 9 +++++++++ drivers/target/target_core_transport.c | 2 ++ include/target/target_core_base.h | 1 + 3 files changed, 12 insertions(+) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 61909b23e9591b..9c7bc1ca341a68 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd, spin_unlock(&se_cmd->t_state_lock); return false; } + if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) { + if (se_cmd->scsi_status) { + pr_debug("Attempted to abort io tag: %llu early failure" + " status: 0x%02x\n", se_cmd->tag, + se_cmd->scsi_status); + spin_unlock(&se_cmd->t_state_lock); + return false; + } + } if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { pr_debug("Attempted to abort io tag: %llu already shutdown," " skipping\n", se_cmd->tag); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index cbf0ff75be35d6..e6d51135d1055c 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1974,6 +1974,7 @@ void target_execute_cmd(struct se_cmd *cmd) } cmd->t_state = TRANSPORT_PROCESSING; + cmd->transport_state &= ~CMD_T_PRE_EXECUTE; cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); @@ -2682,6 +2683,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) ret = -ESHUTDOWN; goto out; } + se_cmd->transport_state |= CMD_T_PRE_EXECUTE; list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); out: spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index f5db145e68ecae..0d924e968c94b4 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -490,6 +490,7 @@ struct se_cmd { #define CMD_T_STOP (1 << 5) #define CMD_T_TAS (1 << 10) #define CMD_T_FABRIC_STOP (1 << 11) +#define CMD_T_PRE_EXECUTE (1 << 12) spinlock_t t_state_lock; struct kref cmd_kref; struct completion t_transport_stop_comp; From 7ad1a61d32af8a7d39dedcafaaf1f209237fa806 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sat, 11 Nov 2017 16:08:34 +0100 Subject: [PATCH 0174/3326] mtd: Avoid probe failures when mtd->dbg.dfs_dir is invalid commit 1530578abdac4edce9244c7a1962ded3ffdb58ce upstream. Commit e8e3edb95ce6 ("mtd: create per-device and module-scope debugfs entries") tried to make MTD related debugfs stuff consistent across the MTD framework by creating a root /mtd/ directory containing one directory per MTD device. The problem is that, by default, the MTD layer only registers the master device if no partitions are defined for this master. This behavior breaks all drivers that expect mtd->dbg.dfs_dir to be filled correctly after calling mtd_device_register() in order to add their own debugfs entries. The only way we can force all MTD masters to be registered no matter if they expose partitions or not is by enabling the CONFIG_MTD_PARTITIONED_MASTER option. In such situations, there's no other solution but to accept skipping debugfs initialization when dbg.dfs_dir is invalid, and when this happens, inform the user that he should consider enabling CONFIG_MTD_PARTITIONED_MASTER. Fixes: e8e3edb95ce6 ("mtd: create per-device and module-scope debugfs entries") Cc: Mario J. 
Rugiero Signed-off-by: Boris Brezillon Reported-by: Richard Weinberger Signed-off-by: Richard Weinberger Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/devices/docg3.c | 7 ++++++- drivers/mtd/nand/nandsim.c | 13 +++++++++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 84b16133554beb..0806f72102c09d 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor) struct dentry *root = floor->dbg.dfs_dir; struct docg3 *docg3 = floor->priv; - if (IS_ERR_OR_NULL(root)) + if (IS_ERR_OR_NULL(root)) { + if (IS_ENABLED(CONFIG_DEBUG_FS) && + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + dev_warn(floor->dev.parent, + "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); return; + } debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3, &flashcontrol_fops); diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 246b4393118e4d..44322a363ba549 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev) struct dentry *root = nsmtd->dbg.dfs_dir; struct dentry *dent; - if (!IS_ENABLED(CONFIG_DEBUG_FS)) + /* + * Just skip debugfs initialization when the debugfs directory is + * missing. + */ + if (IS_ERR_OR_NULL(root)) { + if (IS_ENABLED(CONFIG_DEBUG_FS) && + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); return 0; - - if (IS_ERR_OR_NULL(root)) - return -1; + } dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, root, dev, &dfs_fops); From 460bad5fe1ae2ebcd9cb73ad7b51538996b722b2 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Thu, 5 Oct 2017 18:53:19 +0200 Subject: [PATCH 0175/3326] mtd: nand: Export nand_reset() symbol commit b9bb98424c51437973b854691aa1e9b2bfd348f5 upstream. Commit 6e532afaca8e ("mtd: nand: atmel: Add PM ops") started to use the nand_reset() function which was not yet exported by the NAND framework (because it was only used internally before that). Export this symbol to avoid build errors when the driver is enabled as a module. Fixes: 6e532afaca8e ("mtd: nand: atmel: Add PM ops") Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/nand_base.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 12edaae17d81f2..fa451c1f30b3a1 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -1246,6 +1246,7 @@ int nand_reset(struct nand_chip *chip, int chipnr) return 0; } +EXPORT_SYMBOL_GPL(nand_reset); /** * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data From 54d8f6c8a454c5f177906d1a3a74b02113afc9bf Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Thu, 5 Oct 2017 18:57:24 +0200 Subject: [PATCH 0176/3326] mtd: nand: atmel: Actually use the PM ops commit 1533bfa6f6b6bcca1ea1f172ef4a1c5ce5e7b335 upstream. commit 6e532afaca8e ("mtd: nand: atmel: Add PM ops") was defining PM ops but nothing was using/referencing those PM ops. 
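For context, wiring dev_pm_ops into a platform_driver normally looks like the generic sketch below (the example_* names are placeholders, not the Atmel driver's symbols; the actual one-line fix is in the hunk that follows):

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		/* Without this assignment the PM callbacks above are never
		 * invoked -- which is exactly the bug being fixed here. */
		.pm = &example_pm_ops,
	},
	.probe = example_probe,
	.remove = example_remove,
};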
Fixes: 6e532afaca8e ("mtd: nand: atmel: Add PM ops") Cc: Romain Izard Signed-off-by: Boris Brezillon Acked-by: Wenyou Yang Tested-by: Romain Izard Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/atmel/nand-controller.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index f25eca79f4e56d..68c9d98a3347c6 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c @@ -2547,6 +2547,7 @@ static struct platform_driver atmel_nand_controller_driver = { .driver = { .name = "atmel-nand-controller", .of_match_table = of_match_ptr(atmel_nand_controller_of_ids), + .pm = &atmel_nand_controller_pm_ops, }, .probe = atmel_nand_controller_probe, .remove = atmel_nand_controller_remove, From d54ca1d0ea19feb7aedd256b9f58a7478ef23819 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Fri, 20 Oct 2017 15:16:21 +0300 Subject: [PATCH 0177/3326] mtd: nand: omap2: Fix subpage write commit 739c64414f01748a36e7d82c8e0611dea94412bd upstream. Since v4.12, NAND subpage writes were causing a NULL pointer dereference on OMAP platforms (omap2-nand) using OMAP_ECC_BCH4_CODE_HW, OMAP_ECC_BCH8_CODE_HW and OMAP_ECC_BCH16_CODE_HW. This is because for those ECC modes, omap_calculate_ecc_bch() generates ECC bytes for the entire (multi-sector) page and this can overflow the ECC buffer provided by nand_write_subpage_hwecc() as it expects ecc.calculate() to return ECC bytes for just one sector. However, the root cause of the problem is present since v3.9 but was not seen then as NAND buffers were being allocated as one big chunk prior to commit 3deb9979c731 ("mtd: nand: allocate aligned buffers if NAND_OWN_BUFFERS is unset"). Fix the issue by providing a OMAP optimized write_subpage() implementation. Fixes: 62116e5171e0 ("mtd: nand: omap2: Support for hardware BCH error correction.") Signed-off-by: Roger Quadros Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/omap2.c | 339 ++++++++++++++++++++++++++------------- 1 file changed, 224 insertions(+), 115 deletions(-) diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 54540c8fa1a28e..9f98f74ff221af 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2, 0x97, 0x79, 0xe5, 0x24, 0xb5}; /** - * omap_calculate_ecc_bch - Generate bytes of ECC bytes + * _omap_calculate_ecc_bch - Generate ECC bytes for one sector * @mtd: MTD device structure * @dat: The pointer to data on which ecc is computed * @ecc_code: The ecc_code buffer + * @i: The sector number (for a multi sector page) * - * Support calculating of BCH4/8 ecc vectors for the page + * Support calculating of BCH4/8/16 ECC vectors for one sector + * within a page. Sector number is in @i. 
*/ -static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, - const u_char *dat, u_char *ecc_calc) +static int _omap_calculate_ecc_bch(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc, int i) { struct omap_nand_info *info = mtd_to_omap(mtd); int eccbytes = info->nand.ecc.bytes; struct gpmc_nand_regs *gpmc_regs = &info->reg; u8 *ecc_code; - unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; + unsigned long bch_val1, bch_val2, bch_val3, bch_val4; u32 val; - int i, j; + int j; + + ecc_code = ecc_calc; + switch (info->ecc_opt) { + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: + case OMAP_ECC_BCH8_CODE_HW: + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); + bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); + bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); + *ecc_code++ = (bch_val4 & 0xFF); + *ecc_code++ = ((bch_val3 >> 24) & 0xFF); + *ecc_code++ = ((bch_val3 >> 16) & 0xFF); + *ecc_code++ = ((bch_val3 >> 8) & 0xFF); + *ecc_code++ = (bch_val3 & 0xFF); + *ecc_code++ = ((bch_val2 >> 24) & 0xFF); + *ecc_code++ = ((bch_val2 >> 16) & 0xFF); + *ecc_code++ = ((bch_val2 >> 8) & 0xFF); + *ecc_code++ = (bch_val2 & 0xFF); + *ecc_code++ = ((bch_val1 >> 24) & 0xFF); + *ecc_code++ = ((bch_val1 >> 16) & 0xFF); + *ecc_code++ = ((bch_val1 >> 8) & 0xFF); + *ecc_code++ = (bch_val1 & 0xFF); + break; + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: + case OMAP_ECC_BCH4_CODE_HW: + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); + *ecc_code++ = ((bch_val2 >> 12) & 0xFF); + *ecc_code++ = ((bch_val2 >> 4) & 0xFF); + *ecc_code++ = ((bch_val2 & 0xF) << 4) | + ((bch_val1 >> 28) & 0xF); + *ecc_code++ = ((bch_val1 >> 20) & 0xFF); + *ecc_code++ = ((bch_val1 >> 12) & 0xFF); + *ecc_code++ = ((bch_val1 >> 4) & 0xFF); + *ecc_code++ = ((bch_val1 & 0xF) << 4); + break; + case OMAP_ECC_BCH16_CODE_HW: + val = readl(gpmc_regs->gpmc_bch_result6[i]); + ecc_code[0] = ((val >> 8) & 0xFF); + ecc_code[1] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result5[i]); + ecc_code[2] = ((val >> 24) & 0xFF); + ecc_code[3] = ((val >> 16) & 0xFF); + ecc_code[4] = ((val >> 8) & 0xFF); + ecc_code[5] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result4[i]); + ecc_code[6] = ((val >> 24) & 0xFF); + ecc_code[7] = ((val >> 16) & 0xFF); + ecc_code[8] = ((val >> 8) & 0xFF); + ecc_code[9] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result3[i]); + ecc_code[10] = ((val >> 24) & 0xFF); + ecc_code[11] = ((val >> 16) & 0xFF); + ecc_code[12] = ((val >> 8) & 0xFF); + ecc_code[13] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result2[i]); + ecc_code[14] = ((val >> 24) & 0xFF); + ecc_code[15] = ((val >> 16) & 0xFF); + ecc_code[16] = ((val >> 8) & 0xFF); + ecc_code[17] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result1[i]); + ecc_code[18] = ((val >> 24) & 0xFF); + ecc_code[19] = ((val >> 16) & 0xFF); + ecc_code[20] = ((val >> 8) & 0xFF); + ecc_code[21] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result0[i]); + ecc_code[22] = ((val >> 24) & 0xFF); + ecc_code[23] = ((val >> 16) & 0xFF); + ecc_code[24] = ((val >> 8) & 0xFF); + ecc_code[25] = ((val >> 0) & 0xFF); + break; + default: + return -EINVAL; + } + + /* ECC scheme specific syndrome customizations */ + switch (info->ecc_opt) { + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: + /* Add constant polynomial to remainder, so that + * ECC of blank pages results in 0x0 on reading back + */ + for (j = 0; j < eccbytes; j++) + 
ecc_calc[j] ^= bch4_polynomial[j]; + break; + case OMAP_ECC_BCH4_CODE_HW: + /* Set 8th ECC byte as 0x0 for ROM compatibility */ + ecc_calc[eccbytes - 1] = 0x0; + break; + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: + /* Add constant polynomial to remainder, so that + * ECC of blank pages results in 0x0 on reading back + */ + for (j = 0; j < eccbytes; j++) + ecc_calc[j] ^= bch8_polynomial[j]; + break; + case OMAP_ECC_BCH8_CODE_HW: + /* Set 14th ECC byte as 0x0 for ROM compatibility */ + ecc_calc[eccbytes - 1] = 0x0; + break; + case OMAP_ECC_BCH16_CODE_HW: + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + * + * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used + * when SW based correction is required as ECC is required for one sector + * at a time. + */ +static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc) +{ + return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0); +} + +/** + * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + * + * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go. + */ +static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc) +{ + struct omap_nand_info *info = mtd_to_omap(mtd); + int eccbytes = info->nand.ecc.bytes; + unsigned long nsectors; + int i, ret; nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; for (i = 0; i < nsectors; i++) { - ecc_code = ecc_calc; - switch (info->ecc_opt) { - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: - case OMAP_ECC_BCH8_CODE_HW: - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); - bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); - bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); - *ecc_code++ = (bch_val4 & 0xFF); - *ecc_code++ = ((bch_val3 >> 24) & 0xFF); - *ecc_code++ = ((bch_val3 >> 16) & 0xFF); - *ecc_code++ = ((bch_val3 >> 8) & 0xFF); - *ecc_code++ = (bch_val3 & 0xFF); - *ecc_code++ = ((bch_val2 >> 24) & 0xFF); - *ecc_code++ = ((bch_val2 >> 16) & 0xFF); - *ecc_code++ = ((bch_val2 >> 8) & 0xFF); - *ecc_code++ = (bch_val2 & 0xFF); - *ecc_code++ = ((bch_val1 >> 24) & 0xFF); - *ecc_code++ = ((bch_val1 >> 16) & 0xFF); - *ecc_code++ = ((bch_val1 >> 8) & 0xFF); - *ecc_code++ = (bch_val1 & 0xFF); - break; - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: - case OMAP_ECC_BCH4_CODE_HW: - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); - *ecc_code++ = ((bch_val2 >> 12) & 0xFF); - *ecc_code++ = ((bch_val2 >> 4) & 0xFF); - *ecc_code++ = ((bch_val2 & 0xF) << 4) | - ((bch_val1 >> 28) & 0xF); - *ecc_code++ = ((bch_val1 >> 20) & 0xFF); - *ecc_code++ = ((bch_val1 >> 12) & 0xFF); - *ecc_code++ = ((bch_val1 >> 4) & 0xFF); - *ecc_code++ = ((bch_val1 & 0xF) << 4); - break; - case OMAP_ECC_BCH16_CODE_HW: - val = readl(gpmc_regs->gpmc_bch_result6[i]); - ecc_code[0] = ((val >> 8) & 0xFF); - ecc_code[1] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result5[i]); - ecc_code[2] = ((val >> 24) & 0xFF); - ecc_code[3] = ((val >> 16) & 0xFF); - ecc_code[4] = ((val >> 8) & 0xFF); - ecc_code[5] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result4[i]); - 
ecc_code[6] = ((val >> 24) & 0xFF); - ecc_code[7] = ((val >> 16) & 0xFF); - ecc_code[8] = ((val >> 8) & 0xFF); - ecc_code[9] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result3[i]); - ecc_code[10] = ((val >> 24) & 0xFF); - ecc_code[11] = ((val >> 16) & 0xFF); - ecc_code[12] = ((val >> 8) & 0xFF); - ecc_code[13] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result2[i]); - ecc_code[14] = ((val >> 24) & 0xFF); - ecc_code[15] = ((val >> 16) & 0xFF); - ecc_code[16] = ((val >> 8) & 0xFF); - ecc_code[17] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result1[i]); - ecc_code[18] = ((val >> 24) & 0xFF); - ecc_code[19] = ((val >> 16) & 0xFF); - ecc_code[20] = ((val >> 8) & 0xFF); - ecc_code[21] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result0[i]); - ecc_code[22] = ((val >> 24) & 0xFF); - ecc_code[23] = ((val >> 16) & 0xFF); - ecc_code[24] = ((val >> 8) & 0xFF); - ecc_code[25] = ((val >> 0) & 0xFF); - break; - default: - return -EINVAL; - } - - /* ECC scheme specific syndrome customizations */ - switch (info->ecc_opt) { - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: - /* Add constant polynomial to remainder, so that - * ECC of blank pages results in 0x0 on reading back */ - for (j = 0; j < eccbytes; j++) - ecc_calc[j] ^= bch4_polynomial[j]; - break; - case OMAP_ECC_BCH4_CODE_HW: - /* Set 8th ECC byte as 0x0 for ROM compatibility */ - ecc_calc[eccbytes - 1] = 0x0; - break; - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: - /* Add constant polynomial to remainder, so that - * ECC of blank pages results in 0x0 on reading back */ - for (j = 0; j < eccbytes; j++) - ecc_calc[j] ^= bch8_polynomial[j]; - break; - case OMAP_ECC_BCH8_CODE_HW: - /* Set 14th ECC byte as 0x0 for ROM compatibility */ - ecc_calc[eccbytes - 1] = 0x0; - break; - case OMAP_ECC_BCH16_CODE_HW: - break; - default: - return -EINVAL; - } + ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i); + if (ret) + return ret; - ecc_calc += eccbytes; + ecc_calc += eccbytes; } return 0; @@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, chip->write_buf(mtd, buf, mtd->writesize); /* Update ecc vector from GPMC result registers */ - chip->ecc.calculate(mtd, buf, &ecc_calc[0]); + omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]); ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, chip->ecc.total); @@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, return 0; } +/** + * omap_write_subpage_bch - BCH hardware ECC based subpage write + * @mtd: mtd info structure + * @chip: nand chip info structure + * @offset: column address of subpage within the page + * @data_len: data length + * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB + * @page: page number to write + * + * OMAP optimized subpage write method. + */ +static int omap_write_subpage_bch(struct mtd_info *mtd, + struct nand_chip *chip, u32 offset, + u32 data_len, const u8 *buf, + int oob_required, int page) +{ + u8 *ecc_calc = chip->buffers->ecccalc; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + int ecc_steps = chip->ecc.steps; + u32 start_step = offset / ecc_size; + u32 end_step = (offset + data_len - 1) / ecc_size; + int step, ret = 0; + + /* + * Write entire page at one go as it would be optimal + * as ECC is calculated by hardware. + * ECC is calculated for all subpages but we choose + * only what we want. 
+ */ + + /* Enable GPMC ECC engine */ + chip->ecc.hwctl(mtd, NAND_ECC_WRITE); + + /* Write data */ + chip->write_buf(mtd, buf, mtd->writesize); + + for (step = 0; step < ecc_steps; step++) { + /* mask ECC of un-touched subpages by padding 0xFF */ + if (step < start_step || step > end_step) + memset(ecc_calc, 0xff, ecc_bytes); + else + ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step); + + if (ret) + return ret; + + buf += ecc_size; + ecc_calc += ecc_bytes; + } + + /* copy calculated ECC for whole page to chip->buffer->oob */ + /* this include masked-value(0xFF) for unwritten subpages */ + ecc_calc = chip->buffers->ecccalc; + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + /* write OOB buffer to NAND device */ + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); + + return 0; +} + /** * omap_read_page_bch - BCH ecc based page read function for entire page * @mtd: mtd info structure @@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, chip->ecc.total); /* Calculate ecc bytes */ - chip->ecc.calculate(mtd, buf, ecc_calc); + omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc); ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, chip->ecc.total); @@ -2044,7 +2153,7 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 4; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = nand_bch_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = nand_chip->ecc.bytes + 1; @@ -2066,9 +2175,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 4; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; @@ -2087,7 +2196,7 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 8; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = nand_bch_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = nand_chip->ecc.bytes + 1; @@ -2109,9 +2218,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 8; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; @@ -2131,9 +2240,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 16; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + 
nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; From ff342113b7788420ae7da409e59afb8cd206b08d Mon Sep 17 00:00:00 2001 From: Brent Taylor Date: Mon, 30 Oct 2017 22:32:45 -0500 Subject: [PATCH 0178/3326] mtd: nand: Fix writing mtdoops to nand flash. commit 30863e38ebeb500a31cecee8096fb5002677dd9b upstream. When mtdoops calls mtd_panic_write(), it eventually calls panic_nand_write() in nand_base.c. In order to properly wait for the nand chip to be ready in panic_nand_wait(), the chip must first be selected. When using the atmel nand flash controller, a panic would occur due to a NULL pointer exception. Fixes: 2af7c6539931 ("mtd: Add panic_write for NAND flashes") Signed-off-by: Brent Taylor Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/nand_base.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index fa451c1f30b3a1..3f1d806e590a11 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2800,15 +2800,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const uint8_t *buf) { struct nand_chip *chip = mtd_to_nand(mtd); + int chipnr = (int)(to >> chip->chip_shift); struct mtd_oob_ops ops; int ret; - /* Wait for the device to get ready */ - panic_nand_wait(mtd, chip, 400); - /* Grab the device */ panic_nand_get_device(chip, mtd, FL_WRITING); + chip->select_chip(mtd, chipnr); + + /* Wait for the device to get ready */ + panic_nand_wait(mtd, chip, 400); + memset(&ops, 0, sizeof(ops)); ops.len = len; ops.datbuf = (uint8_t *)buf; From 39b3926aad065a371e6dba925d7835117a4b9c16 Mon Sep 17 00:00:00 2001 From: Xiaolei Li Date: Mon, 30 Oct 2017 10:39:56 +0800 Subject: [PATCH 0179/3326] mtd: nand: mtk: fix infinite ECC decode IRQ issue commit 1d2fcdcf33339c7c8016243de0f7f31cf6845e8d upstream. For MT2701 NAND Controller, there may generate infinite ECC decode IRQ during long time burn test on some platforms. Once this issue occurred, the ECC decode IRQ status cannot be cleared in the IRQ handler function, and threads cannot be scheduled. ECC HW generates decode IRQ each sector, so there will have more than one decode IRQ if read one page of large page NAND. Currently, ECC IRQ handle flow is that we will check whether it is decode IRQ at first by reading the register ECC_DECIRQ_STA. This is a read-clear type register. If this IRQ is decode IRQ, then the ECC IRQ signal will be cleared at the same time. Secondly, we will check whether all sectors are decoded by reading the register ECC_DECDONE. This is because the current IRQ may be not dealed in time, and the next sectors have been decoded before reading the register ECC_DECIRQ_STA. Then, the next sectors's decode IRQs will not be generated. Thirdly, if all sectors are decoded by comparing with ecc->sectors, then we will complete ecc->done, set ecc->sectors as 0, and disable ECC IRQ by programming the register ECC_IRQ_REG(op) as 0. Otherwise, wait for the next ECC IRQ. But, there is a timing issue between step one and two. When we read the reigster ECC_DECIRQ_STA, all sectors are decoded except the last sector, and the ECC IRQ signal is cleared. But the last sector is decoded before reading ECC_DECDONE, so the ECC IRQ signal is enabled again by ECC HW, and it means we will receive one extra ECC IRQ later. 
In step three, we will find that all sectors were decoded, then disable ECC IRQ and return. When deal with the extra ECC IRQ, the ECC IRQ status cannot be cleared anymore. That is because the register ECC_DECIRQ_STA can only be cleared when the register ECC_IRQ_REG(op) is enabled. But actually we have disabled ECC IRQ in the previous ECC IRQ handle. So, there will keep receiving ECC decode IRQ. Now, we read the register ECC_DECIRQ_STA once again before completing the ecc done event. This ensures that there will be no extra ECC decode IRQ. Also, remove writel(0, ecc->regs + ECC_IRQ_REG(op)) from irq handler, because ECC IRQ is disabled in mtk_ecc_disable(). And clear ECC_DECIRQ_STA in mtk_ecc_disable() in case there is a timeout to wait decode IRQ. Fixes: 1d6b1e464950 ("mtd: mediatek: driver for MTK Smart Device") Signed-off-by: Xiaolei Li Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/mtk_ecc.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c index 7f3b065b6b8fe7..c51d214d169ea4 100644 --- a/drivers/mtd/nand/mtk_ecc.c +++ b/drivers/mtd/nand/mtk_ecc.c @@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) op = ECC_DECODE; dec = readw(ecc->regs + ECC_DECDONE); if (dec & ecc->sectors) { + /* + * Clear decode IRQ status once again to ensure that + * there will be no extra IRQ. + */ + readw(ecc->regs + ECC_DECIRQ_STA); ecc->sectors = 0; complete(&ecc->done); } else { @@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) } } - writel(0, ecc->regs + ECC_IRQ_REG(op)); - return IRQ_HANDLED; } @@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc) /* disable it */ mtk_ecc_wait_idle(ecc, op); + if (op == ECC_DECODE) + /* + * Clear decode IRQ status in case there is a timeout to wait + * decode IRQ. + */ + readw(ecc->regs + ECC_DECIRQ_STA); writew(0, ecc->regs + ECC_IRQ_REG(op)); writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); From 90b54bccdd9793d6e906baeb5975afa337987db8 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 3 Oct 2017 10:51:48 +0530 Subject: [PATCH 0180/3326] mailbox: bcm-flexrm-mailbox: Fix FlexRM ring flush sequence commit a371c10ea4b38a5f120e86d906d404d50a0f4660 upstream. As-per suggestion from FlexRM HW folks, we have to first set FlexRM ring flush state and then clear it for FlexRM ring flush to work properly. Currently, the FlexRM driver has incomplete FlexRM ring flush sequence which causes repeated insmod+rmmod of mailbox client drivers to fail. This patch fixes FlexRM ring flush sequence in flexrm_shutdown() as described above. 
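The corrected shutdown path thus becomes a two-phase poll, roughly as sketched here (simplified from the hunks below; register and field names are the driver's own):

	/* Phase 1: request the flush and poll up to ~1s for FLUSH_DONE. */
	timeout = 1000;
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring->regs + RING_CONTROL);
	do {
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (--timeout);

	/* Phase 2: clear the flush request and poll for FLUSH_DONE to drop. */
	timeout = 1000;
	writel_relaxed(0x0, ring->regs + RING_CONTROL);
	do {
		if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK))
			break;
		mdelay(1);
	} while (--timeout);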
Fixes: dbc049eee730 ("mailbox: Add driver for Broadcom FlexRM ring manager") Signed-off-by: Anup Patel Reviewed-by: Scott Branden Signed-off-by: Jassi Brar Signed-off-by: Greg Kroah-Hartman --- drivers/mailbox/bcm-flexrm-mailbox.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index ae6146311934dc..f052a3eb209839 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1365,8 +1365,8 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Disable/inactivate ring */ writel_relaxed(0x0, ring->regs + RING_CONTROL); - /* Flush ring with timeout of 1s */ - timeout = 1000; + /* Set ring flush state */ + timeout = 1000; /* timeout of 1s */ writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring->regs + RING_CONTROL); do { @@ -1374,7 +1374,23 @@ static void flexrm_shutdown(struct mbox_chan *chan) FLUSH_DONE_MASK) break; mdelay(1); - } while (timeout--); + } while (--timeout); + if (!timeout) + dev_err(ring->mbox->dev, + "setting ring%d flush state timedout\n", ring->num); + + /* Clear ring flush state */ + timeout = 1000; /* timeout of 1s */ + writel_relaxed(0x0, ring + RING_CONTROL); + do { + if (!(readl_relaxed(ring + RING_FLUSH_DONE) & + FLUSH_DONE_MASK)) + break; + mdelay(1); + } while (--timeout); + if (!timeout) + dev_err(ring->mbox->dev, + "clearing ring%d flush state timedout\n", ring->num); /* Abort all in-flight requests */ for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) { From bbf6614675482ae339510e331c2350455b2292b8 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 26 Sep 2017 17:11:33 +0200 Subject: [PATCH 0181/3326] p54: don't unregister leds when they are not initialized commit fc09785de0a364427a5df63d703bae9a306ed116 upstream. ieee80211_register_hw() in p54_register_common() may fail and leds won't get initialized. Currently p54_unregister_common() doesn't check that and always calls p54_unregister_leds(). The fix is to check priv->registered flag before calling p54_unregister_leds(). Found by syzkaller. INFO: trying to register non-static key. the code is fine but needs lockdep annotation. turning off the locking correctness validator. 
CPU: 1 PID: 1404 Comm: kworker/1:1 Not tainted 4.14.0-rc1-42251-gebb2c2437d80-dirty #205 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event Call Trace: __dump_stack lib/dump_stack.c:16 dump_stack+0x292/0x395 lib/dump_stack.c:52 register_lock_class+0x6c4/0x1a00 kernel/locking/lockdep.c:769 __lock_acquire+0x27e/0x4550 kernel/locking/lockdep.c:3385 lock_acquire+0x259/0x620 kernel/locking/lockdep.c:4002 flush_work+0xf0/0x8c0 kernel/workqueue.c:2886 __cancel_work_timer+0x51d/0x870 kernel/workqueue.c:2961 cancel_delayed_work_sync+0x1f/0x30 kernel/workqueue.c:3081 p54_unregister_leds+0x6c/0xc0 drivers/net/wireless/intersil/p54/led.c:160 p54_unregister_common+0x3d/0xb0 drivers/net/wireless/intersil/p54/main.c:856 p54u_disconnect+0x86/0x120 drivers/net/wireless/intersil/p54/p54usb.c:1073 usb_unbind_interface+0x21c/0xa90 drivers/usb/core/driver.c:423 __device_release_driver drivers/base/dd.c:861 device_release_driver_internal+0x4f4/0x5c0 drivers/base/dd.c:893 device_release_driver+0x1e/0x30 drivers/base/dd.c:918 bus_remove_device+0x2f4/0x4b0 drivers/base/bus.c:565 device_del+0x5c4/0xab0 drivers/base/core.c:1985 usb_disable_device+0x1e9/0x680 drivers/usb/core/message.c:1170 usb_disconnect+0x260/0x7a0 drivers/usb/core/hub.c:2124 hub_port_connect drivers/usb/core/hub.c:4754 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x1318/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc7f/0x1db0 kernel/workqueue.c:2119 process_scheduled_works kernel/workqueue.c:2179 worker_thread+0xb2b/0x1850 kernel/workqueue.c:2255 kthread+0x3a1/0x470 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 Signed-off-by: Andrey Konovalov Acked-by: Christian Lamparter Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intersil/p54/main.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c index d5a3bf91a03e7b..ab6d39e1206951 100644 --- a/drivers/net/wireless/intersil/p54/main.c +++ b/drivers/net/wireless/intersil/p54/main.c @@ -852,12 +852,11 @@ void p54_unregister_common(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; -#ifdef CONFIG_P54_LEDS - p54_unregister_leds(priv); -#endif /* CONFIG_P54_LEDS */ - if (priv->registered) { priv->registered = false; +#ifdef CONFIG_P54_LEDS + p54_unregister_leds(priv); +#endif /* CONFIG_P54_LEDS */ ieee80211_unregister_hw(dev); } From 77a38e88ccbc9e6451333f742b18d0fe5176c5cd Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 19 Oct 2017 10:00:48 -0700 Subject: [PATCH 0182/3326] block: Fix a race between blk_cleanup_queue() and timeout handling commit 4e9b6f20828ac880dbc1fa2fdbafae779473d1af upstream. Make sure that if the timeout timer fires after a queue has been marked "dying" that the affected requests are finished. 
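The fix follows the usual teardown pattern for a timer that schedules deferred work: stop the timer first, then flush any work it has already queued. A minimal sketch of that ordering (the actual block-layer hunks follow below):

	/* Teardown order matters: kill the timer so no new work is queued,
	 * then wait out any timeout work that is already pending or running. */
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);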
Reported-by: chenxiang (M) Fixes: commit 287922eb0b18 ("block: defer timeouts to a workqueue") Signed-off-by: Bart Van Assche Tested-by: chenxiang (M) Cc: Christoph Hellwig Cc: Keith Busch Cc: Hannes Reinecke Cc: Ming Lei Cc: Johannes Thumshirn Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- block/blk-core.c | 2 ++ block/blk-timeout.c | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 048be4aa602446..33ee583cfe45d1 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -333,6 +333,7 @@ EXPORT_SYMBOL(blk_stop_queue); void blk_sync_queue(struct request_queue *q) { del_timer_sync(&q->timeout); + cancel_work_sync(&q->timeout_work); if (q->mq_ops) { struct blk_mq_hw_ctx *hctx; @@ -844,6 +845,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) setup_timer(&q->backing_dev_info->laptop_mode_wb_timer, laptop_mode_timer_fn, (unsigned long) q); setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); + INIT_WORK(&q->timeout_work, NULL); INIT_LIST_HEAD(&q->queue_head); INIT_LIST_HEAD(&q->timeout_list); INIT_LIST_HEAD(&q->icq_list); diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 17ec83bb09002c..6427be7ac36375 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -134,8 +134,6 @@ void blk_timeout_work(struct work_struct *work) struct request *rq, *tmp; int next_set = 0; - if (blk_queue_enter(q, true)) - return; spin_lock_irqsave(q->queue_lock, flags); list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) @@ -145,7 +143,6 @@ void blk_timeout_work(struct work_struct *work) mod_timer(&q->timeout, round_jiffies_up(next)); spin_unlock_irqrestore(q->queue_lock, flags); - blk_queue_exit(q); } /** From d92105f93aeb2c80f6840bed9af71d3f97321916 Mon Sep 17 00:00:00 2001 From: Nate Dailey Date: Tue, 17 Oct 2017 08:17:03 -0400 Subject: [PATCH 0183/3326] raid1: prevent freeze_array/wait_all_barriers deadlock commit f6eca2d43ed694ab8124dd24c88277f7eca93b7d upstream. If freeze_array is attempted in the middle of close_sync/ wait_all_barriers, deadlock can occur. freeze_array will wait for nr_pending and nr_queued to line up. wait_all_barriers increments nr_pending for each barrier bucket, one at a time, but doesn't actually issue IO that could be counted in nr_queued. So freeze_array is blocked until wait_all_barriers completes and allow_all_barriers runs. At the same time, when _wait_barrier sees array_frozen == 1, it stops and waits for freeze_array to complete. Prevent the deadlock by making close_sync call _wait_barrier and _allow_barrier for one bucket at a time, instead of deferring the _allow_barrier calls until after all _wait_barriers are complete. 
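The resulting close_sync() loop is essentially the sketch below (the full hunk follows); the point is that nr_pending is only elevated for one bucket at a time, so freeze_array() can always line up nr_pending and nr_queued:

	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
		_wait_barrier(conf, idx);	/* raises nr_pending[idx] */
		_allow_barrier(conf, idx);	/* drops it again immediately */
	}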
Signed-off-by: Nate Dailey Fix: fd76863e37fe(RAID1: a new I/O barrier implementation to remove resync window) Reviewed-by: Coly Li Signed-off-by: Shaohua Li Signed-off-by: Greg Kroah-Hartman --- drivers/md/raid1.c | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f3f3e40dc9d8fc..e4e8f9e565b71f 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -990,14 +990,6 @@ static void wait_barrier(struct r1conf *conf, sector_t sector_nr) _wait_barrier(conf, idx); } -static void wait_all_barriers(struct r1conf *conf) -{ - int idx; - - for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) - _wait_barrier(conf, idx); -} - static void _allow_barrier(struct r1conf *conf, int idx) { atomic_dec(&conf->nr_pending[idx]); @@ -1011,14 +1003,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr) _allow_barrier(conf, idx); } -static void allow_all_barriers(struct r1conf *conf) -{ - int idx; - - for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) - _allow_barrier(conf, idx); -} - /* conf->resync_lock should be held */ static int get_unqueued_pending(struct r1conf *conf) { @@ -1654,8 +1638,12 @@ static void print_conf(struct r1conf *conf) static void close_sync(struct r1conf *conf) { - wait_all_barriers(conf); - allow_all_barriers(conf); + int idx; + + for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) { + _wait_barrier(conf, idx); + _allow_barrier(conf, idx); + } mempool_destroy(conf->r1buf_pool); conf->r1buf_pool = NULL; From c01dd3addb99763551f2717d1c7c6d20bd9491de Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 9 Nov 2017 14:17:59 +0000 Subject: [PATCH 0184/3326] genirq: Track whether the trigger type has been set commit 4f8413a3a799c958f7a10a6310a451e6b8aef5ad upstream. When requesting a shared interrupt, we assume that the firmware support code (DT or ACPI) has called irqd_set_trigger_type already, so that we can retrieve it and check that the requester is being reasonnable. Unfortunately, we still have non-DT, non-ACPI systems around, and these guys won't call irqd_set_trigger_type before requesting the interrupt. The consequence is that we fail the request that would have worked before. We can either chase all these use cases (boring), or address it in core code (easier). Let's have a per-irq_desc flag that indicates whether irqd_set_trigger_type has been called, and let's just check it when checking for a shared interrupt. If it hasn't been set, just take whatever the interrupt requester asks. Fixes: 382bd4de6182 ("genirq: Use irqd_get_trigger_type to compare the trigger type for shared IRQs") Reported-and-tested-by: Petr Cvek Signed-off-by: Marc Zyngier Signed-off-by: Greg Kroah-Hartman --- include/linux/irq.h | 11 ++++++++++- kernel/irq/manage.c | 13 ++++++++++++- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/include/linux/irq.h b/include/linux/irq.h index 4536286cc4d24b..0d53626405bf85 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -211,6 +211,7 @@ struct irq_data { * IRQD_MANAGED_SHUTDOWN - Interrupt was shutdown due to empty affinity * mask. Applies only to affinity managed irqs. 
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target + * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -231,6 +232,7 @@ enum { IRQD_IRQ_STARTED = (1 << 22), IRQD_MANAGED_SHUTDOWN = (1 << 23), IRQD_SINGLE_TARGET = (1 << 24), + IRQD_DEFAULT_TRIGGER_SET = (1 << 25), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -260,18 +262,25 @@ static inline void irqd_mark_affinity_was_set(struct irq_data *d) __irqd_to_state(d) |= IRQD_AFFINITY_SET; } +static inline bool irqd_trigger_type_was_set(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET; +} + static inline u32 irqd_get_trigger_type(struct irq_data *d) { return __irqd_to_state(d) & IRQD_TRIGGER_MASK; } /* - * Must only be called inside irq_chip.irq_set_type() functions. + * Must only be called inside irq_chip.irq_set_type() functions or + * from the DT/ACPI setup code. */ static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) { __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK; __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK; + __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET; } static inline bool irqd_is_level_type(struct irq_data *d) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 4bff6a10ae8ec7..b02caa442776f6 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1245,7 +1245,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) * set the trigger type must match. Also all must * agree on ONESHOT. */ - unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data); + unsigned int oldtype; + + /* + * If nobody did set the configuration before, inherit + * the one provided by the requester. + */ + if (irqd_trigger_type_was_set(&desc->irq_data)) { + oldtype = irqd_get_trigger_type(&desc->irq_data); + } else { + oldtype = new->flags & IRQF_TRIGGER_MASK; + irqd_set_trigger_type(&desc->irq_data, oldtype); + } if (!((old->flags & new->flags) & IRQF_SHARED) || (oldtype != (new->flags & IRQF_TRIGGER_MASK)) || From ca655bc65e830819ded1b7a0b51a3beb204103a2 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sat, 11 Nov 2017 17:51:25 +0100 Subject: [PATCH 0185/3326] irqchip/gic-v3: Fix ppi-partitions lookup commit 00ee9a1ca5080202bc37b44e998c3b2c74d45817 upstream. Fix child-node lookup during initialisation, which ended up searching the whole device tree depth-first starting at the parent rather than just matching on its children. To make things worse, the parent gic node was prematurely freed, while the ppi-partitions node was leaked. 
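The general lookup/release pattern being restored is sketched below (illustrative only; the gic-v3 specific hunk follows):

	struct device_node *child;

	/* of_get_child_by_name() matches only direct children and does not
	 * drop the reference on the parent, unlike of_find_node_by_name(). */
	child = of_get_child_by_name(parent, "ppi-partitions");
	if (!child)
		return;

	/* ... parse the child node ... */

	of_node_put(child);	/* balance the reference taken by the lookup */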
Fixes: e3825ba1af3a ("irqchip/gic-v3: Add support for partitioned PPIs") Signed-off-by: Johan Hovold Signed-off-by: Marc Zyngier Signed-off-by: Greg Kroah-Hartman --- drivers/irqchip/irq-gic-v3.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index b5df99c6f680f9..3b35271114eef1 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1071,18 +1071,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) int nr_parts; struct partition_affinity *parts; - parts_node = of_find_node_by_name(gic_node, "ppi-partitions"); + parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); if (!parts_node) return; nr_parts = of_get_child_count(parts_node); if (!nr_parts) - return; + goto out_put_node; parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL); if (WARN_ON(!parts)) - return; + goto out_put_node; for_each_child_of_node(parts_node, child_part) { struct partition_affinity *part; @@ -1149,6 +1149,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) gic_data.ppi_descs[i] = desc; } + +out_put_node: + of_node_put(parts_node); } static void __init gic_of_setup_kvm_info(struct device_node *node) From 665e6619705a19910ab3aaa81eb34343e9628da4 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Fri, 20 Oct 2017 17:33:18 +0300 Subject: [PATCH 0186/3326] lockd: double unregister of inetaddr notifiers commit dc3033e16c59a2c4e62b31341258a5786cbcee56 upstream. lockd_up() can call lockd_unregister_notifiers twice: inside lockd_start_svc() when it calls lockd_svc_exit_thread() and then in error path of lockd_up() Patch forces lockd_start_svc() to unregister notifiers in all error cases and removes extra unregister in error path of lockd_up(). Fixes: cb7d224f82e4 "lockd: unregister notifier blocks if the service ..." Signed-off-by: Vasily Averin Reviewed-by: Jeff Layton Signed-off-by: J. Bruce Fields Signed-off-by: Greg Kroah-Hartman --- fs/lockd/svc.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index b995bdc1397684..f04ecfc7ece0c9 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -369,6 +369,7 @@ static int lockd_start_svc(struct svc_serv *serv) printk(KERN_WARNING "lockd_up: svc_rqst allocation failed, error=%d\n", error); + lockd_unregister_notifiers(); goto out_rqst; } @@ -459,13 +460,16 @@ int lockd_up(struct net *net) } error = lockd_up_net(serv, net); - if (error < 0) - goto err_net; + if (error < 0) { + lockd_unregister_notifiers(); + goto err_put; + } error = lockd_start_svc(serv); - if (error < 0) - goto err_start; - + if (error < 0) { + lockd_down_net(serv, net); + goto err_put; + } nlmsvc_users++; /* * Note: svc_serv structures have an initial use count of 1, @@ -476,12 +480,6 @@ int lockd_up(struct net *net) err_create: mutex_unlock(&nlmsvc_mutex); return error; - -err_start: - lockd_down_net(serv, net); -err_net: - lockd_unregister_notifiers(); - goto err_put; } EXPORT_SYMBOL_GPL(lockd_up); From f5073bc7c18358d08bc9dd39458bc469dc9e978d Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 26 Oct 2017 17:00:22 +1100 Subject: [PATCH 0187/3326] KVM: PPC: Book3S HV: Don't call real-mode XICS hypercall handlers if not enabled commit 00bb6ae5006205e041ce9784c819460562351d47 upstream. 
When running a guest on a POWER9 system with the in-kernel XICS emulation disabled (for example by running QEMU with the parameter "-machine pseries,kernel_irqchip=off"), the kernel does not pass the XICS-related hypercalls such as H_CPPR up to userspace for emulation there as it should. The reason for this is that the real-mode handlers for these hypercalls don't check whether a XICS device has been instantiated before calling the xics-on-xive code. That code doesn't check either, leading to potential NULL pointer dereferences because vcpu->arch.xive_vcpu is NULL. Those dereferences won't cause an exception in real mode but will lead to kernel memory corruption. This fixes it by adding kvmppc_xics_enabled() checks before calling the XICS functions. Fixes: 5af50993850a ("KVM: PPC: Book3S HV: Native usage of the XIVE interrupt controller") Signed-off-by: Paul Mackerras Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_hv_builtin.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 90644db9d38e21..8e0cf8f186dfde 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -529,6 +529,8 @@ static inline bool is_rm(void) unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_xirr(vcpu); @@ -541,6 +543,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; vcpu->arch.gpr[5] = get_tb(); if (xive_enabled()) { if (is_rm()) @@ -554,6 +558,8 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_ipoll(vcpu, server); @@ -567,6 +573,8 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, unsigned long mfrr) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_ipi(vcpu, server, mfrr); @@ -579,6 +587,8 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_cppr(vcpu, cppr); @@ -591,6 +601,8 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_eoi(vcpu, xirr); From 0b09ee40aae7ca0c4a7956dcbc5361c5ef7ccf2e Mon Sep 17 00:00:00 2001 From: Ladi Prosek Date: Wed, 11 Oct 2017 16:54:42 +0200 Subject: [PATCH 0188/3326] KVM: nVMX: set IDTR and GDTR limits when loading L1 host state commit 21f2d551183847bc7fbe8d866151d00cdad18752 upstream. Intel SDM 27.5.2 Loading Host Segment and Descriptor-Table Registers: "The GDTR and IDTR limits are each set to FFFFH." 
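In code, the emulated VM-exit now mirrors that hardware behaviour for the host descriptor-table limits; the two writes below are the essence of the hunk that follows:

	/* SDM 27.5.2: a VM-exit forces the host GDTR/IDTR limits to 0xFFFF,
	 * so do the same when loading vmcs12 host state into the guest fields. */
	vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);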
Signed-off-by: Ladi Prosek Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a6f4f095f8f4eb..41794e287f410e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11325,6 +11325,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); + vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); + vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) From 04d7f0cfd5a50cab9edfd73ceca460de5bdd55ac Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 26 Oct 2017 09:13:27 +0200 Subject: [PATCH 0189/3326] KVM: SVM: obey guest PAT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 15038e14724799b8c205beb5f20f9e54896013c3 upstream. For many years some users of assigned devices have reported worse performance on AMD processors with NPT than on AMD without NPT, Intel or bare metal. The reason turned out to be that SVM is discarding the guest PAT setting and uses the default (PA0=PA4=WB, PA1=PA5=WT, PA2=PA6=UC-, PA3=UC). The guest might be using a different setting, and especially might want write combining but isn't getting it (instead getting slow UC or UC- accesses). Thanks a lot to geoff@hostfission.com for noticing the relation to the g_pat setting. The patch has been tested also by a bunch of people on VFIO users forums. Fixes: 709ddebf81cb40e3c36c6109a7892e8b93a09464 Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=196409 Signed-off-by: Paolo Bonzini Reviewed-by: David Hildenbrand Tested-by: Nick Sarnie Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/svm.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 0e68f0b3cbf720..ca209a4a783418 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3657,6 +3657,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) u32 ecx = msr->index; u64 data = msr->data; switch (ecx) { + case MSR_IA32_CR_PAT: + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) + return 1; + vcpu->arch.pat = data; + svm->vmcb->save.g_pat = data; + mark_dirty(svm->vmcb, VMCB_NPT); + break; case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr); break; From a77360e989f3dc06e4f177a0837d533d13a20d91 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 6 Nov 2017 13:31:12 +0100 Subject: [PATCH 0190/3326] kvm: vmx: Reinstate support for CPUs without virtual NMI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 8a1b43922d0d1279e7936ba85c4c2a870403c95f upstream. This is more or less a revert of commit 2c82878b0cb3 ("KVM: VMX: require virtual NMI support", 2017-03-27); it turns out that Core 2 Duo machines only had virtual NMIs in some SKUs. The revert is not trivial because in the meanwhile there have been several fixes to nested NMI injection. Therefore, the entire vNMI state is moved to struct loaded_vmcs. 
Another change compared to before the patch is a simplification here: if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked && !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis( get_vmcs12(vcpu))))) { The final condition here is always true (because nested_cpu_has_virtual_nmis is always false) and is removed. Fixes: 2c82878b0cb38fd516fd612c67852a6bbf282003 Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1490803 Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 150 ++++++++++++++++++++++++++++++++------------- 1 file changed, 106 insertions(+), 44 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 41794e287f410e..21cad7068cbfac 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -202,6 +202,10 @@ struct loaded_vmcs { bool nmi_known_unmasked; unsigned long vmcs_host_cr3; /* May not match real cr3 */ unsigned long vmcs_host_cr4; /* May not match real cr4 */ + /* Support for vnmi-less CPUs */ + int soft_vnmi_blocked; + ktime_t entry_time; + s64 vnmi_blocked_time; struct list_head loaded_vmcss_on_cpu_link; }; @@ -1286,6 +1290,11 @@ static inline bool cpu_has_vmx_invpcid(void) SECONDARY_EXEC_ENABLE_INVPCID; } +static inline bool cpu_has_virtual_nmis(void) +{ + return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; +} + static inline bool cpu_has_vmx_wbinvd_exit(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & @@ -1343,11 +1352,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) (vmcs12->secondary_vm_exec_control & bit); } -static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) -{ - return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; -} - static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & @@ -3699,9 +3703,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) &_vmexit_control) < 0) return -EIO; - min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | - PIN_BASED_VIRTUAL_NMIS; - opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER; + min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; + opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | + PIN_BASED_VMX_PREEMPTION_TIMER; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, &_pin_based_exec_control) < 0) return -EIO; @@ -5667,7 +5671,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) static void enable_nmi_window(struct kvm_vcpu *vcpu) { - if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { + if (!cpu_has_virtual_nmis() || + vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { enable_irq_window(vcpu); return; } @@ -5707,6 +5712,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + if (!cpu_has_virtual_nmis()) { + /* + * Tracking the NMI-blocked state in software is built upon + * finding the next open IRQ window. This, in turn, depends on + * well-behaving guests: They have to keep IRQs disabled at + * least as long as the NMI handler runs. Otherwise we may + * cause NMI nesting, maybe breaking the guest. But as this is + * highly unlikely, we can live with the residual risk. 
+ */ + vmx->loaded_vmcs->soft_vnmi_blocked = 1; + vmx->loaded_vmcs->vnmi_blocked_time = 0; + } + ++vcpu->stat.nmi_injections; vmx->loaded_vmcs->nmi_known_unmasked = false; @@ -5725,6 +5743,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); bool masked; + if (!cpu_has_virtual_nmis()) + return vmx->loaded_vmcs->soft_vnmi_blocked; if (vmx->loaded_vmcs->nmi_known_unmasked) return false; masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; @@ -5736,13 +5756,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) { struct vcpu_vmx *vmx = to_vmx(vcpu); - vmx->loaded_vmcs->nmi_known_unmasked = !masked; - if (masked) - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); - else - vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); + if (!cpu_has_virtual_nmis()) { + if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { + vmx->loaded_vmcs->soft_vnmi_blocked = masked; + vmx->loaded_vmcs->vnmi_blocked_time = 0; + } + } else { + vmx->loaded_vmcs->nmi_known_unmasked = !masked; + if (masked) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + else + vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + } } static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) @@ -5750,6 +5777,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) if (to_vmx(vcpu)->nested.nested_run_pending) return 0; + if (!cpu_has_virtual_nmis() && + to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) + return 0; + return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | GUEST_INTR_STATE_NMI)); @@ -6478,6 +6509,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) * AAK134, BY25. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && + cpu_has_virtual_nmis() && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); @@ -6961,7 +6993,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) } /* Create a new VMCS */ - item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL); + item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL); if (!item) return NULL; item->vmcs02.vmcs = alloc_vmcs(); @@ -7978,6 +8010,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu) * "blocked by NMI" bit has to be set before next VM entry. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && + cpu_has_virtual_nmis() && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); @@ -8822,6 +8855,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) return 0; } + if (unlikely(!cpu_has_virtual_nmis() && + vmx->loaded_vmcs->soft_vnmi_blocked)) { + if (vmx_interrupt_allowed(vcpu)) { + vmx->loaded_vmcs->soft_vnmi_blocked = 0; + } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && + vcpu->arch.nmi_pending) { + /* + * This CPU don't support us in finding the end of an + * NMI-blocked window if the guest runs with IRQs + * disabled. So we pull the trigger after 1 s of + * futile waiting, but inform the user about this. 
+ */ + printk(KERN_WARNING "%s: Breaking out of NMI-blocked " + "state on VCPU %d after 1 s timeout\n", + __func__, vcpu->vcpu_id); + vmx->loaded_vmcs->soft_vnmi_blocked = 0; + } + } + if (exit_reason < kvm_vmx_max_exit_handlers && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); @@ -9104,33 +9156,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; - if (vmx->loaded_vmcs->nmi_known_unmasked) - return; - /* - * Can't use vmx->exit_intr_info since we're not sure what - * the exit reason is. - */ - exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; - vector = exit_intr_info & INTR_INFO_VECTOR_MASK; - /* - * SDM 3: 27.7.1.2 (September 2008) - * Re-set bit "block by NMI" before VM entry if vmexit caused by - * a guest IRET fault. - * SDM 3: 23.2.2 (September 2008) - * Bit 12 is undefined in any of the following cases: - * If the VM exit sets the valid bit in the IDT-vectoring - * information field. - * If the VM exit is due to a double fault. - */ - if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && - vector != DF_VECTOR && !idtv_info_valid) - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); - else - vmx->loaded_vmcs->nmi_known_unmasked = - !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) - & GUEST_INTR_STATE_NMI); + if (cpu_has_virtual_nmis()) { + if (vmx->loaded_vmcs->nmi_known_unmasked) + return; + /* + * Can't use vmx->exit_intr_info since we're not sure what + * the exit reason is. + */ + exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; + vector = exit_intr_info & INTR_INFO_VECTOR_MASK; + /* + * SDM 3: 27.7.1.2 (September 2008) + * Re-set bit "block by NMI" before VM entry if vmexit caused by + * a guest IRET fault. + * SDM 3: 23.2.2 (September 2008) + * Bit 12 is undefined in any of the following cases: + * If the VM exit sets the valid bit in the IDT-vectoring + * information field. + * If the VM exit is due to a double fault. + */ + if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && + vector != DF_VECTOR && !idtv_info_valid) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + else + vmx->loaded_vmcs->nmi_known_unmasked = + !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) + & GUEST_INTR_STATE_NMI); + } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) + vmx->loaded_vmcs->vnmi_blocked_time += + ktime_to_ns(ktime_sub(ktime_get(), + vmx->loaded_vmcs->entry_time)); } static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, @@ -9247,6 +9304,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long debugctlmsr, cr3, cr4; + /* Record the guest's net vcpu time for enforced NMI injections. */ + if (unlikely(!cpu_has_virtual_nmis() && + vmx->loaded_vmcs->soft_vnmi_blocked)) + vmx->loaded_vmcs->entry_time = ktime_get(); + /* Don't enter VMX if guest state is invalid, let the exit handler start emulation until we arrive back to a valid state */ if (vmx->emulation_required) From c21261e6311a7d237da47b608e933f3abd7e534e Mon Sep 17 00:00:00 2001 From: Jeff Moyer Date: Tue, 14 Nov 2017 20:37:27 -0500 Subject: [PATCH 0191/3326] dax: fix PMD faults on zero-length files commit 957ac8c421ad8b5eef9b17fe98e146d8311a541e upstream. PMD faults on a zero length file on a file system mounted with -o dax will not generate SIGBUS as expected. 
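The failure is purely arithmetic and can be shown outside the kernel; the snippet below is an illustrative userspace sketch that assumes 4 KiB pages and mirrors the reproducer and the offending line quoted next.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	long long size = 0;	/* i_size of a freshly truncated file */
	unsigned long old_max = (size - 1) >> PAGE_SHIFT;	/* wraps to ULONG_MAX */
	unsigned long new_max = DIV_ROUND_UP(size, PAGE_SIZE);	/* 0 */

	/* old: no pgoff is ever greater than ULONG_MAX, so EOF is never hit */
	/* new: pgoff 0 is already >= 0, so the fault returns SIGBUS */
	printf("old max_pgoff = %lu, new max_pgoff = %lu\n", old_max, new_max);
	return 0;
}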
fd = open(...O_TRUNC); addr = mmap(NULL, 2*1024*1024, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); *addr = 'a'; The problem is this code in dax_iomap_pmd_fault: max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT; If the inode size is zero, we end up with a max_pgoff that is way larger than 0. :) Fix it by using DIV_ROUND_UP, as is done elsewhere in the kernel. I tested this with some simple test code that ensured that SIGBUS was received where expected. Fixes: 642261ac995e ("dax: add struct iomap based DAX PMD support") Signed-off-by: Jeff Moyer Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- fs/dax.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index f001d8c72a0651..191306cd8b6b56 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1327,7 +1327,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, * this is a reliable test. */ pgoff = linear_page_index(vma, pmd_addr); - max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT; + max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); @@ -1351,13 +1351,13 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, if ((pmd_addr + PMD_SIZE) > vma->vm_end) goto fallback; - if (pgoff > max_pgoff) { + if (pgoff >= max_pgoff) { result = VM_FAULT_SIGBUS; goto out; } /* If the PMD would extend beyond the file size */ - if ((pgoff | PG_PMD_COLOUR) > max_pgoff) + if ((pgoff | PG_PMD_COLOUR) >= max_pgoff) goto fallback; /* From d1c2e5668cfd6b0332330e711f3e86d7bfe29230 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 14 Nov 2017 09:59:54 -0500 Subject: [PATCH 0192/3326] dax: fix general protection fault in dax_alloc_inode commit 9f586fff6574f6ecbf323f92d44ffaf0d96225fe upstream. Don't crash in case of allocation failure in dax_alloc_inode. syzkaller hit the following crash on e4880bc5dfb1 kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access [..] RIP: 0010:dax_alloc_inode+0x3b/0x70 drivers/dax/super.c:348 Call Trace: alloc_inode+0x65/0x180 fs/inode.c:208 new_inode_pseudo+0x69/0x190 fs/inode.c:890 new_inode+0x1c/0x40 fs/inode.c:919 mount_pseudo_xattr+0x288/0x560 fs/libfs.c:261 mount_pseudo include/linux/fs.h:2137 [inline] dax_mount+0x2e/0x40 drivers/dax/super.c:388 mount_fs+0x66/0x2d0 fs/super.c:1223 Fixes: 7b6be8444e0f ("dax: refactor dax-fs into a generic provider...") Reported-by: syzbot Signed-off-by: Mikulas Patocka Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/dax/super.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 557b937035328e..c4cd034a382053 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -344,6 +344,9 @@ static struct inode *dax_alloc_inode(struct super_block *sb) struct inode *inode; dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL); + if (!dax_dev) + return NULL; + inode = &dax_dev->inode; inode->i_rdev = 0; return inode; From 210ecdf24e06fb076f40bf3d904d947deb70055e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 10 Oct 2017 17:31:42 -0400 Subject: [PATCH 0193/3326] SUNRPC: Fix tracepoint storage issues with svc_recv and svc_rqst_status commit e9d4bf219c83d09579bc62512fea2ca10f025d93 upstream. There is no guarantee that either the request or the svc_xprt exist by the time we get round to printing the trace message. Signed-off-by: Trond Myklebust Signed-off-by: J. 
Bruce Fields Signed-off-by: Greg Kroah-Hartman --- include/trace/events/sunrpc.h | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 25a7739514cd84..3868b475232466 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -456,20 +456,22 @@ TRACE_EVENT(svc_recv, TP_ARGS(rqst, status), TP_STRUCT__entry( - __field(struct sockaddr *, addr) __field(__be32, xid) __field(int, status) __field(unsigned long, flags) + __dynamic_array(unsigned char, addr, rqst->rq_addrlen) ), TP_fast_assign( - __entry->addr = (struct sockaddr *)&rqst->rq_addr; __entry->xid = status > 0 ? rqst->rq_xid : 0; __entry->status = status; __entry->flags = rqst->rq_flags; + memcpy(__get_dynamic_array(addr), + &rqst->rq_addr, rqst->rq_addrlen); ), - TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr, + TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", + (struct sockaddr *)__get_dynamic_array(addr), be32_to_cpu(__entry->xid), __entry->status, show_rqstp_flags(__entry->flags)) ); @@ -514,22 +516,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status, TP_ARGS(rqst, status), TP_STRUCT__entry( - __field(struct sockaddr *, addr) __field(__be32, xid) - __field(int, dropme) __field(int, status) __field(unsigned long, flags) + __dynamic_array(unsigned char, addr, rqst->rq_addrlen) ), TP_fast_assign( - __entry->addr = (struct sockaddr *)&rqst->rq_addr; __entry->xid = rqst->rq_xid; __entry->status = status; __entry->flags = rqst->rq_flags; + memcpy(__get_dynamic_array(addr), + &rqst->rq_addr, rqst->rq_addrlen); ), TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s", - __entry->addr, be32_to_cpu(__entry->xid), + (struct sockaddr *)__get_dynamic_array(addr), + be32_to_cpu(__entry->xid), __entry->status, show_rqstp_flags(__entry->flags)) ); From bf93b23582af410df8e9ddd682e9a5c4d48b2c65 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sat, 11 Nov 2017 17:29:29 +0100 Subject: [PATCH 0194/3326] clk: ti: dra7-atl-clock: fix child-node lookups commit 33ec6dbc5a02677509d97fe36cd2105753f0f0ea upstream. Fix child node-lookup during probe, which ended up searching the whole device tree depth-first starting at parent rather than just matching on its children. Note that the original premature free of the parent node has already been fixed separately, but that fix was apparently never backported to stable. 
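For readers unfamiliar with the two OF helpers, the difference is sketched below; this is illustrative only, with lookup_cfg() and the node name "atl0" as placeholders:

#include <linux/of.h>

static struct device_node *lookup_cfg(struct device_node *parent)
{
	/*
	 * Old: of_find_node_by_name() walks the whole device tree
	 * depth-first starting after 'parent', so it can match an
	 * unrelated node elsewhere, and it drops a reference on
	 * 'parent' as a side effect:
	 *
	 *	return of_find_node_by_name(parent, "atl0");
	 */

	/*
	 * New: of_get_child_by_name() matches direct children of
	 * 'parent' only and returns the child with its refcount
	 * incremented (balance with of_node_put()).
	 */
	return of_get_child_by_name(parent, "atl0");
}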
Fixes: 9ac33b0ce81f ("CLK: TI: Driver for DRA7 ATL (Audio Tracking Logic)") Fixes: 660e15519399 ("clk: ti: dra7-atl-clock: Fix of_node reference counting") Cc: Peter Ujfalusi Signed-off-by: Johan Hovold Acked-by: Peter Ujfalusi Signed-off-by: Stephen Boyd Signed-off-by: Greg Kroah-Hartman --- drivers/clk/ti/clk-dra7-atl.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 13eb04f72389bb..14881547043130 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -274,8 +274,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) /* Get configuration for the ATL instances */ snprintf(prop, sizeof(prop), "atl%u", i); - of_node_get(node); - cfg_node = of_find_node_by_name(node, prop); + cfg_node = of_get_child_by_name(node, prop); if (cfg_node) { ret = of_property_read_u32(cfg_node, "bws", &cdesc->bws); From 0f8fb6d5c2b77631c9b8d45417d2146e914ddb1b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 25 Sep 2017 11:01:31 -0700 Subject: [PATCH 0195/3326] libnvdimm, dimm: clear 'locked' status on successful DIMM enable commit d34cb808402898e53b9a9bcbbedd01667a78723b upstream. If we successfully enable a DIMM then it must not be locked and we can clear the label-read failure condition. Otherwise, we need to reload the entire bus provider driver to achieve the same effect, and that can disrupt unrelated DIMMs and namespaces. Fixes: 9d62ed965118 ("libnvdimm: handle locked label storage areas") Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/dimm.c | 1 + drivers/nvdimm/dimm_devs.c | 7 +++++++ drivers/nvdimm/nd.h | 1 + 3 files changed, 9 insertions(+) diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index e0f0e3ce1a32ec..98466d762c8fac 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c @@ -68,6 +68,7 @@ static int nvdimm_probe(struct device *dev) rc = nd_label_reserve_dpa(ndd); if (ndd->ns_current >= 0) nvdimm_set_aliasing(dev); + nvdimm_clear_locked(dev); nvdimm_bus_unlock(dev); if (rc) diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index f0d1b7e5de01d1..5f1385b96b1315 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -200,6 +200,13 @@ void nvdimm_set_locked(struct device *dev) set_bit(NDD_LOCKED, &nvdimm->flags); } +void nvdimm_clear_locked(struct device *dev) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + + clear_bit(NDD_LOCKED, &nvdimm->flags); +} + static void nvdimm_release(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 9c758a91372bbf..156be00e1f760a 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -254,6 +254,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, unsigned int len); void nvdimm_set_aliasing(struct device *dev); void nvdimm_set_locked(struct device *dev); +void nvdimm_clear_locked(struct device *dev); struct nd_btt *to_nd_btt(struct device *dev); struct nd_gen_sb { From 9a31016ea05be4c90ddf691f29aaf999e606e55e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 26 Sep 2017 13:07:06 -0700 Subject: [PATCH 0196/3326] libnvdimm, pfn: make 'resource' attribute only readable by root commit 26417ae4fc6108f8db436f24108b08f68bdc520e upstream. For the same reason that /proc/iomem returns 0's for non-root readers and acpi tables are root-only, make the 'resource' attribute for pfn devices only readable by root. Otherwise we disclose physical address information. 
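The same sysfs idiom recurs in this patch and in the region and namespace patches further below. In general form it looks like the sketch here; the group and attribute-list names are placeholders, 'dev_attr_resource' is the attribute being restricted:

static umode_t resource_visible(struct kobject *kobj,
				struct attribute *a, int n)
{
	if (a == &dev_attr_resource.attr)
		return 0400;	/* readable by root only */
	return a->mode;		/* leave all other attributes as declared */
}

/* wired up via .is_visible so the rest of the group is untouched */
struct attribute_group example_attribute_group = {
	.attrs		= example_attributes,
	.is_visible	= resource_visible,
};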
Fixes: f6ed58c70d14 ("libnvdimm, pfn: 'resource'-address and 'size'...") Reported-by: Dave Hansen Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/pfn_devs.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 9576c444f0ab58..65cc171c721de8 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -282,8 +282,16 @@ static struct attribute *nd_pfn_attributes[] = { NULL, }; +static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n) +{ + if (a == &dev_attr_resource.attr) + return 0400; + return a->mode; +} + struct attribute_group nd_pfn_attribute_group = { .attrs = nd_pfn_attributes, + .is_visible = pfn_visible, }; static const struct attribute_group *nd_pfn_attribute_groups[] = { From 913ff23bac73a9a8825942c957b6252ab3f8f198 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 26 Sep 2017 11:41:28 -0700 Subject: [PATCH 0197/3326] libnvdimm, namespace: fix label initialization to use valid seq numbers commit b18d4b8a25af6fe83d7692191d6ff962ea611c4f upstream. The set of valid sequence numbers is {1,2,3}. The specification indicates that an implementation should consider 0 a sign of a critical error: UEFI 2.7: 13.19 NVDIMM Label Protocol Software never writes the sequence number 00, so a correctly check-summed Index Block with this sequence number probably indicates a critical error. When software discovers this case it treats it as an invalid Index Block indication. While the expectation is that the invalid block is just thrown away, the Robustness Principle says we should fix this to make both sequence numbers valid. Fixes: f524bf271a5c ("libnvdimm: write pmem label set") Reported-by: Juston Li Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/label.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 9c5f108910e337..de66c02f61409e 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -1050,7 +1050,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels) nsindex = to_namespace_index(ndd, 0); memset(nsindex, 0, ndd->nsarea.config_size); for (i = 0; i < 2; i++) { - int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT); + int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT); if (rc) return rc; From d6667298d1d0d47de619b23078ceb83ec2c09d46 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 26 Sep 2017 11:17:52 -0700 Subject: [PATCH 0198/3326] libnvdimm, region : make 'resource' attribute only readable by root commit b8ff981f88df03c72a4de2f6eaa9ce447a10ac03 upstream. For the same reason that /proc/iomem returns 0's for non-root readers and acpi tables are root-only, make the 'resource' attribute for region devices only readable by root. Otherwise we disclose physical address information. 
Fixes: 802f4be6feee ("libnvdimm: Add 'resource' sysfs attribute to regions") Cc: Dave Jiang Cc: Johannes Thumshirn Reported-by: Dave Hansen Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/region_devs.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 829d760f651c7f..abaf38c6122068 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -562,8 +562,12 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr) return 0; - if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr) - return 0; + if (a == &dev_attr_resource.attr) { + if (is_nd_pmem(dev)) + return 0400; + else + return 0; + } if (a == &dev_attr_deep_flush.attr) { int has_flush = nvdimm_has_flush(nd_region); From 65551fb50f05057da3f731059355fd67e6837a7b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 26 Sep 2017 11:21:24 -0700 Subject: [PATCH 0199/3326] libnvdimm, namespace: make 'resource' attribute only readable by root commit c1fb3542074fd0c4d901d778bd52455111e4eb6f upstream. For the same reason that /proc/iomem returns 0's for non-root readers and acpi tables are root-only, make the 'resource' attribute for namespace devices only readable by root. Otherwise we disclose physical address information. Fixes: bf9bccc14c05 ("libnvdimm: pmem label sets and namespace instantiation") Reported-by: Dave Hansen Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/namespace_devs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 3e4d1e7998dacb..0af988739a06c1 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1620,7 +1620,7 @@ static umode_t namespace_visible(struct kobject *kobj, if (a == &dev_attr_resource.attr) { if (is_namespace_blk(dev)) return 0; - return a->mode; + return 0400; } if (is_namespace_pmem(dev) || is_namespace_blk(dev)) { From a7a05def6f722746259ae58079470ebb891b584d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 16 Oct 2017 12:14:33 -0400 Subject: [PATCH 0200/3326] svcrdma: Preserve CB send buffer across retransmits commit 0bad47cada5defba13e98827d22d06f13258dfb3 upstream. During each NFSv4 callback Call, an RDMA Send completion frees the page that contains the RPC Call message. If the upper layer determines that a retransmit is necessary, this is too soon. 
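The intended page lifetime amounts to an extra reference held across retransmits; the sketch below is illustrative only, with post_send() standing in for the real send path:

struct page *page = alloc_page(GFP_KERNEL);	/* refcount == 1 */

get_page(page);		/* extra reference, held for possible retransmits */
post_send(page);	/* the Send completion drops its own reference	  */
/* ... the upper layer may retransmit; the buffer must remain valid ...  */
put_page(page);		/* dropped when the request is finally freed	  */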
One possible symptom: after a GARBAGE_ARGS response an NFSv4.1 callback request, the following BUG fires on the NFS server: kernel: BUG: Bad page state in process kworker/0:2H pfn:7d3ce2 kernel: page:ffffea001f4f3880 count:-2 mapcount:0 mapping: (null) index:0x0 kernel: flags: 0x2fffff80000000() kernel: raw: 002fffff80000000 0000000000000000 0000000000000000 fffffffeffffffff kernel: raw: dead000000000100 dead000000000200 0000000000000000 0000000000000000 kernel: page dumped because: nonzero _refcount kernel: Modules linked in: cts rpcsec_gss_krb5 ocfs2_dlmfs ocfs2_stack_o2cb ocfs2_dlm ocfs2_nodemanager ocfs2_stackglue rpcrdm a ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel kvm irqbypass crct10dif_pc lmul crc32_pclmul ghash_clmulni_intel pcbc iTCO_wdt iTCO_vendor_support aesni_intel crypto_simd glue_helper cryptd pcspkr lpc_ich i2c_i801 mei_me mf d_core mei raid0 sg wmi ioatdma ipmi_si ipmi_devintf ipmi_msghandler shpchp acpi_power_meter acpi_pad nfsd nfs_acl lockd auth_rpcgss grace sunrpc ip_tables xfs libcrc32c mlx4_en mlx4_ib mlx5_ib ib_core sd_mod sr_mod cdrom ast drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm ahci crc32c_intel libahci drm mlx5_core igb libata mlx4_core dca i2c_algo_bit i2c_core nvme kernel: ptp nvme_core pps_core dm_mirror dm_region_hash dm_log dm_mod dax kernel: CPU: 0 PID: 11495 Comm: kworker/0:2H Not tainted 4.14.0-rc3-00001-g577ce48 #811 kernel: Hardware name: Supermicro Super Server/X10SRL-F, BIOS 1.0c 09/09/2015 kernel: Workqueue: ib-comp-wq ib_cq_poll_work [ib_core] kernel: Call Trace: kernel: dump_stack+0x62/0x80 kernel: bad_page+0xfe/0x11a kernel: free_pages_check_bad+0x76/0x78 kernel: free_pcppages_bulk+0x364/0x441 kernel: ? ttwu_do_activate.isra.61+0x71/0x78 kernel: free_hot_cold_page+0x1c5/0x202 kernel: __put_page+0x2c/0x36 kernel: svc_rdma_put_context+0xd9/0xe4 [rpcrdma] kernel: svc_rdma_wc_send+0x50/0x98 [rpcrdma] This issue exists all the way back to v4.5, but refactoring and code re-organization prevents this simple patch from applying to kernels older than v4.12. The fix is the same, however, if someone needs to backport it. Reported-by: Ben Coddington BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=314 Fixes: 5d252f90a800 ('svcrdma: Add class for RDMA backwards ... ') Signed-off-by: Chuck Lever Reviewed-by: Jeff Layton Signed-off-by: J. Bruce Fields Signed-off-by: Greg Kroah-Hartman --- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 992594b7cc6b69..af7893501e40ac 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -133,6 +133,10 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, if (ret) goto out_err; + /* Bump page refcnt so Send completion doesn't release + * the rq_buffer before all retransmits are complete. 
+ */ + get_page(virt_to_page(rqst->rq_buffer)); ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0); if (ret) goto out_unmap; @@ -165,7 +169,6 @@ xprt_rdma_bc_allocate(struct rpc_task *task) return -EINVAL; } - /* svc_rdma_sendto releases this page */ page = alloc_page(RPCRDMA_DEF_GFP); if (!page) return -ENOMEM; @@ -184,6 +187,7 @@ xprt_rdma_bc_free(struct rpc_task *task) { struct rpc_rqst *rqst = task->tk_rqstp; + put_page(virt_to_page(rqst->rq_buffer)); kfree(rqst->rq_rbuffer); } From d56fd8a032ab2879b70f9b69c4f59713552c1f9a Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 11 Oct 2017 10:27:22 -0700 Subject: [PATCH 0201/3326] IB/srpt: Do not accept invalid initiator port names commit c70ca38960399a63d5c048b7b700612ea321d17e upstream. Make srpt_parse_i_port_id() return a negative value if hex2bin() fails. Fixes: commit a42d985bd5b2 ("ib_srpt: Initial SRP Target merge for v3.3-rc1") Signed-off-by: Bart Van Assche Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/srpt/ib_srpt.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 9e8e9220f81679..95178b4e3565d4 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2777,7 +2777,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) { const char *p; unsigned len, count, leading_zero_bytes; - int ret, rc; + int ret; p = name; if (strncasecmp(p, "0x", 2) == 0) @@ -2789,10 +2789,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) count = min(len / 2, 16U); leading_zero_bytes = 16 - count; memset(i_port_id, 0, leading_zero_bytes); - rc = hex2bin(i_port_id + leading_zero_bytes, p, count); - if (rc < 0) - pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc); - ret = 0; + ret = hex2bin(i_port_id + leading_zero_bytes, p, count); + if (ret < 0) + pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret); out: return ret; } From 5449283f245d85c80077fd024491de5ae0e40907 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 19 Oct 2017 08:40:30 +0300 Subject: [PATCH 0202/3326] IB/cm: Fix memory corruption in handling CM request commit 5a3dc32372439eb9a0d6027c54cbfff64803fce5 upstream. In recent code, two path record entries are alwasy cleared while allocated could be either one or two path record entries. This leads to zero out of unallocated memory. This fix initializes alternative path record only when alternative path is set. While we are at it, path record allocation doesn't check for OPA alternative path, but rest of the code checks for OPA alternative path. Path record allocation code doesn't check for OPA alternative LID. This can further lead to memory corruption when only one path record is allocated, but there is actually alternative OPA path record present in CM request. 
Fixes: 9fdca4da4d8c ("IB/SA: Split struct sa_path_rec based on IB and ROCE specific fields") Signed-off-by: Parav Pandit Reviewed-by: Moni Shoua Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/cm.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 4c4b46586af2b9..2af79e4f323593 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1575,7 +1575,7 @@ static void cm_format_req_event(struct cm_work *work, param->bth_pkey = cm_get_bth_pkey(work); param->port = cm_id_priv->av.port->port_num; param->primary_path = &work->path[0]; - if (req_msg->alt_local_lid) + if (cm_req_has_alt_path(req_msg)) param->alternate_path = &work->path[1]; else param->alternate_path = NULL; @@ -1856,7 +1856,8 @@ static int cm_req_handler(struct cm_work *work) cm_process_routed_req(req_msg, work->mad_recv_wc->wc); memset(&work->path[0], 0, sizeof(work->path[0])); - memset(&work->path[1], 0, sizeof(work->path[1])); + if (cm_req_has_alt_path(req_msg)) + memset(&work->path[1], 0, sizeof(work->path[1])); grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr); ret = ib_get_cached_gid(work->port->cm_dev->ib_device, work->port->port_num, @@ -3817,14 +3818,16 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, struct cm_port *port = mad_agent->context; struct cm_work *work; enum ib_cm_event_type event; + bool alt_path = false; u16 attr_id; int paths = 0; int going_down = 0; switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { case CM_REQ_ATTR_ID: - paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> - alt_local_lid != 0); + alt_path = cm_req_has_alt_path((struct cm_req_msg *) + mad_recv_wc->recv_buf.mad); + paths = 1 + (alt_path != 0); event = IB_CM_REQ_RECEIVED; break; case CM_MRA_ATTR_ID: From d555533d4238957beebbce0e92d190b95c8f5908 Mon Sep 17 00:00:00 2001 From: "Michael J. Ruhl" Date: Mon, 2 Oct 2017 11:04:19 -0700 Subject: [PATCH 0203/3326] IB/hfi1: Fix incorrect available receive user context count commit d7d626179fb283aba73699071af0df6d00e32138 upstream. The addition of the VNIC contexts to num_rcv_contexts changes the meaning of the sysfs value nctxts from available user contexts, to user contexts + reserved VNIC contexts. User applications that use nctxts are now broken. Update the calculation so that VNIC contexts are used only if there are hardware contexts available, and do not silently affect nctxts. Update code to use the calculated VNIC context number. Update the sysfs value nctxts to be available user contexts only. Fixes: 2280740f01ae ("IB/hfi1: Virtual Network Interface Controller (VNIC) HW support") Reviewed-by: Ira Weiny Reviewed-by: Niranjana Vishwanathapura Reviewed-by: Mike Marciniszyn Signed-off-by: Michael J. 
Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/hfi1/chip.c | 35 +++++++++++++++----------- drivers/infiniband/hw/hfi1/hfi.h | 2 ++ drivers/infiniband/hw/hfi1/sysfs.c | 2 +- drivers/infiniband/hw/hfi1/vnic_main.c | 7 ++++-- 4 files changed, 29 insertions(+), 17 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 0be42787759fa7..312444386f5413 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -13074,7 +13074,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd) first_sdma = last_general; last_sdma = first_sdma + dd->num_sdma; first_rx = last_sdma; - last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT; + last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts; /* VNIC MSIx interrupts get mapped when VNIC contexts are created */ dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues; @@ -13294,8 +13294,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd) * slow source, SDMACleanupDone) * N interrupts - one per used SDMA engine * M interrupt - one per kernel receive context + * V interrupt - one for each VNIC context */ - total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT; + total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts; /* ask for MSI-X interrupts */ request = request_msix(dd, total); @@ -13356,10 +13357,12 @@ static int set_up_interrupts(struct hfi1_devdata *dd) * in array of contexts * freectxts - number of free user contexts * num_send_contexts - number of PIO send contexts being used + * num_vnic_contexts - number of contexts reserved for VNIC */ static int set_up_context_variables(struct hfi1_devdata *dd) { unsigned long num_kernel_contexts; + u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT; int total_contexts; int ret; unsigned ngroups; @@ -13393,6 +13396,14 @@ static int set_up_context_variables(struct hfi1_devdata *dd) num_kernel_contexts); num_kernel_contexts = dd->chip_send_contexts - num_vls - 1; } + + /* Accommodate VNIC contexts if possible */ + if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) { + dd_dev_err(dd, "No receive contexts available for VNIC\n"); + num_vnic_contexts = 0; + } + total_contexts = num_kernel_contexts + num_vnic_contexts; + /* * User contexts: * - default to 1 user context per real (non-HT) CPU core if @@ -13402,19 +13413,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd) num_user_contexts = cpumask_weight(&node_affinity.real_cpu_mask); - total_contexts = num_kernel_contexts + num_user_contexts; - /* * Adjust the counts given a global max. 
*/ - if (total_contexts > dd->chip_rcv_contexts) { + if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) { dd_dev_err(dd, "Reducing # user receive contexts to: %d, from %d\n", - (int)(dd->chip_rcv_contexts - num_kernel_contexts), + (int)(dd->chip_rcv_contexts - total_contexts), (int)num_user_contexts); - num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts; /* recalculate */ - total_contexts = num_kernel_contexts + num_user_contexts; + num_user_contexts = dd->chip_rcv_contexts - total_contexts; } /* each user context requires an entry in the RMT */ @@ -13427,25 +13435,24 @@ static int set_up_context_variables(struct hfi1_devdata *dd) user_rmt_reduced); /* recalculate */ num_user_contexts = user_rmt_reduced; - total_contexts = num_kernel_contexts + num_user_contexts; } - /* Accommodate VNIC contexts */ - if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts) - total_contexts += HFI1_NUM_VNIC_CTXT; + total_contexts += num_user_contexts; /* the first N are kernel contexts, the rest are user/vnic contexts */ dd->num_rcv_contexts = total_contexts; dd->n_krcv_queues = num_kernel_contexts; dd->first_dyn_alloc_ctxt = num_kernel_contexts; + dd->num_vnic_contexts = num_vnic_contexts; dd->num_user_contexts = num_user_contexts; dd->freectxts = num_user_contexts; dd_dev_info(dd, - "rcv contexts: chip %d, used %d (kernel %d, user %d)\n", + "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n", (int)dd->chip_rcv_contexts, (int)dd->num_rcv_contexts, (int)dd->n_krcv_queues, - (int)dd->num_rcv_contexts - dd->n_krcv_queues); + dd->num_vnic_contexts, + dd->num_user_contexts); /* * Receive array allocation: diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 3ac9c307a285f1..6ff44dc606ebac 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1047,6 +1047,8 @@ struct hfi1_devdata { u64 z_send_schedule; u64 __percpu *send_schedule; + /* number of reserved contexts for VNIC usage */ + u16 num_vnic_contexts; /* number of receive contexts in use by the driver */ u32 num_rcv_contexts; /* number of pio send contexts in use by the driver */ diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 6d2702ef34ac4c..25e867393463e5 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device, * give a more accurate picture of total contexts available. 
*/ return scnprintf(buf, PAGE_SIZE, "%u\n", - min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt, + min(dd->num_user_contexts, (u32)dd->sc_sizes[SC_USER].count)); } diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index f419cbb0592889..1a17708be46a92 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -840,6 +840,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, struct rdma_netdev *rn; int i, size, rc; + if (!dd->num_vnic_contexts) + return ERR_PTR(-ENOMEM); + if (!port_num || (port_num > dd->num_pports)) return ERR_PTR(-EINVAL); @@ -848,7 +851,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo); netdev = alloc_netdev_mqs(size, name, name_assign_type, setup, - dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT); + dd->chip_sdma_engines, dd->num_vnic_contexts); if (!netdev) return ERR_PTR(-ENOMEM); @@ -856,7 +859,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, vinfo = opa_vnic_dev_priv(netdev); vinfo->dd = dd; vinfo->num_tx_q = dd->chip_sdma_engines; - vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT; + vinfo->num_rx_q = dd->num_vnic_contexts; vinfo->netdev = netdev; rn->free_rdma_netdev = hfi1_vnic_free_rn; rn->set_id = hfi1_vnic_set_vesw_id; From c15ec17e8d8da60e07f81afe7c6f699b6233d80e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 11 Oct 2017 10:27:26 -0700 Subject: [PATCH 0204/3326] IB/srp: Avoid that a cable pull can trigger a kernel crash commit 8a0d18c62121d3c554a83eb96e2752861d84d937 upstream. This patch fixes the following kernel crash: general protection fault: 0000 [#1] PREEMPT SMP Workqueue: ib_mad2 timeout_sends [ib_core] Call Trace: ib_sa_path_rec_callback+0x1c4/0x1d0 [ib_core] send_handler+0xb2/0xd0 [ib_core] timeout_sends+0x14d/0x220 [ib_core] process_one_work+0x200/0x630 worker_thread+0x4e/0x3b0 kthread+0x113/0x150 Fixes: commit aef9ec39c47f ("IB: Add SCSI RDMA Protocol (SRP) initiator") Signed-off-by: Bart Van Assche Reviewed-by: Sagi Grimberg Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/srp/ib_srp.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index fa5ccdb3bb2a07..60d7b493ed2dc1 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -665,12 +665,19 @@ static void srp_path_rec_completion(int status, static int srp_lookup_path(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; - int ret; + int ret = -ENODEV; ch->path.numb_path = 1; init_completion(&ch->done); + /* + * Avoid that the SCSI host can be removed by srp_remove_target() + * before srp_path_rec_completion() is called. 
+ */ + if (!scsi_host_get(target->scsi_host)) + goto out; + ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client, target->srp_host->srp_dev->dev, target->srp_host->port, @@ -684,18 +691,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch) GFP_KERNEL, srp_path_rec_completion, ch, &ch->path_query); - if (ch->path_query_id < 0) - return ch->path_query_id; + ret = ch->path_query_id; + if (ret < 0) + goto put; ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) - return ret; + goto put; - if (ch->status < 0) + ret = ch->status; + if (ret < 0) shost_printk(KERN_WARNING, target->scsi_host, PFX "Path record query failed\n"); - return ch->status; +put: + scsi_host_put(target->scsi_host); + +out: + return ret; } static int srp_send_req(struct srp_rdma_ch *ch, bool multich) From 2477321f8a1ae2920ba503e4216992bddfb33687 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 31 Oct 2017 10:33:18 -0500 Subject: [PATCH 0205/3326] IB/core: Avoid crash on pkey enforcement failed in received MADs commit 89548bcafec7ecfeea58c553f0834b5d575a66eb upstream. Below kernel crash is observed when Pkey security enforcement fails on received MADs. This issue is reported in [1]. ib_free_recv_mad() accesses the rmpp_list, whose initialization is needed before accessing it. When security enformcent fails on received MADs, MAD processing avoided due to security checks failed. OpenSM[3770]: SM port is down kernel: BUG: unable to handle kernel NULL pointer dereference at 0000000000000008 kernel: IP: ib_free_recv_mad+0x44/0xa0 [ib_core] kernel: PGD 0 kernel: P4D 0 kernel: kernel: Oops: 0002 [#1] SMP kernel: CPU: 0 PID: 2833 Comm: kworker/0:1H Tainted: P IO 4.13.4-1-pve #1 kernel: Hardware name: Dell XS23-TY3 /9CMP63, BIOS 1.71 09/17/2013 kernel: Workqueue: ib-comp-wq ib_cq_poll_work [ib_core] kernel: task: ffffa069c6541600 task.stack: ffffb9a729054000 kernel: RIP: 0010:ib_free_recv_mad+0x44/0xa0 [ib_core] kernel: RSP: 0018:ffffb9a729057d38 EFLAGS: 00010286 kernel: RAX: ffffa069cb138a48 RBX: ffffa069cb138a10 RCX: 0000000000000000 kernel: RDX: ffffb9a729057d38 RSI: 0000000000000000 RDI: ffffa069cb138a20 kernel: RBP: ffffb9a729057d60 R08: ffffa072d2d49800 R09: ffffa069cb138ae0 kernel: R10: ffffa069cb138ae0 R11: ffffa072b3994e00 R12: ffffb9a729057d38 kernel: R13: ffffa069d1c90000 R14: 0000000000000000 R15: ffffa069d1c90880 kernel: FS: 0000000000000000(0000) GS:ffffa069dba00000(0000) knlGS:0000000000000000 kernel: CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 kernel: CR2: 0000000000000008 CR3: 00000011f51f2000 CR4: 00000000000006f0 kernel: Call Trace: kernel: ib_mad_recv_done+0x5cc/0xb50 [ib_core] kernel: __ib_process_cq+0x5c/0xb0 [ib_core] kernel: ib_cq_poll_work+0x20/0x60 [ib_core] kernel: process_one_work+0x1e9/0x410 kernel: worker_thread+0x4b/0x410 kernel: kthread+0x109/0x140 kernel: ? process_one_work+0x410/0x410 kernel: ? kthread_create_on_node+0x70/0x70 kernel: ? 
SyS_exit_group+0x14/0x20 kernel: ret_from_fork+0x25/0x30 kernel: RIP: ib_free_recv_mad+0x44/0xa0 [ib_core] RSP: ffffb9a729057d38 kernel: CR2: 0000000000000008 [1] : https://www.spinics.net/lists/linux-rdma/msg56190.html Fixes: 47a2b338fe63 ("IB/core: Enforce security on management datagrams") Signed-off-by: Parav Pandit Reported-by: Chris Blake Reviewed-by: Daniel Jurgens Reviewed-by: Hal Rosenstock Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/mad.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index f8f53bb90837ca..cb91245e916332 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1974,14 +1974,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, unsigned long flags; int ret; + INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); ret = ib_mad_enforce_security(mad_agent_priv, mad_recv_wc->wc->pkey_index); if (ret) { ib_free_recv_mad(mad_recv_wc); deref_mad_agent(mad_agent_priv); + return; } - INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, From f92dea588014ae5083cac7ee33c88d58c9e87127 Mon Sep 17 00:00:00 2001 From: Daniel Jurgens Date: Tue, 7 Nov 2017 18:33:26 +0200 Subject: [PATCH 0206/3326] IB/core: Only maintain real QPs in the security lists commit 877add28178a7fa3c68f29c450d050a8e6513f08 upstream. When modify QP is called on a shared QP update the security context for the real QP. When security is subsequently enforced the shared QP handles will be checked as well. Without this change shared QP handles get added to the port/pkey lists, which is a bug, because not all shared QP handles will be checked for access. Also the shared QP security context wouldn't get removed from the port/pkey lists causing access to free memory and list corruption when they are destroyed. 
Fixes: d291f1a65232 ("IB/core: Enforce PKey security on QPs") Signed-off-by: Daniel Jurgens Reviewed-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/security.c | 51 ++++++++++++++++-------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 88bdafb297f5fe..28607bb42d873e 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -87,16 +87,14 @@ static int enforce_qp_pkey_security(u16 pkey, if (ret) return ret; - if (qp_sec->qp == qp_sec->qp->real_qp) { - list_for_each_entry(shared_qp_sec, - &qp_sec->shared_qp_list, - shared_qp_list) { - ret = security_ib_pkey_access(shared_qp_sec->security, - subnet_prefix, - pkey); - if (ret) - return ret; - } + list_for_each_entry(shared_qp_sec, + &qp_sec->shared_qp_list, + shared_qp_list) { + ret = security_ib_pkey_access(shared_qp_sec->security, + subnet_prefix, + pkey); + if (ret) + return ret; } return 0; } @@ -560,15 +558,22 @@ int ib_security_modify_qp(struct ib_qp *qp, int ret = 0; struct ib_ports_pkeys *tmp_pps; struct ib_ports_pkeys *new_pps; - bool special_qp = (qp->qp_type == IB_QPT_SMI || - qp->qp_type == IB_QPT_GSI || - qp->qp_type >= IB_QPT_RESERVED1); + struct ib_qp *real_qp = qp->real_qp; + bool special_qp = (real_qp->qp_type == IB_QPT_SMI || + real_qp->qp_type == IB_QPT_GSI || + real_qp->qp_type >= IB_QPT_RESERVED1); bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) || (qp_attr_mask & IB_QP_ALT_PATH)); + /* The port/pkey settings are maintained only for the real QP. Open + * handles on the real QP will be in the shared_qp_list. When + * enforcing security on the real QP all the shared QPs will be + * checked as well. + */ + if (pps_change && !special_qp) { - mutex_lock(&qp->qp_sec->mutex); - new_pps = get_new_pps(qp, + mutex_lock(&real_qp->qp_sec->mutex); + new_pps = get_new_pps(real_qp, qp_attr, qp_attr_mask); @@ -586,14 +591,14 @@ int ib_security_modify_qp(struct ib_qp *qp, if (!ret) ret = check_qp_port_pkey_settings(new_pps, - qp->qp_sec); + real_qp->qp_sec); } if (!ret) - ret = qp->device->modify_qp(qp->real_qp, - qp_attr, - qp_attr_mask, - udata); + ret = real_qp->device->modify_qp(real_qp, + qp_attr, + qp_attr_mask, + udata); if (pps_change && !special_qp) { /* Clean up the lists and free the appropriate @@ -602,8 +607,8 @@ int ib_security_modify_qp(struct ib_qp *qp, if (ret) { tmp_pps = new_pps; } else { - tmp_pps = qp->qp_sec->ports_pkeys; - qp->qp_sec->ports_pkeys = new_pps; + tmp_pps = real_qp->qp_sec->ports_pkeys; + real_qp->qp_sec->ports_pkeys = new_pps; } if (tmp_pps) { @@ -611,7 +616,7 @@ int ib_security_modify_qp(struct ib_qp *qp, port_pkey_list_remove(&tmp_pps->alt); } kfree(tmp_pps); - mutex_unlock(&qp->qp_sec->mutex); + mutex_unlock(&real_qp->qp_sec->mutex); } return ret; } From eb2499b394805094884601b85d209bb9a9d16384 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sun, 9 Jul 2017 13:08:58 +0200 Subject: [PATCH 0207/3326] NFC: fix device-allocation error return commit c45e3e4c5b134b081e8af362109905427967eb19 upstream. A recent change fixing NFC device allocation itself introduced an error-handling bug by returning an error pointer in case device-id allocation failed. This is clearly broken as the callers still expected NULL to be returned on errors as detected by Dan's static checker. 
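The breakage follows from the caller-side convention; a minimal sketch (arguments elided, nfc_set_drvdata() used only for illustration):

struct nfc_dev *dev;

dev = nfc_allocate_device(/* ops, protocols, headroom, tailroom */);
if (!dev)			/* only NULL is treated as failure...	   */
	return -ENOMEM;
nfc_set_drvdata(dev, drvdata);	/* ...so an ERR_PTR value, which is	   */
				/* non-NULL, would be dereferenced here	   */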
Fix this up by returning NULL in the event that we've run out of memory when allocating a new device id. Note that the offending commit is marked for stable (3.8) so this fix needs to be backported along with it. Fixes: 20777bc57c34 ("NFC: fix broken device allocation") Reported-by: Dan Carpenter Signed-off-by: Johan Hovold Signed-off-by: Samuel Ortiz Signed-off-by: Greg Kroah-Hartman --- net/nfc/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/nfc/core.c b/net/nfc/core.c index 5cf33df888c3d1..c699d64a0753ae 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -1106,7 +1106,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, err_free_dev: kfree(dev); - return ERR_PTR(rc); + return NULL; } EXPORT_SYMBOL(nfc_allocate_device); From bb76fcebb9c9edf843581e44c6a0a60033f3a66f Mon Sep 17 00:00:00 2001 From: Bin Meng Date: Mon, 11 Sep 2017 02:41:53 -0700 Subject: [PATCH 0208/3326] spi-nor: intel-spi: Fix broken software sequencing codes commit 9d63f17661e25fd28714dac94bdebc4ff5b75f09 upstream. There are two bugs in current intel_spi_sw_cycle(): - The 'data byte count' field should be the number of bytes transferred minus 1 - SSFSTS_CTL is the offset from ispi->sregs, not ispi->base Signed-off-by: Bin Meng Acked-by: Mika Westerberg Signed-off-by: Cyrille Pitchen Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/spi-nor/intel-spi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index 8a596bfeddff6c..7802ac3ba93483 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c @@ -422,7 +422,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, if (ret < 0) return ret; - val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; + val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; val |= ret << SSFSTS_CTL_COP_SHIFT; val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; val |= SSFSTS_CTL_SCGO; @@ -432,7 +432,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, if (ret) return ret; - status = readl(ispi->base + SSFSTS_CTL); + status = readl(ispi->sregs + SSFSTS_CTL); if (status & SSFSTS_CTL_FCERR) return -EIO; else if (status & SSFSTS_CTL_AEL) From 5727faafa56eeacb1f409969b373d912ef259ca5 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:44 -0600 Subject: [PATCH 0209/3326] i40e: Use smp_rmb rather than read_barrier_depends commit 52c6912fde0133981ee50ba08808f257829c4c93 upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with i40e as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. 
Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6498da8806cbf4..ea20aacd5e1d24 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3760,7 +3760,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 120c68f78951dd..3c07ff171ddcce 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* we have caught up to head, no work left to do */ From 8379910921d1658391d62d24e6534c4725563252 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:47 -0600 Subject: [PATCH 0210/3326] igb: Use smp_rmb rather than read_barrier_depends commit c4cb99185b4cc96c0a1c70104dc21ae14d7e7f28 upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with igb as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ea69af267d6352..b0031c5ff767a4 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6970,7 +6970,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) From 0d8be80e5fc4c2e03bc9df9b0fb53e62981e8d53 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:46 -0600 Subject: [PATCH 0211/3326] igbvf: Use smp_rmb rather than read_barrier_depends commit 1e1f9ca546556e508d021545861f6b5fc75a95fe upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with igbvf as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. 
Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/igbvf/netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 1ed556911b1472..6f5888bd91944e 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) From bc46257bf4d825e56566d2710b56efdc913b0eb6 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:45 -0600 Subject: [PATCH 0212/3326] ixgbevf: Use smp_rmb rather than read_barrier_depends commit ae0c585d93dfaf923d2c7eb44b2c3ab92854ea9b upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with ixgbevf as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 032f8ac06357ae..90ecc4b0646210 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) From 61cef8f08c5121f1e7d192190c4f71a7eb6ec58f Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:49 -0600 Subject: [PATCH 0213/3326] i40evf: Use smp_rmb rather than read_barrier_depends commit f72271e2a0ae4277d53c4053f5eed8bb346ba38a upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with i40evf as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. 
Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index c32c62462c844a..07a4e6e13925fa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* if the descriptor isn't done, no work yet to do */ From 730501f25a42c88487458b0c08253734fd6ac6b2 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:48 -0600 Subject: [PATCH 0214/3326] fm10k: Use smp_rmb rather than read_barrier_depends commit 7b8edcc685b5e2c3c37aa13dc50a88e84a5bfef8 upstream. The original issue being fixed in this patch was seen with the ixgbe driver, but the same issue exists with fm10k as well, as the code is very similar. read_barrier_depends is not sufficient to ensure loads following it are not speculatively loaded out of order by the CPU, which can result in stale data being loaded, causing potential system crashes. Signed-off-by: Brian King Acked-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 9dffaba85ae6bb..103c0a742d039d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1229,7 +1229,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) From 00f886bb5545ea5eb82a9a0abf656257bb570957 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 17 Nov 2017 11:05:43 -0600 Subject: [PATCH 0215/3326] ixgbe: Fix skb list corruption on Power systems commit 0a9a17e3bb4564caf4bfe2a6783ae1287667d188 upstream. This patch fixes an issue seen on Power systems with ixgbe which results in skb list corruption and an eventual kernel oops. The following is what was observed: CPU 1 CPU2 ============================ ============================ 1: ixgbe_xmit_frame_ring ixgbe_clean_tx_irq 2: first->skb = skb eop_desc = tx_buffer->next_to_watch 3: ixgbe_tx_map read_barrier_depends() 4: wmb check adapter written status bit 5: first->next_to_watch = tx_desc napi_consume_skb(tx_buffer->skb ..); 6: writel(i, tx_ring->tail); The read_barrier_depends is insufficient to ensure that tx_buffer->skb does not get loaded prior to tx_buffer->next_to_watch, which then results in loading a stale skb pointer. This patch replaces the read_barrier_depends with smp_rmb to ensure loads are ordered with respect to the load of tx_buffer->next_to_watch. 
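The ordering contract shared by this whole series of barrier conversions can be sketched as follows. This is a minimal illustrative model of the publish/consume pattern described above, not the drivers' actual code: struct tx_buffer here is simplified, and desc_done(), xmit_publish() and clean_one() are placeholder names.

  #include <asm/barrier.h>
  #include <linux/compiler.h>
  #include <linux/skbuff.h>

  struct tx_desc;                                /* opaque HW descriptor */

  struct tx_buffer {
          struct sk_buff *skb;                   /* written first by the xmit path  */
          struct tx_desc *next_to_watch;         /* published last by the xmit path */
  };

  /* Placeholder for the hardware "descriptor done" (DD bit) test. */
  static bool desc_done(const struct tx_desc *eop)
  {
          return true;
  }

  /* transmit path (producer) */
  static void xmit_publish(struct tx_buffer *buf, struct sk_buff *skb,
                           struct tx_desc *eop)
  {
          buf->skb = skb;                        /* A: payload store             */
          smp_wmb();                             /* make A visible before B      */
          WRITE_ONCE(buf->next_to_watch, eop);   /* B: publish the descriptor    */
  }

  /* cleanup path (consumer, the *_clean_tx_irq() loops patched above) */
  static bool clean_one(struct tx_buffer *buf)
  {
          struct tx_desc *eop = READ_ONCE(buf->next_to_watch);

          if (!eop)
                  return false;
          /*
           * smp_rmb() orders *all* later loads (the descriptor's done bit
           * and buf->skb) after the load of next_to_watch.  The old
           * read_barrier_depends() only ordered loads carrying a data
           * dependency from 'eop', so the independent load of buf->skb
           * could be performed early and return a stale pointer.
           */
          smp_rmb();
          if (!desc_done(eop))
                  return false;
          napi_consume_skb(buf->skb, 0);
          buf->skb = NULL;
          WRITE_ONCE(buf->next_to_watch, NULL);
          return true;
  }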
Signed-off-by: Brian King Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6d5f31e943583d..879a9c4cef5983 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) From c7311af0e236710e57a71bc942d1fd1f968865a5 Mon Sep 17 00:00:00 2001 From: John David Anglin Date: Sat, 11 Nov 2017 17:11:16 -0500 Subject: [PATCH 0216/3326] parisc: Fix validity check of pointer size argument in new CAS implementation commit 05f016d2ca7a4fab99d5d5472168506ddf95e74f upstream. As noted by Christoph Biedl, passing a pointer size of 4 in the new CAS implementation causes a kernel crash. The attached patch corrects the off by one error in the argument validity check. In reviewing the code, I noticed that we only perform word operations with the pointer size argument. The subi instruction intentionally uses a word condition on 64-bit kernels. Nullification was used instead of a cmpib instruction as the branch should never be taken. The shlw pseudo-operation generates a depw,z instruction and it clears the target before doing a shift left word deposit. Thus, we don't need to clip the upper 32 bits of this argument on 64-bit kernels. Tested with a gcc testsuite run with a 64-bit kernel. The gcc atomic code in libgcc is the only direct user of the new CAS implementation that I am aware of. Signed-off-by: John David Anglin Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman --- arch/parisc/kernel/syscall.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 41e60a9c7db23b..e775f80ae28c5a 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -690,15 +690,15 @@ cas_action: /* ELF32 Process entry path */ lws_compare_and_swap_2: #ifdef CONFIG_64BIT - /* Clip the input registers */ + /* Clip the input registers. We don't need to clip %r23 as we + only use it for word operations */ depdi 0, 31, 32, %r26 depdi 0, 31, 32, %r25 depdi 0, 31, 32, %r24 - depdi 0, 31, 32, %r23 #endif /* Check the validity of the size pointer */ - subi,>>= 4, %r23, %r0 + subi,>>= 3, %r23, %r0 b,n lws_exit_nosys /* Jump to the functions which will load the old and new values into From df4d69feaf017cd2d6d521d5d32870e7e05f7121 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 21 Nov 2017 15:28:20 +0100 Subject: [PATCH 0217/3326] powerpc: Fix boot on BOOK3S_32 with CONFIG_STRICT_KERNEL_RWX commit 252eb55816a6f69ef9464cad303cdb3326cdc61d upstream. On powerpc32, patch_instruction() is called by apply_feature_fixups() which is called from early_init() There is the following note in front of early_init(): * Note that the kernel may be running at an address which is different * from the address that it was linked at, so we must use RELOC/PTRRELOC * to access static data (including strings). 
-- paulus Therefore, slab_is_available() cannot be called yet, and text_poke_area must be addressed with PTRRELOC() Fixes: 95902e6c8864 ("powerpc/mm: Implement STRICT_KERNEL_RWX on PPC32") Reported-by: Meelis Roos Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/lib/code-patching.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index c9de03e0c1f123..d469224c4ada8c 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -21,6 +21,7 @@ #include #include #include +#include static int __patch_instruction(unsigned int *addr, unsigned int instr) { @@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr) * During early early boot patch_instruction is called * when text_poke_area is not ready, but we still need * to allow patching. We just do the plain old patching - * We use slab_is_available and per cpu read * via this_cpu_read - * of text_poke_area. Per-CPU areas might not be up early - * this can create problems with just using this_cpu_read() */ - if (!slab_is_available() || !this_cpu_read(text_poke_area)) + if (!this_cpu_read(*PTRRELOC(&text_poke_area))) return __patch_instruction(addr, instr); local_irq_save(flags); From 44a4adbc9949653a89e027a86caf373ec49a2fd1 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Mon, 16 Oct 2017 16:21:35 +1100 Subject: [PATCH 0218/3326] powerpc/mm/radix: Fix crashes on Power9 DD1 with radix MMU and STRICT_RWX commit f79ad50ea3c73fb1ea5b09e95c864e5bb263adfb upstream. When using the radix MMU on Power9 DD1, to work around a hardware problem, radix__pte_update() is required to do a two stage update of the PTE. First we write a zero value into the PTE, then we flush the TLB, and then we write the new PTE value. In the normal case that works OK, but it does not work if we're updating the PTE that maps the code we're executing, because the mapping is removed by the TLB flush and we can no longer execute from it. Unfortunately the STRICT_RWX code needs to do exactly that. The exact symptoms when we hit this case vary, sometimes we print an oops and then get stuck after that, but I've also seen a machine just get stuck continually page faulting with no oops printed. The variance is presumably due to the exact layout of the text and the page size used for the mappings. In all cases we are unable to boot to a shell. There are possible solutions such as creating a second mapping of the TLB flush code, executing from that, and then jumping back to the original. However we don't want to add that level of complexity for a DD1 work around. So just detect that we're running on Power9 DD1 and refrain from changing the permissions, effectively disabling STRICT_RWX on Power9 DD1. Fixes: 7614ff3272a1 ("powerpc/mm/radix: Implement STRICT_RWX/mark_rodata_ro() for Radix") Reported-by: Andrew Jeffery [Changelog as suggested by Michael Ellerman ] Signed-off-by: Balbir Singh Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/pgtable-radix.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 39c252b54d1618..cfbbee941a76b1 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -169,6 +169,16 @@ void radix__mark_rodata_ro(void) { unsigned long start, end; + /* + * mark_rodata_ro() will mark itself as !writable at some point. 
+ * Due to DD1 workaround in radix__pte_update(), we'll end up with + * an invalid pte and the system will crash quite severly. + */ + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { + pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n"); + return; + } + start = (unsigned long)_stext; end = (unsigned long)__init_begin; From 0b090846da0a14d24a2c943d3ae030692594c9f6 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 16 Oct 2017 00:13:41 +0530 Subject: [PATCH 0219/3326] powerpc/perf/imc: Use cpu_to_node() not topology_physical_package_id() commit f3f1dfd600ff82b18b7ea73d80eb27f476a6aa97 upstream. init_imc_pmu() uses topology_physical_package_id() to detect the node id of the processor it is on to get local memory, but that's wrong, and can lead to crashes. Fix it to use cpu_to_node(). Fixes: 885dcd709ba9 ("powerpc/perf: Add nest IMC PMU support") Reported-By: Rob Lippert Tested-By: Madhavan Srinivasan Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/perf/imc-pmu.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 36344117c680b9..cf64e16f92c255 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -467,7 +467,7 @@ static int nest_imc_event_init(struct perf_event *event) * Nest HW counter memory resides in a per-chip reserve-memory (HOMER). * Get the base memory addresss for this cpu. */ - chip_id = topology_physical_package_id(event->cpu); + chip_id = cpu_to_chip_id(event->cpu); pcni = pmu->mem_info; do { if (pcni->id == chip_id) { @@ -524,19 +524,19 @@ static int nest_imc_event_init(struct perf_event *event) */ static int core_imc_mem_init(int cpu, int size) { - int phys_id, rc = 0, core_id = (cpu / threads_per_core); + int nid, rc = 0, core_id = (cpu / threads_per_core); struct imc_mem_info *mem_info; /* * alloc_pages_node() will allocate memory for core in the * local node only. */ - phys_id = topology_physical_package_id(cpu); + nid = cpu_to_node(cpu); mem_info = &core_imc_pmu->mem_info[core_id]; mem_info->id = core_id; /* We need only vbase for core counters */ - mem_info->vbase = page_address(alloc_pages_node(phys_id, + mem_info->vbase = page_address(alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | __GFP_NOWARN, get_order(size))); if (!mem_info->vbase) @@ -797,14 +797,14 @@ static int core_imc_event_init(struct perf_event *event) static int thread_imc_mem_alloc(int cpu_id, int size) { u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id); - int phys_id = topology_physical_package_id(cpu_id); + int nid = cpu_to_node(cpu_id); if (!local_mem) { /* * This case could happen only once at start, since we dont * free the memory in cpu offline path. */ - local_mem = page_address(alloc_pages_node(phys_id, + local_mem = page_address(alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | __GFP_NOWARN, get_order(size))); if (!local_mem) From 586fa9ed8b4916c84f1284020843c825fff97e2c Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Thu, 31 Aug 2017 21:55:57 +0530 Subject: [PATCH 0220/3326] powerpc/signal: Properly handle return value from uprobe_deny_signal() commit 46725b17f1c6c815a41429259b3f070c01e71bc1 upstream. When a uprobe is installed on an instruction that we currently do not emulate, we copy the instruction into a xol buffer and single step that instruction. 
If that instruction generates a fault, we abort the single stepping before invoking the signal handler. Once the signal handler is done, the uprobe trap is hit again since the instruction is retried and the process repeats. We use uprobe_deny_signal() to detect if the xol instruction triggered a signal. If so, we clear TIF_SIGPENDING and set TIF_UPROBE so that the signal is not handled until after the single stepping is aborted. In this case, uprobe_deny_signal() returns true and get_signal() ends up returning 0. However, in do_signal(), we are not looking at the return value, but depending on ksig.sig for further action, all with an uninitialized ksig that is not touched in this scenario. Fix the same by initializing ksig.sig to 0. Fixes: 129b69df9c90 ("powerpc: Use get_signal() signal_setup_done()") Reported-by: Anton Blanchard Signed-off-by: Naveen N. Rao Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index e9436c5e1e0940..3d7539b90010c6 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -103,7 +103,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, static void do_signal(struct task_struct *tsk) { sigset_t *oldset = sigmask_to_save(); - struct ksignal ksig; + struct ksignal ksig = { .sig = 0 }; int ret; int is32 = is_32bit_task(); From cee5a6e8c3966b146a255f121425a064559a0767 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 14 Nov 2017 15:48:47 +1100 Subject: [PATCH 0221/3326] powerpc/64s: Fix masking of SRR1 bits on instruction fault commit 475b581ff57bc01437cbc680e281869918447763 upstream. On 64-bit Book3s, when we take an instruction fault the reason for the fault may be reported in SRR1. For data faults the reason is reported in DSISR (Data Storage Instruction Status Register). The reasons reported in each do not necessarily correspond, so we mask the SRR1 bits before copying them to the DSISR, which is then used by the page fault code. Prior to commit b4c001dc44f0 ("powerpc/mm: Use symbolic constants for filtering SRR1 bits on ISIs") we used a hard-coded mask of 0x58200000, which corresponds to: DSISR_NOHPTE 0x40000000 /* no translation found */ DSISR_NOEXEC_OR_G 0x10000000 /* exec of no-exec or guarded */ DSISR_PROTFAULT 0x08000000 /* protection fault */ DSISR_KEYFAULT 0x00200000 /* Storage Key fault */ That commit added a #define for the mask, DSISR_SRR1_MATCH_64S, but incorrectly used a different similarly named DSISR_BAD_FAULT_64S. This had the effect of changing the mask to 0xa43a0000, which omits everything but DSISR_KEYFAULT. Luckily this had no visible effect, because in practice we hardly use the DSISR bits. The lack of DSISR_NOHPTE means a TLB flush optimisation was missed in the native HPTE code, and DSISR_NOEXEC_OR_G and DSISR_PROTFAULT are both only used to trigger rare warnings. So we got lucky, but let's fix it. The new value only has bits between 17 and 30 set, so we can continue to use andis. 
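The mask arithmetic can be checked mechanically. The constant values below are copied from those quoted above (the authoritative definitions live in arch/powerpc/include/asm/reg.h); this small standalone check is not part of the upstream commit and only confirms the two claims made in this message.

  /* Standalone sanity check of the DSISR mask arithmetic described above. */
  #define DSISR_NOHPTE            0x40000000  /* no translation found */
  #define DSISR_NOEXEC_OR_G       0x10000000  /* exec of no-exec or guarded */
  #define DSISR_PROTFAULT         0x08000000  /* protection fault */
  #define DSISR_KEYFAULT          0x00200000  /* storage key fault */

  #define DSISR_SRR1_MATCH_64S    (DSISR_NOHPTE | DSISR_NOEXEC_OR_G | \
                                   DSISR_PROTFAULT | DSISR_KEYFAULT)

  /* 1) the symbolic mask equals the old hard-coded 0x58200000 */
  _Static_assert(DSISR_SRR1_MATCH_64S == 0x58200000,
                 "mask matches the previous hard-coded value");

  /* 2) every bit sits in the upper halfword, so the exception path can
   *    still test it with andis. rX,rY,DSISR_SRR1_MATCH_64S@h */
  _Static_assert((DSISR_SRR1_MATCH_64S & 0xffff) == 0,
                 "all bits are reachable by andis.");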
Fixes: b4c001dc44f0 ("powerpc/mm: Use symbolic constants for filtering SRR1 bits on ISIs") Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/exceptions-64s.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 1c80bd292e481d..06598142d755c7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -542,7 +542,7 @@ EXC_COMMON_BEGIN(instruction_access_common) RECONCILE_IRQ_STATE(r10, r11) ld r12,_MSR(r1) ld r3,_NIP(r1) - andis. r4,r12,DSISR_BAD_FAULT_64S@h + andis. r4,r12,DSISR_SRR1_MATCH_64S@h li r5,0x400 std r3,_DAR(r1) std r4,_DSISR(r1) From de9c35508f80fc77804402f0d5a50885b43e3339 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 10 Nov 2017 04:27:39 +1100 Subject: [PATCH 0222/3326] powerpc/64s/radix: Fix 128TB-512TB virtual address boundary case allocation commit 85e3f1adcb9d49300b0a943bb93f9604be375bfb upstream. Radix VA space allocations test addresses against mm->task_size which is 512TB, even in cases where the intention is to limit allocation to below 128TB. This results in mmap with a hint address below 128TB but address + length above 128TB succeeding when it should fail (as hash does after the previous patch). Set the high address limit to be considered up front, and base subsequent allocation checks on that consistently. Fixes: f4ea6dcb08ea ("powerpc/mm: Enable mappings above 128TB") Signed-off-by: Nicholas Piggin Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/hugetlbpage-radix.c | 26 +++++++++----- arch/powerpc/mm/mmap.c | 55 +++++++++++++++++------------ 2 files changed, 50 insertions(+), 31 deletions(-) diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 558e9d3891bfc8..bd022d16745c25 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -49,17 +49,28 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct hstate *h = hstate_file(file); + int fixed = (flags & MAP_FIXED); + unsigned long high_limit; struct vm_unmapped_area_info info; - if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE)) - mm->context.addr_limit = TASK_SIZE; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; if (len & ~huge_page_mask(h)) return -EINVAL; - if (len > mm->task_size) + if (len > high_limit) return -ENOMEM; + if (fixed) { + if (addr > high_limit - len) + return -ENOMEM; + } - if (flags & MAP_FIXED) { + if (unlikely(addr > mm->context.addr_limit && + mm->context.addr_limit != TASK_SIZE)) + mm->context.addr_limit = TASK_SIZE; + + if (fixed) { if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; @@ -68,7 +79,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (mm->task_size - len >= addr && + if (high_limit - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -79,12 +90,9 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = PAGE_SIZE; - info.high_limit = current->mm->mmap_base; + info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW); 
info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; - if (addr > DEFAULT_MAP_WINDOW) - info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW; - return vm_unmapped_area(&info); } diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 5d78b193fec414..6d476a7b56112b 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -106,22 +106,32 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; + int fixed = (flags & MAP_FIXED); + unsigned long high_limit; struct vm_unmapped_area_info info; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; + + if (len > high_limit) + return -ENOMEM; + if (fixed) { + if (addr > high_limit - len) + return -ENOMEM; + } + if (unlikely(addr > mm->context.addr_limit && mm->context.addr_limit != TASK_SIZE)) mm->context.addr_limit = TASK_SIZE; - if (len > mm->task_size - mmap_min_addr) - return -ENOMEM; - - if (flags & MAP_FIXED) + if (fixed) return addr; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (mm->task_size - len >= addr && addr >= mmap_min_addr && + if (high_limit - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -129,13 +139,9 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, info.flags = 0; info.length = len; info.low_limit = mm->mmap_base; + info.high_limit = high_limit; info.align_mask = 0; - if (unlikely(addr > DEFAULT_MAP_WINDOW)) - info.high_limit = mm->context.addr_limit; - else - info.high_limit = DEFAULT_MAP_WINDOW; - return vm_unmapped_area(&info); } @@ -149,37 +155,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp, struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long addr = addr0; + int fixed = (flags & MAP_FIXED); + unsigned long high_limit; struct vm_unmapped_area_info info; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; + + if (len > high_limit) + return -ENOMEM; + if (fixed) { + if (addr > high_limit - len) + return -ENOMEM; + } + if (unlikely(addr > mm->context.addr_limit && mm->context.addr_limit != TASK_SIZE)) mm->context.addr_limit = TASK_SIZE; - /* requested length too big for entire address space */ - if (len > mm->task_size - mmap_min_addr) - return -ENOMEM; - - if (flags & MAP_FIXED) + if (fixed) return addr; - /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (mm->task_size - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vm_start_gap(vma))) + if (high_limit - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vm_start_gap(vma))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); - info.high_limit = mm->mmap_base; + info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW); info.align_mask = 0; - if (addr > DEFAULT_MAP_WINDOW) - info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW; - addr = vm_unmapped_area(&info); if (!(addr & ~PAGE_MASK)) return addr; From 45567ab5983900c5363019c5465b1dc46fa0218d Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 13 Nov 2017 23:17:28 +1100 Subject: [PATCH 0223/3326] powerpc/64s/hash: Fix 512T hint detection to use >= 128T commit 7ece370996b694ae263025e056ad785afc1be5ab upstream. 
Currently userspace is able to request mmap() search between 128T-512T by specifying a hint address that is greater than 128T. But that means a hint of 128T exactly will return an address below 128T, which is confusing and wrong. So fix the logic to check the hint is greater than *or equal* to 128T. Fixes: f4ea6dcb08ea ("powerpc/mm: Enable mappings above 128TB") Suggested-by: Aneesh Kumar K.V Suggested-by: Nicholas Piggin [mpe: Split out of Nick's bigger patch] Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/slice.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 45f6740dd407da..48a5312103a1f2 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -419,7 +419,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, /* * Check if we need to expland slice area. */ - if (unlikely(addr > mm->context.addr_limit && + if (unlikely(addr >= mm->context.addr_limit && mm->context.addr_limit != TASK_SIZE)) { mm->context.addr_limit = TASK_SIZE; on_each_cpu(slice_flush_segments, mm, 1); @@ -427,7 +427,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, /* * This mmap request can allocate upt to 512TB */ - if (addr > DEFAULT_MAP_WINDOW) + if (addr >= DEFAULT_MAP_WINDOW) high_limit = mm->context.addr_limit; else high_limit = DEFAULT_MAP_WINDOW; From 2279e9c895778cc8942b82e6e9fa153d5024acc2 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 10 Nov 2017 04:27:36 +1100 Subject: [PATCH 0224/3326] powerpc/64s/hash: Fix 128TB-512TB virtual address boundary case allocation commit 6a72dc038b615229a1b285829d6c8378d15c2347 upstream. When allocating VA space with a hint that crosses 128TB, the SLB addr_limit variable is not expanded if addr is not > 128TB, but the slice allocation looks at task_size, which is 512TB. This results in slice_check_fit() incorrectly succeeding because the slice_count truncates off bit 128 of the requested mask, so the comparison to the available mask succeeds. Fix this by using mm->context.addr_limit instead of mm->task_size for testing allocation limits. This causes such allocations to fail. 
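Taken together, the rule this series re-establishes for both hash and radix can be illustrated from userspace. The sketch below is not part of any of these patches; it assumes a 64-bit Book3S powerpc system where the hinted regions are free, and it only reports what the kernel returned against the expectations spelled out in these commit messages.

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <sys/mman.h>

  #define TB              (1UL << 40)
  #define WINDOW_128TB    (128 * TB)              /* DEFAULT_MAP_WINDOW */
  #define LEN             (2UL * 1024 * 1024)

  static void try_hint(unsigned long hint)
  {
          void *p = mmap((void *)hint, LEN, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          if (p == MAP_FAILED) {
                  printf("hint 0x%lx: mmap failed\n", hint);
                  return;
          }
          printf("hint 0x%lx -> 0x%lx (%s 128TB)\n", hint, (unsigned long)p,
                 (unsigned long)p >= WINDOW_128TB ? "at/above" : "below");
          munmap(p, LEN);
  }

  int main(void)
  {
          try_hint(0);                     /* no hint: expect a mapping below 128TB     */
          try_hint(WINDOW_128TB);          /* hint == 128TB: may land at/above 128TB,   */
                                           /* per the ">= 128T" rule fixed above        */
          try_hint(WINDOW_128TB - TB);     /* hint below 128TB: expect it stays below   */
          try_hint(WINDOW_128TB - 65536);  /* hint+len would cross 128TB: expect the    */
                                           /* hint to be ignored, mapping stays below   */
          return 0;
  }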
Fixes: f4ea6dcb08ea ("powerpc/mm: Enable mappings above 128TB") Reported-by: Florian Weimer Signed-off-by: Nicholas Piggin Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/slice.c | 50 ++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 48a5312103a1f2..3889201b560c35 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, { struct vm_area_struct *vma; - if ((mm->task_size - len) < addr) + if ((mm->context.addr_limit - len) < addr) return 0; vma = find_vma(mm, addr); return (!vma || (addr + len) <= vm_start_gap(vma)); @@ -133,7 +133,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret) if (!slice_low_has_vma(mm, i)) ret->low_slices |= 1u << i; - if (mm->task_size <= SLICE_LOW_TOP) + if (mm->context.addr_limit <= SLICE_LOW_TOP) return; for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) @@ -412,25 +412,31 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, struct slice_mask compat_mask; int fixed = (flags & MAP_FIXED); int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); + unsigned long page_size = 1UL << pshift; struct mm_struct *mm = current->mm; unsigned long newaddr; unsigned long high_limit; - /* - * Check if we need to expland slice area. - */ - if (unlikely(addr >= mm->context.addr_limit && - mm->context.addr_limit != TASK_SIZE)) { - mm->context.addr_limit = TASK_SIZE; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit) + high_limit = TASK_SIZE; + + if (len > high_limit) + return -ENOMEM; + if (len & (page_size - 1)) + return -EINVAL; + if (fixed) { + if (addr & (page_size - 1)) + return -EINVAL; + if (addr > high_limit - len) + return -ENOMEM; + } + + if (high_limit > mm->context.addr_limit) { + mm->context.addr_limit = high_limit; on_each_cpu(slice_flush_segments, mm, 1); } - /* - * This mmap request can allocate upt to 512TB - */ - if (addr >= DEFAULT_MAP_WINDOW) - high_limit = mm->context.addr_limit; - else - high_limit = DEFAULT_MAP_WINDOW; + /* * init different masks */ @@ -446,27 +452,19 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, /* Sanity checks */ BUG_ON(mm->task_size == 0); + BUG_ON(mm->context.addr_limit == 0); VM_BUG_ON(radix_enabled()); slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n", addr, len, flags, topdown); - if (len > mm->task_size) - return -ENOMEM; - if (len & ((1ul << pshift) - 1)) - return -EINVAL; - if (fixed && (addr & ((1ul << pshift) - 1))) - return -EINVAL; - if (fixed && addr > (mm->task_size - len)) - return -ENOMEM; - /* If hint, make sure it matches our alignment restrictions */ if (!fixed && addr) { - addr = _ALIGN_UP(addr, 1ul << pshift); + addr = _ALIGN_UP(addr, page_size); slice_dbg(" aligned addr=%lx\n", addr); /* Ignore hint if it's too large or overlaps a VMA */ - if (addr > mm->task_size - len || + if (addr > high_limit - len || !slice_area_is_free(mm, addr, len)) addr = 0; } From 34c8d3ffb64883b9279c03e79c1b08c9a1931940 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 10 Nov 2017 04:27:37 +1100 Subject: [PATCH 0225/3326] powerpc/64s/hash: Fix fork() with 512TB process address space commit effc1b25088502fbd30305c79773de2d1f7470a6 upstream. 
Hash unconditionally resets the addr_limit to default (128TB) when the mm context is initialised. If a process has > 128TB mappings when it forks, the child will not get the 512TB addr_limit, so accesses to valid > 128TB mappings will fail in the child. Fix this by only resetting the addr_limit to default if it was 0. Non zero indicates it was duplicated from the parent (0 means exec()). Fixes: f4ea6dcb08ea ("powerpc/mm: Enable mappings above 128TB") Signed-off-by: Nicholas Piggin Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/mmu_context_book3s64.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index 05e15386d4cb35..b94fb62e60fde9 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -93,11 +93,11 @@ static int hash__init_new_context(struct mm_struct *mm) return index; /* - * We do switch_slb() early in fork, even before we setup the - * mm->context.addr_limit. Default to max task size so that we copy the - * default values to paca which will help us to handle slb miss early. + * In the case of exec, use the default limit, + * otherwise inherit it from the mm we are duplicating. */ - mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64; + if (!mm->context.addr_limit) + mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64; /* * The old code would re-promote on fork, we don't do that when using From 275e5c44679dcaed07007a8fbc634721afc7020d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 10 Nov 2017 04:27:38 +1100 Subject: [PATCH 0226/3326] powerpc/64s/hash: Allow MAP_FIXED allocations to cross 128TB boundary commit 35602f82d0c765f991420e319c8d3a596c921eb8 upstream. While mapping hints with a length that cross 128TB are disallowed, MAP_FIXED allocations that cross 128TB are allowed. These are failing on hash (on radix they succeed). Add an additional case for fixed mappings to expand the addr_limit when crossing 128TB. Fixes: f4ea6dcb08ea ("powerpc/mm: Enable mappings above 128TB") Signed-off-by: Nicholas Piggin Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/slice.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 3889201b560c35..a4f93699194b6a 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -418,7 +418,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, unsigned long high_limit; high_limit = DEFAULT_MAP_WINDOW; - if (addr >= high_limit) + if (addr >= high_limit || (fixed && (addr + len > high_limit))) high_limit = TASK_SIZE; if (len > high_limit) From 7da7bed3c80ffc818ea3955d1fb21df119c3e9d7 Mon Sep 17 00:00:00 2001 From: Michele Baldessari Date: Mon, 6 Nov 2017 08:50:22 -0500 Subject: [PATCH 0227/3326] media: Don't do DMA on stack for firmware upload in the AS102 driver commit b3120d2cc447ee77b9d69bf4ad7b452c9adb4d39 upstream. Firmware load on AS102 is using the stack which is not allowed any longer. 
We currently fail with: kernel: transfer buffer not dma capable kernel: ------------[ cut here ]------------ kernel: WARNING: CPU: 0 PID: 598 at drivers/usb/core/hcd.c:1595 usb_hcd_map_urb_for_dma+0x41d/0x620 kernel: Modules linked in: amd64_edac_mod(-) edac_mce_amd as102_fe dvb_as102(+) kvm_amd kvm snd_hda_codec_realtek dvb_core snd_hda_codec_generic snd_hda_codec_hdmi snd_hda_intel snd_hda_codec irqbypass crct10dif_pclmul crc32_pclmul snd_hda_core snd_hwdep snd_seq ghash_clmulni_intel sp5100_tco fam15h_power wmi k10temp i2c_piix4 snd_seq_device snd_pcm snd_timer parport_pc parport tpm_infineon snd tpm_tis soundcore tpm_tis_core tpm shpchp acpi_cpufreq xfs libcrc32c amdgpu amdkfd amd_iommu_v2 radeon hid_logitech_hidpp i2c_algo_bit drm_kms_helper crc32c_intel ttm drm r8169 mii hid_logitech_dj kernel: CPU: 0 PID: 598 Comm: systemd-udevd Not tainted 4.13.10-200.fc26.x86_64 #1 kernel: Hardware name: ASUS All Series/AM1I-A, BIOS 0505 03/13/2014 kernel: task: ffff979933b24c80 task.stack: ffffaf83413a4000 kernel: RIP: 0010:usb_hcd_map_urb_for_dma+0x41d/0x620 systemd-fsck[659]: /dev/sda2: clean, 49/128016 files, 268609/512000 blocks kernel: RSP: 0018:ffffaf83413a7728 EFLAGS: 00010282 systemd-udevd[604]: link_config: autonegotiation is unset or enabled, the speed and duplex are not writable. kernel: RAX: 000000000000001f RBX: ffff979930bce780 RCX: 0000000000000000 kernel: RDX: 0000000000000000 RSI: ffff97993ec0e118 RDI: ffff97993ec0e118 kernel: RBP: ffffaf83413a7768 R08: 000000000000039a R09: 0000000000000000 kernel: R10: 0000000000000001 R11: 00000000ffffffff R12: 00000000fffffff5 kernel: R13: 0000000001400000 R14: 0000000000000001 R15: ffff979930806800 kernel: FS: 00007effaca5c8c0(0000) GS:ffff97993ec00000(0000) knlGS:0000000000000000 kernel: CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 kernel: CR2: 00007effa9fca962 CR3: 0000000233089000 CR4: 00000000000406f0 kernel: Call Trace: kernel: usb_hcd_submit_urb+0x493/0xb40 kernel: ? page_cache_tree_insert+0x100/0x100 kernel: ? xfs_iunlock+0xd5/0x100 [xfs] kernel: ? xfs_file_buffered_aio_read+0x57/0xc0 [xfs] kernel: usb_submit_urb+0x22d/0x560 kernel: usb_start_wait_urb+0x6e/0x180 kernel: usb_bulk_msg+0xb8/0x160 kernel: as102_send_ep1+0x49/0xe0 [dvb_as102] kernel: ? devres_add+0x3f/0x50 kernel: as102_firmware_upload.isra.0+0x1dc/0x210 [dvb_as102] kernel: as102_fw_upload+0xb6/0x1f0 [dvb_as102] kernel: as102_dvb_register+0x2af/0x2d0 [dvb_as102] kernel: as102_usb_probe+0x1f3/0x260 [dvb_as102] kernel: usb_probe_interface+0x124/0x300 kernel: driver_probe_device+0x2ff/0x450 kernel: __driver_attach+0xa4/0xe0 kernel: ? driver_probe_device+0x450/0x450 kernel: bus_for_each_dev+0x6e/0xb0 kernel: driver_attach+0x1e/0x20 kernel: bus_add_driver+0x1c7/0x270 kernel: driver_register+0x60/0xe0 kernel: usb_register_driver+0x81/0x150 kernel: ? 0xffffffffc0807000 kernel: as102_usb_driver_init+0x1e/0x1000 [dvb_as102] kernel: do_one_initcall+0x50/0x190 kernel: ? __vunmap+0x81/0xb0 kernel: ? kfree+0x154/0x170 kernel: ? kmem_cache_alloc_trace+0x15f/0x1c0 kernel: ? do_init_module+0x27/0x1e9 kernel: do_init_module+0x5f/0x1e9 kernel: load_module+0x2602/0x2c30 kernel: SYSC_init_module+0x170/0x1a0 kernel: ? 
SYSC_init_module+0x170/0x1a0 kernel: SyS_init_module+0xe/0x10 kernel: do_syscall_64+0x67/0x140 kernel: entry_SYSCALL64_slow_path+0x25/0x25 kernel: RIP: 0033:0x7effab6cf3ea kernel: RSP: 002b:00007fff5cfcbbc8 EFLAGS: 00000246 ORIG_RAX: 00000000000000af kernel: RAX: ffffffffffffffda RBX: 00005569e0b83760 RCX: 00007effab6cf3ea kernel: RDX: 00007effac2099c5 RSI: 0000000000009a13 RDI: 00005569e0b98c50 kernel: RBP: 00007effac2099c5 R08: 00005569e0b83ed0 R09: 0000000000001d80 kernel: R10: 00007effab98db00 R11: 0000000000000246 R12: 00005569e0b98c50 kernel: R13: 00005569e0b81c60 R14: 0000000000020000 R15: 00005569dfadfdf7 kernel: Code: 48 39 c8 73 30 80 3d 59 60 9d 00 00 41 bc f5 ff ff ff 0f 85 26 ff ff ff 48 c7 c7 b8 6b d0 92 c6 05 3f 60 9d 00 01 e8 24 3d ad ff <0f> ff 8b 53 64 e9 09 ff ff ff 65 48 8b 0c 25 00 d3 00 00 48 8b kernel: ---[ end trace c4cae366180e70ec ]--- kernel: as10x_usb: error during firmware upload part1 Let's allocate the the structure dynamically so we can get the firmware loaded correctly: [ 14.243057] as10x_usb: firmware: as102_data1_st.hex loaded with success [ 14.500777] as10x_usb: firmware: as102_data2_st.hex loaded with success Signed-off-by: Michele Baldessari Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/as102/as102_fw.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c index 5a28ce3a1d4926..38dbc128340d23 100644 --- a/drivers/media/usb/as102/as102_fw.c +++ b/drivers/media/usb/as102/as102_fw.c @@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, unsigned char *cmd, const struct firmware *firmware) { - struct as10x_fw_pkt_t fw_pkt; + struct as10x_fw_pkt_t *fw_pkt; int total_read_bytes = 0, errno = 0; unsigned char addr_has_changed = 0; + fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL); + if (!fw_pkt) + return -ENOMEM; + + for (total_read_bytes = 0; total_read_bytes < firmware->size; ) { int read_bytes = 0, data_len = 0; /* parse intel hex line */ read_bytes = parse_hex_line( (u8 *) (firmware->data + total_read_bytes), - fw_pkt.raw.address, - fw_pkt.raw.data, + fw_pkt->raw.address, + fw_pkt->raw.data, &data_len, &addr_has_changed); @@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, /* detect the end of file */ total_read_bytes += read_bytes; if (total_read_bytes == firmware->size) { - fw_pkt.u.request[0] = 0x00; - fw_pkt.u.request[1] = 0x03; + fw_pkt->u.request[0] = 0x00; + fw_pkt->u.request[1] = 0x03; /* send EOF command */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) - &fw_pkt, 2, 0); + fw_pkt, 2, 0); if (errno < 0) goto error; } else { if (!addr_has_changed) { /* prepare command to send */ - fw_pkt.u.request[0] = 0x00; - fw_pkt.u.request[1] = 0x01; + fw_pkt->u.request[0] = 0x00; + fw_pkt->u.request[1] = 0x01; - data_len += sizeof(fw_pkt.u.request); - data_len += sizeof(fw_pkt.raw.address); + data_len += sizeof(fw_pkt->u.request); + data_len += sizeof(fw_pkt->raw.address); /* send cmd to device */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) - &fw_pkt, + fw_pkt, data_len, 0); if (errno < 0) @@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, } } error: + kfree(fw_pkt); return (errno == 0) ? 
total_read_bytes : errno; } From e82273a266d03aeb00f0d27762ca306abbf14e01 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Sun, 8 Oct 2017 14:18:52 -0400 Subject: [PATCH 0228/3326] media: rc: check for integer overflow commit 3e45067f94bbd61dec0619b1c32744eb0de480c8 upstream. The ioctl LIRC_SET_REC_TIMEOUT would set a timeout of 704ns if called with a timeout of 4294968us. Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/rc/ir-lirc-codec.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index d2223c04e9ad08..4c8f456238bcab 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c @@ -298,11 +298,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, if (!dev->max_timeout) return -ENOTTY; + /* Check for multiply overflow */ + if (val > U32_MAX / 1000) + return -EINVAL; + tmp = val * 1000; - if (tmp < dev->min_timeout || - tmp > dev->max_timeout) - return -EINVAL; + if (tmp < dev->min_timeout || tmp > dev->max_timeout) + return -EINVAL; if (dev->s_timeout) ret = dev->s_timeout(dev, tmp); From cfec97f26d299f64cb5dd94f5e391d74c9977486 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Sun, 1 Oct 2017 16:38:29 -0400 Subject: [PATCH 0229/3326] media: rc: nec decoder should not send both repeat and keycode commit 829bbf268894d0866bb9dd2b1e430cfa5c5f0779 upstream. When receiving an nec repeat, rc_repeat() is called and then rc_keydown() with the last decoded scancode. That last call is redundant. Fixes: 265a2988d202 ("media: rc-core: consistent use of rc_repeat()") Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/rc/ir-nec-decoder.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c index 817c18f2ddd158..a95d09acc22a56 100644 --- a/drivers/media/rc/ir-nec-decoder.c +++ b/drivers/media/rc/ir-nec-decoder.c @@ -87,8 +87,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) data->state = STATE_BIT_PULSE; return 0; } else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) { - rc_repeat(dev); - IR_dprintk(1, "Repeat last key\n"); data->state = STATE_TRAILER_PULSE; return 0; } @@ -151,19 +149,26 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2)) break; - address = bitrev8((data->bits >> 24) & 0xff); - not_address = bitrev8((data->bits >> 16) & 0xff); - command = bitrev8((data->bits >> 8) & 0xff); - not_command = bitrev8((data->bits >> 0) & 0xff); + if (data->count == NEC_NBITS) { + address = bitrev8((data->bits >> 24) & 0xff); + not_address = bitrev8((data->bits >> 16) & 0xff); + command = bitrev8((data->bits >> 8) & 0xff); + not_command = bitrev8((data->bits >> 0) & 0xff); + + scancode = ir_nec_bytes_to_scancode(address, + not_address, + command, + not_command, + &rc_proto); - scancode = ir_nec_bytes_to_scancode(address, not_address, - command, not_command, - &rc_proto); + if (data->is_nec_x) + data->necx_repeat = true; - if (data->is_nec_x) - data->necx_repeat = true; + rc_keydown(dev, rc_proto, scancode, 0); + } else { + rc_repeat(dev); + } - rc_keydown(dev, rc_proto, scancode, 0); data->state = STATE_INACTIVE; return 0; } From 5a482b8a75500ec7516f3d11c7fb063e3f4abc45 Mon Sep 17 00:00:00 2001 From: Johan Hovold 
Date: Thu, 21 Sep 2017 05:40:18 -0300 Subject: [PATCH 0230/3326] cx231xx-cards: fix NULL-deref on missing association descriptor commit 6c3b047fa2d2286d5e438bcb470c7b1a49f415f6 upstream. Make sure to check that we actually have an Interface Association Descriptor before dereferencing it during probe to avoid dereferencing a NULL-pointer. Fixes: e0d3bafd0258 ("V4L/DVB (10954): Add cx231xx USB driver") Reported-by: Andrey Konovalov Signed-off-by: Johan Hovold Tested-by: Andrey Konovalov Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/cx231xx/cx231xx-cards.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index e0daa9b6c2a0e3..9b742d569fb511 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -1684,7 +1684,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface, nr = dev->devno; assoc_desc = udev->actconfig->intf_assoc[0]; - if (assoc_desc->bFirstInterface != ifnum) { + if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) { dev_err(d, "Not found matching IAD interface\n"); retval = -ENODEV; goto err_if; From ab7724bf5bc1811fc83743627a8acbcd17ba7f7b Mon Sep 17 00:00:00 2001 From: Ricardo Ribalda Delgado Date: Tue, 17 Oct 2017 11:48:50 -0400 Subject: [PATCH 0231/3326] media: v4l2-ctrl: Fix flags field on Control events commit 9cac9d2fb2fe0e0cadacdb94415b3fe49e3f724f upstream. VIDIOC_DQEVENT and VIDIOC_QUERY_EXT_CTRL should give the same output for the control flags field. This patch creates a new function user_flags(), that calculates the user exported flags value (which is different than the kernel internal flags structure). This function is then used by all the code that exports the internal flags to userspace. 
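The consistency this provides can be exercised from userspace as sketched below. This is not from the patch; the device node /dev/video0 and the control V4L2_CID_BRIGHTNESS are placeholders for whatever device and control are being tested, and with the fix applied the two flags values printed should match.

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/videodev2.h>

  int main(void)
  {
          int fd = open("/dev/video0", O_RDWR);
          struct v4l2_query_ext_ctrl qc;
          struct v4l2_event_subscription sub;
          struct v4l2_event ev;

          if (fd < 0)
                  return 1;

          memset(&qc, 0, sizeof(qc));
          qc.id = V4L2_CID_BRIGHTNESS;
          if (ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qc))
                  return 1;

          memset(&sub, 0, sizeof(sub));
          sub.type = V4L2_EVENT_CTRL;
          sub.id = V4L2_CID_BRIGHTNESS;
          sub.flags = V4L2_EVENT_SUB_FL_SEND_INITIAL;   /* queue one event now */
          if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub))
                  return 1;
          if (ioctl(fd, VIDIOC_DQEVENT, &ev))
                  return 1;

          printf("query flags 0x%x, event flags 0x%x (%s)\n",
                 qc.flags, ev.u.ctrl.flags,
                 qc.flags == ev.u.ctrl.flags ? "consistent" : "MISMATCH");
          return 0;
  }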
Reported-by: Dimitrios Katsaros Signed-off-by: Ricardo Ribalda Delgado Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-ctrls.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index dd1db678718c80..8033d6f73501bf 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1227,6 +1227,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, } EXPORT_SYMBOL(v4l2_ctrl_fill); +static u32 user_flags(const struct v4l2_ctrl *ctrl) +{ + u32 flags = ctrl->flags; + + if (ctrl->is_ptr) + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; + + return flags; +} + static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes) { memset(ev->reserved, 0, sizeof(ev->reserved)); @@ -1234,7 +1244,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change ev->id = ctrl->id; ev->u.ctrl.changes = changes; ev->u.ctrl.type = ctrl->type; - ev->u.ctrl.flags = ctrl->flags; + ev->u.ctrl.flags = user_flags(ctrl); if (ctrl->is_ptr) ev->u.ctrl.value64 = 0; else @@ -2577,10 +2587,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr else qc->id = ctrl->id; strlcpy(qc->name, ctrl->name, sizeof(qc->name)); - qc->flags = ctrl->flags; + qc->flags = user_flags(ctrl); qc->type = ctrl->type; - if (ctrl->is_ptr) - qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; qc->elem_size = ctrl->elem_size; qc->elems = ctrl->elems; qc->nr_of_dims = ctrl->nr_of_dims; From 242ceaf6dc6159e71078fb6d53ecf62d2627da9f Mon Sep 17 00:00:00 2001 From: Stanimir Varbanov Date: Mon, 9 Oct 2017 14:24:57 +0200 Subject: [PATCH 0232/3326] media: venus: fix wrong size on dma_free commit cd1a77e3c9cc6dbb57f02aa50e1740fc144d2dad upstream. This change will fix an issue with dma_free size found with DMA API debug enabled. 
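The invariant the fix enforces, restated as a minimal sketch (the struct mirrors the driver's mem_desc but is simplified, and the helper names are illustrative): whatever size is handed to dma_alloc_attrs() must be remembered and passed back unchanged to dma_free_attrs(), otherwise DMA API debugging flags a mismatch.

  #include <linux/dma-mapping.h>
  #include <linux/gfp.h>
  #include <linux/kernel.h>
  #include <linux/sizes.h>

  struct mem_desc {
          void *kva;
          dma_addr_t da;
          u32 size;
          unsigned long attrs;
  };

  static int desc_alloc(struct device *dev, struct mem_desc *desc, u32 size)
  {
          desc->attrs = DMA_ATTR_WRITE_COMBINE;
          desc->size = ALIGN(size, SZ_4K);        /* remember the rounded size */
          desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
                                      desc->attrs);
          return desc->kva ? 0 : -ENOMEM;
  }

  static void desc_free(struct device *dev, struct mem_desc *desc)
  {
          /* free with the exact size that was allocated, not the caller's */
          dma_free_attrs(dev, desc->size, desc->kva, desc->da, desc->attrs);
  }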
Signed-off-by: Stanimir Varbanov Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/platform/qcom/venus/hfi_venus.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c index 1caae8feaa36bb..734ce11b0ed0e2 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus.c +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -344,7 +344,7 @@ static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc, desc->attrs = DMA_ATTR_WRITE_COMBINE; desc->size = ALIGN(size, SZ_4K); - desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL, + desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL, desc->attrs); if (!desc->kva) return -ENOMEM; @@ -710,10 +710,8 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev) if (ret) return ret; - hdev->ifaceq_table.kva = desc.kva; - hdev->ifaceq_table.da = desc.da; - hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE; - offset = hdev->ifaceq_table.size; + hdev->ifaceq_table = desc; + offset = IFACEQ_TABLE_SIZE; for (i = 0; i < IFACEQ_NUM; i++) { queue = &hdev->queues[i]; @@ -755,9 +753,7 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev) if (ret) { hdev->sfr.da = 0; } else { - hdev->sfr.da = desc.da; - hdev->sfr.kva = desc.kva; - hdev->sfr.size = ALIGNED_SFR_SIZE; + hdev->sfr = desc; sfr = hdev->sfr.kva; sfr->buf_size = ALIGNED_SFR_SIZE; } From 88a703f31bdf4b609e838495c733aa6c3f270c33 Mon Sep 17 00:00:00 2001 From: Stanimir Varbanov Date: Tue, 10 Oct 2017 09:52:36 +0200 Subject: [PATCH 0233/3326] media: venus: venc: fix bytesused v4l2_plane field commit 5232c37ce244db04fd50d160b92e40d2df46a2e9 upstream. This fixes wrongly filled bytesused field of v4l2_plane structure by include data_offset in the plane, Also fill data_offset and bytesused for capture type of buffers only. Signed-off-by: Stanimir Varbanov Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/platform/qcom/venus/venc.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 6f123a387cf9ef..3fcf0e9b7b2915 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -963,13 +963,12 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type, if (!vbuf) return; - vb = &vbuf->vb2_buf; - vb->planes[0].bytesused = bytesused; - vb->planes[0].data_offset = data_offset; - vbuf->flags = flags; if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + vb = &vbuf->vb2_buf; + vb2_set_plane_payload(vb, 0, bytesused + data_offset); + vb->planes[0].data_offset = data_offset; vb->timestamp = timestamp_us * NSEC_PER_USEC; vbuf->sequence = inst->sequence_cap++; } else { From 2e7706a2a5e989b3f02d083c3c8829a39ec8482f Mon Sep 17 00:00:00 2001 From: Stanimir Varbanov Date: Fri, 13 Oct 2017 16:13:17 +0200 Subject: [PATCH 0234/3326] media: venus: reimplement decoder stop command commit e69b987a97599456b95b5fef4aca8dcdb1505aea upstream. This addresses the wrong behavior of decoder stop command by rewriting it. These new implementation enqueue an empty buffer on the decoder input buffer queue to signal end-of-stream. The client should stop queuing buffers on the V4L2 Output queue and continue queuing/dequeuing buffers on Capture queue. 
This process will continue until the client receives a buffer with V4L2_BUF_FLAG_LAST flag raised, which means that this is last decoded buffer with data. Signed-off-by: Stanimir Varbanov Tested-by: Nicolas Dufresne Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/platform/qcom/venus/core.h | 2 -- drivers/media/platform/qcom/venus/helpers.c | 7 ----- drivers/media/platform/qcom/venus/hfi.c | 1 + drivers/media/platform/qcom/venus/vdec.c | 34 ++++++++++++++------- 4 files changed, 24 insertions(+), 20 deletions(-) diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index cba092bcb76dda..a0fe80df0cbd54 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -194,7 +194,6 @@ struct venus_buffer { * @fh: a holder of v4l file handle structure * @streamon_cap: stream on flag for capture queue * @streamon_out: stream on flag for output queue - * @cmd_stop: a flag to signal encoder/decoder commands * @width: current capture width * @height: current capture height * @out_width: current output width @@ -258,7 +257,6 @@ struct venus_inst { } controls; struct v4l2_fh fh; unsigned int streamon_cap, streamon_out; - bool cmd_stop; u32 width; u32 height; u32 out_width; diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index 9b2a401a4891c4..0ce9559a2924fd 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -623,13 +623,6 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb) mutex_lock(&inst->lock); - if (inst->cmd_stop) { - vbuf->flags |= V4L2_BUF_FLAG_LAST; - v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); - inst->cmd_stop = false; - goto unlock; - } - v4l2_m2m_buf_queue(m2m_ctx, vbuf); if (!(inst->streamon_out & inst->streamon_cap)) diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c index c09490876516fb..ba29fd4d49847c 100644 --- a/drivers/media/platform/qcom/venus/hfi.c +++ b/drivers/media/platform/qcom/venus/hfi.c @@ -484,6 +484,7 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd) return -EINVAL; } +EXPORT_SYMBOL_GPL(hfi_session_process_buf); irqreturn_t hfi_isr_thread(int irq, void *dev_id) { diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index da611a5eb67037..c9e9576bb08a4e 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -469,8 +469,14 @@ static int vdec_subscribe_event(struct v4l2_fh *fh, static int vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) { - if (cmd->cmd != V4L2_DEC_CMD_STOP) + switch (cmd->cmd) { + case V4L2_DEC_CMD_STOP: + if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK) + return -EINVAL; + break; + default: return -EINVAL; + } return 0; } @@ -479,6 +485,7 @@ static int vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) { struct venus_inst *inst = to_inst(file); + struct hfi_frame_data fdata = {0}; int ret; ret = vdec_try_decoder_cmd(file, fh, cmd); @@ -486,12 +493,23 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) return ret; mutex_lock(&inst->lock); - inst->cmd_stop = true; - mutex_unlock(&inst->lock); - hfi_session_flush(inst); + /* + * Implement V4L2_DEC_CMD_STOP by enqueue an empty buffer on decoder + * input to signal EOS. 
+ */ + if (!(inst->streamon_out & inst->streamon_cap)) + goto unlock; + + fdata.buffer_type = HFI_BUFFER_INPUT; + fdata.flags |= HFI_BUFFERFLAG_EOS; + fdata.device_addr = 0xdeadbeef; - return 0; + ret = hfi_session_process_buf(inst, &fdata); + +unlock: + mutex_unlock(&inst->lock); + return ret; } static const struct v4l2_ioctl_ops vdec_ioctl_ops = { @@ -718,7 +736,6 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count) inst->reconfig = false; inst->sequence_cap = 0; inst->sequence_out = 0; - inst->cmd_stop = false; ret = vdec_init_session(inst); if (ret) @@ -807,11 +824,6 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type, vb->timestamp = timestamp_us * NSEC_PER_USEC; vbuf->sequence = inst->sequence_cap++; - if (inst->cmd_stop) { - vbuf->flags |= V4L2_BUF_FLAG_LAST; - inst->cmd_stop = false; - } - if (vbuf->flags & V4L2_BUF_FLAG_LAST) { const struct v4l2_event ev = { .type = V4L2_EVENT_EOS }; From 7a51c88c35ae6fbd858a50eef3f30719242659c5 Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Wed, 11 Oct 2017 17:23:12 +0200 Subject: [PATCH 0235/3326] ARM64: dts: meson-gxl: Add alternate ARM Trusted Firmware reserved memory zone commit 4ee8e51b9edfe7845a094690a365c844e5a35b4b upstream. This year, Amlogic updated the ARM Trusted Firmware reserved memory mapping for Meson GXL SoCs and products sold since May 2017 uses this alternate reserved memory mapping. But products had been sold using the previous mapping. This issue has been explained in [1] and a dynamic solution is yet to be found to avoid loosing another 3Mbytes of reservable memory. In the meantime, this patch adds this alternate memory zone only for the GXL and GXM SoCs since GXBB based new products stopped earlier. [1] http://lists.infradead.org/pipermail/linux-amlogic/2017-October/004860.html Fixes: bba8e3f42736 ("ARM64: dts: meson-gx: Add firmware reserved memory zones") Reported-by: Jerome Brunet Signed-off-by: Neil Armstrong Signed-off-by: Kevin Hilman Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index d8dd3298b15cfe..fb8d76a17bc5d4 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -49,6 +49,14 @@ / { compatible = "amlogic,meson-gxl"; + + reserved-memory { + /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */ + secmon_reserved_alt: secmon@05000000 { + reg = <0x0 0x05000000 0x0 0x300000>; + no-map; + }; + }; }; ðmac { From 98f581f6aa4f79dbad1d00271f334d813e0870c2 Mon Sep 17 00:00:00 2001 From: Oren Givon Date: Mon, 28 Aug 2017 10:33:38 +0300 Subject: [PATCH 0236/3326] iwlwifi: fix wrong struct for a000 device commit f7f5873bbd45a67d3097dfb55237ade2ad520184 upstream. The PCI ID (0x2720, 0x0070) was set with the config struct iwla000_2ax_cfg_hr instead of iwla000_2ac_cfg_hr_cdb. 
Fixes: 175b87c69253 ("iwlwifi: add the new a000_2ax series") Signed-off-by: Oren Givon Signed-off-by: Luca Coelho Cc: Thomas Backlund Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 858765fed8f856..3416c615599633 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -576,7 +576,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)}, #endif /* CONFIG_IWLMVM */ From 2b4c45dc7d1b149dd4742d0579cdba044bde3482 Mon Sep 17 00:00:00 2001 From: Oren Givon Date: Thu, 31 Aug 2017 13:15:09 +0300 Subject: [PATCH 0237/3326] iwlwifi: add a new a000 device commit d048b36b9654c4e0cf0d3576be2d1ed2a3084c6f upstream. Add a new a000 device with PCI ID (0x2720, 0x0030). Signed-off-by: Oren Givon Signed-off-by: Luca Coelho Cc: Thomas Backlund Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 3416c615599633..e0966f656376be 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -577,6 +577,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)}, #endif /* CONFIG_IWLMVM */ From 150dc29e1d99f0a58d52ec9fe0c93bb5ae86df37 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 12 Oct 2017 11:20:50 +0300 Subject: [PATCH 0238/3326] iwlwifi: pcie: sort IDs for the 9000 series for easier comparisons commit 1105a337375258515ed09b92a83fd7bfd6775958 upstream. It's hard to find values that are missing in the list, so sorting the values and comparing them makes it much easier. To simplify this task, sort the devices in the list. 
Signed-off-by: Luca Coelho Cc: Thomas Backlund Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 84 +++++++++---------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index e0966f656376be..8867d8914ef4df 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -510,65 +510,65 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, /* 9000 Series */ - {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0410, 
iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)}, /* a000 Series */ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)}, From 3e85efe021070671b8d773560b49b03a16a4c9ec Mon Sep 17 00:00:00 2001 From: Ihab Zhaika Date: Tue, 24 Oct 2017 17:04:24 +0300 Subject: [PATCH 0239/3326] iwlwifi: add new cards for a000 series commit 57b36f7fcb39c5eae8c1f463699f747af69643ba upstream. 
add four new PCI ID'S for a000 series Signed-off-by: Ihab Zhaika Signed-off-by: Luca Coelho Cc: Thomas Backlund Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 8867d8914ef4df..b7d86b6439ff0e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -579,6 +579,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)}, + #endif /* CONFIG_IWLMVM */ {0} From 2b2c1ae5b3cb13673db2394cd90ad5eec5afefed Mon Sep 17 00:00:00 2001 From: Ihab Zhaika Date: Tue, 24 Oct 2017 17:38:12 +0300 Subject: [PATCH 0240/3326] iwlwifi: add new cards for 8265 series commit 7cddbef445631109bd530ce7cdacaa04ff0a62d1 upstream. add two new PCI ID'S for 8265 series Signed-off-by: Ihab Zhaika Signed-off-by: Luca Coelho Cc: Thomas Backlund Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index b7d86b6439ff0e..a88b041685df1e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -508,6 +508,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, From 9f4482f1a31784fce0a09a68d1d5a19883c0eadf Mon Sep 17 00:00:00 2001 From: Ihab Zhaika Date: Tue, 24 Oct 2017 17:36:43 +0300 Subject: [PATCH 0241/3326] iwlwifi: add new cards for 8260 series commit d669fc2d42a43ee0abcf2396df6e9c5a124aa984 upstream. 
add three new PCI ID'S for 8260 series Signed-off-by: Ihab Zhaika Signed-off-by: Luca Coelho Cc: Thomas Backlund Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index a88b041685df1e..647cca05ba4b1c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -465,6 +465,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, @@ -483,6 +485,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)}, From 5f24172d80a4a9d93e20d03b34491ee7a76602cd Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 15 Nov 2017 18:28:04 +0200 Subject: [PATCH 0242/3326] iwlwifi: fix PCI IDs and configuration mapping for 9000 series commit dbc89253a7e15f8f031fb1eeb956de91204655e3 upstream. A lot of PCI IDs were missing and there were some problems with the configuration and firmware selection for devices on the 9000 series. Fix the firmware selection by adding files for the B-steps; add configuration for some integrated devices; and add a bunch of PCI IDs (mostly for integrated devices) that were missing from the driver's list. Without this patch, a lot of devices will not be recognized or will try to load the wrong firmware file. 
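The new *_cfg_soc entries added below carry both an A-step and a B-step firmware prefix (fw_name_pre and fw_name_pre_b_or_c_step). A minimal sketch of how a prefix might be picked per silicon step; the step argument and the SILICON_A_STEP constant are assumptions standing in for the driver's real CSR helpers:

    /* Sketch only, not the driver's exact selection code. */
    static const char *pick_fw_prefix(const struct iwl_cfg *cfg, u8 step)
    {
            if (cfg->fw_name_pre_b_or_c_step && step > SILICON_A_STEP)
                    return cfg->fw_name_pre_b_or_c_step;
            return cfg->fw_name_pre;
    }

    /* The firmware file name is then "<prefix><api>.ucode", matching the
     * IWL9000A/IWL9000B MODULE_FIRMWARE() macros in the hunks below. */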
Signed-off-by: Luca Coelho Cc: Thomas Backlund Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 67 ++++++++- .../net/wireless/intel/iwlwifi/iwl-config.h | 5 + drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 132 ++++++++++++++---- 3 files changed, 170 insertions(+), 34 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 6f43faab491098..c8e7b54a538ac9 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -72,12 +72,15 @@ #define IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_LEN 0x68000 -#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" +#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" +#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" #define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-" #define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-" #define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" -#define IWL9000_MODULE_FIRMWARE(api) \ - IWL9000_FW_PRE "-" __stringify(api) ".ucode" +#define IWL9000A_MODULE_FIRMWARE(api) \ + IWL9000A_FW_PRE __stringify(api) ".ucode" +#define IWL9000B_MODULE_FIRMWARE(api) \ + IWL9000B_FW_PRE __stringify(api) ".ucode" #define IWL9000RFB_MODULE_FIRMWARE(api) \ IWL9000RFB_FW_PRE __stringify(api) ".ucode" #define IWL9260A_MODULE_FIRMWARE(api) \ @@ -193,7 +196,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = { .nvm_ver = IWL9000_NVM_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl9460_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9460", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9461_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9461", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9462_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9462", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, }; const struct iwl_cfg iwl9560_2ac_cfg = { @@ -205,10 +249,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = { .nvm_ver = IWL9000_NVM_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, - .integrated = true, }; -MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +const struct iwl_cfg iwl9560_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9560", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver 
= IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; +MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 71cb1ecde0f72b..e226179c32fa7e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -364,6 +364,7 @@ struct iwl_cfg { u32 dccm2_len; u32 smem_offset; u32 smem_len; + u32 soc_latency; u16 nvm_ver; u16 nvm_calib_ver; u16 rx_with_siso_diversity:1, @@ -471,6 +472,10 @@ extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; +extern const struct iwl_cfg iwl9460_2ac_cfg_soc; +extern const struct iwl_cfg iwl9461_2ac_cfg_soc; +extern const struct iwl_cfg iwl9462_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_2ac_cfg_soc; extern const struct iwl_cfg iwla000_2ac_cfg_hr; extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwla000_2ac_cfg_jf; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 647cca05ba4b1c..548e1928430d0d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -533,47 +533,121 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)}, - 
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + 
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)}, /* a000 Series */ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)}, From 391243ca6dc70e52afed671c0837aceae198abf7 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Fri, 10 Nov 2017 14:03:36 +0200 Subject: [PATCH 0243/3326] iwlwifi: mvm: support version 7 of the SCAN_REQ_UMAC FW command commit dac4df1c5f2c34903f61b1bc4fc722e31b4199e7 upstream. Newer firmware versions (such as iwlwifi-8000C-34.ucode) have introduced an API change in the SCAN_REQ_UMAC command that is not backwards compatible. The driver needs to detect and use the new API format when the firmware reports it, otherwise the scan command will not work properly, causing a command timeout. 
Fix this by adding a TLV that tells the driver that the new API is in use and use the correct structures for it. This fixes https://bugzilla.kernel.org/show_bug.cgi?id=197591 Fixes: d7a5b3e9e42e ("iwlwifi: mvm: bump API to 34 for 8000 and up") Signed-off-by: Luca Coelho Cc: Thomas Backlund Signed-off-by: Greg Kroah-Hartman --- .../net/wireless/intel/iwlwifi/fw/api/scan.h | 59 ++++++++++--- drivers/net/wireless/intel/iwlwifi/fw/file.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 6 ++ drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 86 ++++++++++++++----- 4 files changed, 118 insertions(+), 34 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 5a40092febfb61..3bfc657f6b4214 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -531,6 +531,8 @@ struct iwl_scan_config_v1 { } __packed; /* SCAN_CONFIG_DB_CMD_API_S */ #define SCAN_TWO_LMACS 2 +#define SCAN_LB_LMAC_IDX 0 +#define SCAN_HB_LMAC_IDX 1 struct iwl_scan_config { __le32 flags; @@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags { IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), + IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13), }; /** @@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail { * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @general_flags: &enum iwl_umac_scan_general_flags - * @reserved2: for future use and alignment * @scan_start_mac_id: report the scan start TSF time according to this mac TSF * @extended_dwell: dwell time for channels 1, 6 and 11 * @active_dwell: dwell time for active scan * @passive_dwell: dwell time for passive scan * @fragmented_dwell: dwell time for fragmented passive scan + * @adwell_default_n_aps: for adaptive dwell the default number of APs + * per channel + * @adwell_default_n_aps_social: for adaptive dwell the default + * number of APs per social (1,6,11) channel + * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added + * to total scan time * @max_out_time: max out of serving channel time, per LMAC - for CDB there * are 2 LMACs * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs @@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail { * @channel_flags: &enum iwl_scan_channel_flags * @n_channels: num of channels in scan request * @reserved: for future use and alignment + * @reserved2: for future use and alignment + * @reserved3: for future use and alignment * @data: &struct iwl_scan_channel_cfg_umac and * &struct iwl_scan_req_umac_tail */ @@ -651,41 +661,64 @@ struct iwl_scan_req_umac { __le32 flags; __le32 uid; __le32 ooc_priority; - /* SCAN_GENERAL_PARAMS_API_S_VER_4 */ __le16 general_flags; - u8 reserved2; + u8 reserved; u8 scan_start_mac_id; - u8 extended_dwell; - u8 active_dwell; - u8 passive_dwell; - u8 fragmented_dwell; union { struct { + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; __le32 max_out_time; __le32 suspend_time; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ u8 channel_flags; u8 n_channels; - __le16 reserved; + __le16 reserved2; u8 data[]; } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ struct { + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 
suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ u8 channel_flags; u8 n_channels; - __le16 reserved; + __le16 reserved2; u8 data[]; } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ + struct { + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + u8 adwell_default_n_aps; + u8 adwell_default_n_aps_social; + u8 reserved3; + __le16 adwell_max_budget; + __le32 max_out_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved2; + u8 data[]; + } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */ }; } __packed; -#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \ + 2 * sizeof(u8) - sizeof(__le16)) #define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(__le32)) + 2 * sizeof(__le32) - 2 * sizeof(u8) - \ + sizeof(__le16)) /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 279248cd9cfb36..e988e4c371c43f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -262,6 +262,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31, /* API Set 1 */ + IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 949e6341829908..8dcdb522b84678 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1124,6 +1124,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } +static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_ADAPTIVE_DWELL); +} + static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) { /* For now we only use this mode to differentiate between diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 774122fed454fb..e4fd476e9ccb08 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -130,6 +130,19 @@ struct iwl_mvm_scan_params { u32 measurement_dwell; }; +static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) +{ + struct iwl_scan_req_umac *cmd = mvm->scan_cmd; + + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) + return (void *)&cmd->v7.data; + + if (iwl_mvm_has_new_tx_api(mvm)) + return (void *)&cmd->v6.data; + + return (void *)&cmd->v1.data; +} + static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { if (mvm->scan_rx_ant != ANT_NONE) @@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, { struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type]; + if (iwl_mvm_is_regular_scan(params)) + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + else + cmd->ooc_priority = 
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); + + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { + if (params->measurement_dwell) { + cmd->v7.active_dwell = params->measurement_dwell; + cmd->v7.passive_dwell = params->measurement_dwell; + } else { + cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE; + } + cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + + cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); + } + + return; + } + if (params->measurement_dwell) { - cmd->active_dwell = params->measurement_dwell; - cmd->passive_dwell = params->measurement_dwell; - cmd->extended_dwell = params->measurement_dwell; + cmd->v1.active_dwell = params->measurement_dwell; + cmd->v1.passive_dwell = params->measurement_dwell; + cmd->v1.extended_dwell = params->measurement_dwell; } else { - cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; - cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; - cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; + cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE; + cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED; } - cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; if (iwl_mvm_has_new_tx_api(mvm)) { cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); - cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time); - cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time); + cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); if (iwl_mvm_is_cdb_supported(mvm)) { - cmd->v6.max_out_time[1] = + cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); - cmd->v6.suspend_time[1] = + cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); } } else { @@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->v1.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } - - if (iwl_mvm_is_regular_scan(params)) - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); - else - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } static void @@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; - void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ? 
- (void *)&cmd->v6.data : (void *)&cmd->v1.data; + void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm); struct iwl_scan_req_umac_tail *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; @@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; - if (iwl_mvm_has_new_tx_api(mvm)) { + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { + cmd->v7.channel_flags = channel_flags; + cmd->v7.n_channels = params->n_channels; + } else if (iwl_mvm_has_new_tx_api(mvm)) { cmd->v6.channel_flags = channel_flags; cmd->v6.n_channels = params->n_channels; } else { @@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) { int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; - if (iwl_mvm_has_new_tx_api(mvm)) - base_size = IWL_SCAN_REQ_UMAC_SIZE; + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_V7; + else if (iwl_mvm_has_new_tx_api(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) return base_size + From 8606bf0219f6632a931f7317dad0d4b672b1e035 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:23 -0700 Subject: [PATCH 0244/3326] e1000e: Fix error path in link detection commit c4c40e51f9c32c6dd8adf606624c930a1c4d9bbb upstream. In case of error from e1e_rphy(), the loop will exit early and "success" will be set to true erroneously. Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/e1000e/phy.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index d78d47b41a716e..86ff0969efb6e0 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, s32 ret_val = 0; u16 i, phy_status; + *success = false; for (i = 0; i < iterations; i++) { /* Some PHYs require the MII_BMSR register to be read * twice due to the link bit being sticky. No harm doing @@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; - if (phy_status & BMSR_LSTATUS) + if (phy_status & BMSR_LSTATUS) { + *success = true; break; + } if (usec_interval >= 1000) msleep(usec_interval / 1000); else udelay(usec_interval); } - *success = (i < iterations); - return ret_val; } From 2b91745f8a4accb0f04c07433ee4297054bc88b1 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:25 -0700 Subject: [PATCH 0245/3326] e1000e: Fix return value test commit d3509f8bc7b0560044c15f0e3ecfde1d9af757a6 upstream. All the helpers return -E1000_ERR_PHY. 
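Because the helpers return the negated error code, comparing against the positive constant can never match. A standalone illustration (the numeric value of E1000_ERR_PHY is assumed here purely for demonstration):

    #include <stdio.h>

    #define E1000_ERR_PHY 2  /* value assumed for illustration */

    int main(void)
    {
            int ret_val = -E1000_ERR_PHY;  /* what the PHY helpers return */

            printf("%d\n", ret_val == E1000_ERR_PHY);   /* 0: old test never fires */
            printf("%d\n", ret_val == -E1000_ERR_PHY);  /* 1: corrected test in the hunk below */
            return 0;
    }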
Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 327dfe5bedc00f..764a6bd958ac86 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5099,7 +5099,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) break; } - if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ e_info("Gigabit has been disabled, downgrading speed\n"); From 830466993daf09adbd179e4c74db07279a088f8c Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:26 -0700 Subject: [PATCH 0246/3326] e1000e: Separate signaling for link check/link up commit 19110cfbb34d4af0cdfe14cd243f3b09dc95b013 upstream. Lennart reported the following race condition: \ e1000_watchdog_task \ e1000e_has_link \ hw->mac.ops.check_for_link() === e1000e_check_for_copper_link /* link is up */ mac->get_link_status = false; /* interrupt */ \ e1000_msix_other hw->mac.get_link_status = true; link_active = !hw->mac.get_link_status /* link_active is false, wrongly */ This problem arises because the single flag get_link_status is used to signal two different states: link status needs checking and link status is down. Avoid the problem by using the return value of .check_for_link to signal the link status to e1000e_has_link(). Reported-by: Lennart Sorensen Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/e1000e/mac.c | 11 ++++++++--- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index b322011ec2828a..f457c5703d0c45 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). **/ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) { @@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 0; + return 1; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * different link partner. 
*/ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) + if (ret_val) { e_dbg("Error configuring flow control\n"); + return ret_val; + } - return ret_val; + return 1; } /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 764a6bd958ac86..83071fe9932328 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5081,7 +5081,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) case e1000_media_type_copper: if (hw->mac.get_link_status) { ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; + link_active = ret_val > 0; } else { link_active = true; } From 10d0fd29310387313680b12cb69f7b9e11938c8e Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:27 -0700 Subject: [PATCH 0247/3326] e1000e: Avoid receiver overrun interrupt bursts commit 4aea7a5c5e940c1723add439f4088844cd26196d upstream. When e1000e_poll() is not fast enough to keep up with incoming traffic, the adapter (when operating in msix mode) raises the Other interrupt to signal Receiver Overrun. This is a double problem because 1) at the moment e1000_msix_other() assumes that it is only called in case of Link Status Change and 2) if the condition persists, the interrupt is repeatedly raised again in quick succession. Ideally we would configure the Other interrupt to not be raised in case of receiver overrun but this doesn't seem possible on this adapter. Instead, we handle the first part of the problem by reverting to the practice of reading ICR in the other interrupt handler, like before commit 16ecba59bc33 ("e1000e: Do not read ICR in Other interrupt"). Thanks to commit 0a8047ac68e5 ("e1000e: Fix msi-x interrupt automask") which cleared IAME from CTRL_EXT, reading ICR doesn't interfere with RxQ0, TxQ0 interrupts anymore. We handle the second part of the problem by not re-enabling the Other interrupt right away when there is overrun. Instead, we wait until traffic subsides, napi polling mode is exited and interrupts are re-enabled. Reported-by: Lennart Sorensen Fixes: 16ecba59bc33 ("e1000e: Do not read ICR in Other interrupt") Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/e1000e/defines.h | 1 + drivers/net/ethernet/intel/e1000e/netdev.c | 33 ++++++++++++++++----- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 0641c009873803..afb7ebe20b2438 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -398,6 +398,7 @@ #define E1000_ICR_LSC 0x00000004 /* Link Status Change */ #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ /* If this bit asserted, the driver should claim the interrupt */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 83071fe9932328..99e5ef561f064f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1910,14 +1910,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + u32 icr; + bool enable = true; + + icr = er32(ICR); + if (icr & E1000_ICR_RXO) { + ew32(ICR, E1000_ICR_RXO); + enable = false; + /* napi poll will re-enable Other, make sure it runs */ + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + } + if (icr & E1000_ICR_LSC) { + ew32(ICR, E1000_ICR_LSC); + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } - hw->mac.get_link_status = true; - - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->state)) { - mod_timer(&adapter->watchdog_timer, jiffies + 1); + if (enable && !test_bit(__E1000_DOWN, &adapter->state)) ew32(IMS, E1000_IMS_OTHER); - } return IRQ_HANDLED; } @@ -2687,7 +2703,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight) napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) - ew32(IMS, adapter->rx_ring->ims_val); + ew32(IMS, adapter->rx_ring->ims_val | + E1000_IMS_OTHER); else e1000_irq_enable(adapter); } @@ -4204,7 +4221,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; if (adapter->msix_entries) - ew32(ICS, E1000_ICS_OTHER); + ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER); else ew32(ICS, E1000_ICS_LSC); } From 0f478f25d50cb6464678284a13f514fefc16e271 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Sun, 6 Aug 2017 16:49:18 +0300 Subject: [PATCH 0248/3326] e1000e: fix buffer overrun while the I219 is processing DMA transactions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit b10effb92e272051dd1ec0d7be56bf9ca85ab927 upstream. Intel® 100/200 Series Chipset platforms reduced the round-trip latency for the LAN Controller DMA accesses, causing in some high performance cases a buffer overrun while the I219 LAN Connected Device is processing the DMA transactions. I219LM and I219V devices can fall into unrecovered Tx hang under very stressfully UDP traffic and multiple reconnection of Ethernet cable. This Tx hang of the LAN Controller is only recovered if the system is rebooted. Slightly slow down DMA access by reducing the number of outstanding requests. This workaround could have an impact on TCP traffic performance on the platform. Disabling TSO eliminates performance loss for TCP traffic without a noticeable impact on CPU performance. 
Please, refer to I218/I219 specification update: https://www.intel.com/content/www/us/en/embedded/products/networking/ ethernet-connection-i218-family-documentation.html Signed-off-by: Sasha Neftin Reviewed-by: Dima Ruinskiy Reviewed-by: Raanan Avargil Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/e1000e/netdev.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 99e5ef561f064f..c38b00c90f4801 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3021,8 +3021,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) hw->mac.ops.config_collision_dist(hw); - /* SPT and CNP Si errata workaround to avoid data corruption */ - if (hw->mac.type >= e1000_pch_spt) { + /* SPT and KBL Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { u32 reg_val; reg_val = er32(IOSFPC); @@ -3030,7 +3030,9 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(IOSFPC, reg_val); reg_val = er32(TARC(0)); - reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; + /* SPT and KBL Si errata workaround to avoid Tx hang */ + reg_val &= ~BIT(28); + reg_val |= BIT(29); ew32(TARC(0), reg_val); } } From 191314edb326764c4481b09ccf7d00159abe4679 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 30 Nov 2017 08:41:00 +0000 Subject: [PATCH 0249/3326] Linux 4.14.3 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 75d89dc2b94ac8..ede4de0d86344a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 2 +SUBLEVEL = 3 EXTRAVERSION = NAME = Petit Gorille From 2cbd866dd858ceebe2b735e3d8cd8eb702ca1f88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Br=C3=BCns?= Date: Fri, 3 Nov 2017 03:01:53 +0100 Subject: [PATCH 0250/3326] platform/x86: hp-wmi: Fix tablet mode detection for convertibles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 9968e12a291e639dd51d1218b694d440b22a917f upstream. Commit f9cf3b2880cc ("platform/x86: hp-wmi: Refactor dock and tablet state fetchers") consolidated the methods for docking and laptop mode detection, but omitted to apply the correct mask for the laptop mode (it always uses the constant for docking). Fixes: f9cf3b2880cc ("platform/x86: hp-wmi: Refactor dock and tablet state fetchers") Signed-off-by: Stefan Brüns Cc: Michel Dänzer Signed-off-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/hp-wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index b4ed3dc983d522..b4224389febebe 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -297,7 +297,7 @@ static int hp_wmi_hw_state(int mask) if (state < 0) return state; - return state & 0x1; + return !!(state & mask); } static int __init hp_wmi_bios_2008_later(void) From b8d0c953d928fbdc3207f741a2b77055d34fd0b9 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 29 Nov 2017 16:09:54 -0800 Subject: [PATCH 0251/3326] mm, memory_hotplug: do not back off draining pcp free pages from kworker context commit 4b81cb2ff69c8a8e297a147d2eb4d9b5e8d7c435 upstream. 
drain_all_pages backs off when called from a kworker context since commit 0ccce3b92421 ("mm, page_alloc: drain per-cpu pages from workqueue context") because the original IPI based pcp draining has been replaced by a WQ based one and the check wanted to prevent from recursion and inter workers dependencies. This has made some sense at the time because the system WQ has been used and one worker holding the lock could be blocked while waiting for new workers to emerge which can be a problem under OOM conditions. Since then commit ce612879ddc7 ("mm: move pcp and lru-pcp draining into single wq") has moved draining to a dedicated (mm_percpu_wq) WQ with a rescuer so we shouldn't depend on any other WQ activity to make a forward progress so calling drain_all_pages from a worker context is safe as long as this doesn't happen from mm_percpu_wq itself which is not the case because all workers are required to _not_ depend on any MM locks. Why is this a problem in the first place? ACPI driven memory hot-remove (acpi_device_hotplug) is executed from the worker context. We end up calling __offline_pages to free all the pages and that requires both lru_add_drain_all_cpuslocked and drain_all_pages to do their job otherwise we can have dangling pages on pcp lists and fail the offline operation (__test_page_isolated_in_pageblock would see a page with 0 ref count but without PageBuddy set). Fix the issue by removing the worker check in drain_all_pages. lru_add_drain_all_cpuslocked doesn't have this restriction so it works as expected. Link: http://lkml.kernel.org/r/20170828093341.26341-1-mhocko@kernel.org Fixes: 0ccce3b924212 ("mm, page_alloc: drain per-cpu pages from workqueue context") Signed-off-by: Michal Hocko Cc: Mel Gorman Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/page_alloc.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 82a6270c9743c9..286de4cee4522a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2487,10 +2487,6 @@ void drain_all_pages(struct zone *zone) if (WARN_ON_ONCE(!mm_percpu_wq)) return; - /* Workqueues cannot recurse */ - if (current->flags & PF_WQ_WORKER) - return; - /* * Do not drain if one is already in progress unless it's specific to * a zone. Such callers are primarily CMA and memory hotplug and need From 786b924d39bad16ff99aacdb4076df027cc2f8b8 Mon Sep 17 00:00:00 2001 From: Wang Nan Date: Wed, 29 Nov 2017 16:09:58 -0800 Subject: [PATCH 0252/3326] mm, oom_reaper: gather each vma to prevent leaking TLB entry commit 687cb0884a714ff484d038e9190edc874edcf146 upstream. tlb_gather_mmu(&tlb, mm, 0, -1) means gathering the whole virtual memory space. In this case, tlb->fullmm is true. Some archs like arm64 doesn't flush TLB when tlb->fullmm is true: commit 5a7862e83000 ("arm64: tlbflush: avoid flushing when fullmm == 1"). Which causes leaking of tlb entries. Will clarifies his patch: "Basically, we tag each address space with an ASID (PCID on x86) which is resident in the TLB. This means we can elide TLB invalidation when pulling down a full mm because we won't ever assign that ASID to another mm without doing TLB invalidation elsewhere (which actually just nukes the whole TLB). I think that means that we could potentially not fault on a kernel uaccess, because we could hit in the TLB" There could be a window between complete_signal() sending IPI to other cores and all threads sharing this mm are really kicked off from cores. 
In this window, the oom reaper may calls tlb_flush_mmu_tlbonly() to flush TLB then frees pages. However, due to the above problem, the TLB entries are not really flushed on arm64. Other threads are possible to access these pages through TLB entries. Moreover, a copy_to_user() can also write to these pages without generating page fault, causes use-after-free bugs. This patch gathers each vma instead of gathering full vm space. In this case tlb->fullmm is not true. The behavior of oom reaper become similar to munmapping before do_exit, which should be safe for all archs. Link: http://lkml.kernel.org/r/20171107095453.179940-1-wangnan0@huawei.com Fixes: aac453635549 ("mm, oom: introduce oom reaper") Signed-off-by: Wang Nan Acked-by: Michal Hocko Acked-by: David Rientjes Cc: Minchan Kim Cc: Will Deacon Cc: Bob Liu Cc: Ingo Molnar Cc: Roman Gushchin Cc: Konstantin Khlebnikov Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/oom_kill.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index dee0f75c301337..18c5b356b5053c 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -532,7 +532,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) */ set_bit(MMF_UNSTABLE, &mm->flags); - tlb_gather_mmu(&tlb, mm, 0, -1); for (vma = mm->mmap ; vma; vma = vma->vm_next) { if (!can_madv_dontneed_vma(vma)) continue; @@ -547,11 +546,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) * we do not want to block exit_mmap by keeping mm ref * count elevated without a good reason. */ - if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) + if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { + tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end); unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, NULL); + tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end); + } } - tlb_finish_mmu(&tlb, 0, -1); pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(tsk), tsk->comm, K(get_mm_counter(mm, MM_ANONPAGES)), From 01ca9727457a167463a47e35b6fe5a5173b4e341 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 27 Nov 2017 06:21:25 +0300 Subject: [PATCH 0253/3326] mm, thp: Do not make page table dirty unconditionally in touch_p[mu]d() commit a8f97366452ed491d13cf1e44241bc0b5740b1f0 upstream. Currently, we unconditionally make page table dirty in touch_pmd(). It may result in false-positive can_follow_write_pmd(). We may avoid the situation, if we would only make the page table entry dirty if caller asks for write access -- FOLL_WRITE. The patch also changes touch_pud() in the same way. Signed-off-by: Kirill A. Shutemov Cc: Michal Hocko Cc: Hugh Dickins Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/huge_memory.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1981ed697dabb5..eba34cdfc3e5b5 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd) + pmd_t *pmd, int flags) { pmd_t _pmd; - /* - * We should set the dirty bit only for FOLL_WRITE but for now - * the dirty bit in the pmd is meaningless. 
And if the dirty - * bit will become meaningful and we'll only set it with - * FOLL_WRITE, an atomic set_bit will be required on the pmd to - * set the young bit, instead of the current set_pmd_at. - */ - _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); + _pmd = pmd_mkyoung(*pmd); + if (flags & FOLL_WRITE) + _pmd = pmd_mkdirty(_pmd); if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, - pmd, _pmd, 1)) + pmd, _pmd, flags & FOLL_WRITE)) update_mmu_cache_pmd(vma, addr, pmd); } @@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, return NULL; if (flags & FOLL_TOUCH) - touch_pmd(vma, addr, pmd); + touch_pmd(vma, addr, pmd, flags); /* * device mapped pages can only be returned if the @@ -995,20 +990,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static void touch_pud(struct vm_area_struct *vma, unsigned long addr, - pud_t *pud) + pud_t *pud, int flags) { pud_t _pud; - /* - * We should set the dirty bit only for FOLL_WRITE but for now - * the dirty bit in the pud is meaningless. And if the dirty - * bit will become meaningful and we'll only set it with - * FOLL_WRITE, an atomic set_bit will be required on the pud to - * set the young bit, instead of the current set_pud_at. - */ - _pud = pud_mkyoung(pud_mkdirty(*pud)); + _pud = pud_mkyoung(*pud); + if (flags & FOLL_WRITE) + _pud = pud_mkdirty(_pud); if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, - pud, _pud, 1)) + pud, _pud, flags & FOLL_WRITE)) update_mmu_cache_pud(vma, addr, pud); } @@ -1031,7 +1021,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, return NULL; if (flags & FOLL_TOUCH) - touch_pud(vma, addr, pud); + touch_pud(vma, addr, pud, flags); /* * device mapped pages can only be returned if the @@ -1407,7 +1397,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, page = pmd_page(*pmd); VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); if (flags & FOLL_TOUCH) - touch_pmd(vma, addr, pmd); + touch_pmd(vma, addr, pmd, flags); if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* * We don't mlock() pte-mapped THPs. This way we can avoid From b4c8fce668423c0dde23fe57682e5bd8df5bbeb9 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 29 Nov 2017 16:10:01 -0800 Subject: [PATCH 0254/3326] mm/cma: fix alloc_contig_range ret code/potential leak commit 63cd448908b5eb51d84c52f02b31b9b4ccd1cb5a upstream. If the call __alloc_contig_migrate_range() in alloc_contig_range returns -EBUSY, processing continues so that test_pages_isolated() is called where there is a tracepoint to identify the busy pages. However, it is possible for busy pages to become available between the calls to these two routines. In this case, the range of pages may be allocated. Unfortunately, the original return code (ret == -EBUSY) is still set and returned to the caller. Therefore, the caller believes the pages were not allocated and they are leaked. Update the comment to indicate that allocation is still possible even if __alloc_contig_migrate_range returns -EBUSY. Also, clear return code in this case so that it is not accidentally used or returned to caller. 
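For illustration, a minimal caller-side sketch (hypothetical code, not part of this patch) of how the stale -EBUSY leaks the range: a typical caller only returns pages via free_contig_range() when the allocation reports success, so a range that was in fact allocated yet still reported as -EBUSY is never freed.

    /* Hypothetical caller sketch: why a stale -EBUSY return leaks pages. */
    static int grab_and_use_range(unsigned long start_pfn, unsigned long nr_pages)
    {
            int ret;

            ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
                                     MIGRATE_CMA, GFP_KERNEL);
            if (ret)
                    /*
                     * A stale -EBUSY lands here even though the range was
                     * allocated, so free_contig_range() below is never reached.
                     */
                    return ret;

            use_range(start_pfn, nr_pages);          /* assumed helper */
            free_contig_range(start_pfn, nr_pages);
            return 0;
    }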
Link: http://lkml.kernel.org/r/20171122185214.25285-1-mike.kravetz@oracle.com Fixes: 8ef5849fa8a2 ("mm/cma: always check which page caused allocation failure") Signed-off-by: Mike Kravetz Acked-by: Vlastimil Babka Acked-by: Michal Hocko Acked-by: Johannes Weiner Acked-by: Joonsoo Kim Cc: Michal Nazarewicz Cc: Laura Abbott Cc: Michal Hocko Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/page_alloc.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 286de4cee4522a..d51c2087c498c6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7587,11 +7587,18 @@ int alloc_contig_range(unsigned long start, unsigned long end, /* * In case of -EBUSY, we'd like to know which page causes problem. - * So, just fall through. We will check it in test_pages_isolated(). + * So, just fall through. test_pages_isolated() has a tracepoint + * which will report the busy page. + * + * It is possible that busy pages could become available before + * the call to test_pages_isolated, and the range will actually be + * allocated. So, if we fall through be sure to clear ret so that + * -EBUSY is not accidentally used or returned to caller. */ ret = __alloc_contig_migrate_range(&cc, start, end); if (ret && ret != -EBUSY) goto done; + ret =0; /* * Pages from [start, end) are within a MAX_ORDER_NR_PAGES From bf55918cb4fb374000ad1d063b207a17a93e8726 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 29 Nov 2017 16:10:06 -0800 Subject: [PATCH 0255/3326] mm: fix device-dax pud write-faults triggered by get_user_pages() commit 1501899a898dfb5477c55534bdfd734c046da06d upstream. Currently only get_user_pages_fast() can safely handle the writable gup case due to its use of pud_access_permitted() to check whether the pud entry is writable. In the gup slow path pud_write() is used instead of pud_access_permitted() and to date it has been unimplemented, just calls BUG_ON(). kernel BUG at ./include/linux/hugetlb.h:244! [..] RIP: 0010:follow_devmap_pud+0x482/0x490 [..] Call Trace: follow_page_mask+0x28c/0x6e0 __get_user_pages+0xe4/0x6c0 get_user_pages_unlocked+0x130/0x1b0 get_user_pages_fast+0x89/0xb0 iov_iter_get_pages_alloc+0x114/0x4a0 nfs_direct_read_schedule_iovec+0xd2/0x350 ? nfs_start_io_direct+0x63/0x70 nfs_file_direct_read+0x1e0/0x250 nfs_file_read+0x90/0xc0 For now this just implements a simple check for the _PAGE_RW bit similar to pmd_write. However, this implies that the gup-slow-path check is missing the extra checks that the gup-fast-path performs with pud_access_permitted. Later patches will align all checks to use the 'access_permitted' helper if the architecture provides it. Note that the generic 'access_permitted' helper fallback is the simple _PAGE_RW check on architectures that do not define the 'access_permitted' helper(s). [dan.j.williams@intel.com: fix powerpc compile error] Link: http://lkml.kernel.org/r/151129126165.37405.16031785266675461397.stgit@dwillia2-desk3.amr.corp.intel.com Link: http://lkml.kernel.org/r/151043109938.2842.14834662818213616199.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: a00cc7d9dd93 ("mm, x86: add support for PUD-sized transparent hugepages") Signed-off-by: Dan Williams Reported-by: Stephen Rothwell Acked-by: Thomas Gleixner [x86] Cc: Kirill A. Shutemov Cc: Catalin Marinas Cc: "David S. Miller" Cc: Dave Hansen Cc: Will Deacon Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/pgtable.h | 6 ++++++ include/asm-generic/pgtable.h | 8 ++++++++ include/linux/hugetlb.h | 8 -------- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index f735c30163252f..f02de8bc1f7202 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -1093,6 +1093,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); } +#define pud_write pud_write +static inline int pud_write(pud_t pud) +{ + return pud_flags(pud) & _PAGE_RW; +} + /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 757dc6ffc7ba5f..1ac457511f4eb5 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -814,6 +814,14 @@ static inline int pmd_write(pmd_t pmd) #endif /* __HAVE_ARCH_PMD_WRITE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#ifndef pud_write +static inline int pud_write(pud_t pud) +{ + BUG(); + return 0; +} +#endif /* pud_write */ + #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index fbf5b31d47eea9..82a25880714ac6 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -239,14 +239,6 @@ static inline int pgd_write(pgd_t pgd) } #endif -#ifndef pud_write -static inline int pud_write(pud_t pud) -{ - BUG(); - return 0; -} -#endif - #define HUGETLB_ANON_FILE "anon_hugepage" enum { From c6c78a1d4561880c744d3ec932264b264a879e7c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 29 Nov 2017 16:10:28 -0800 Subject: [PATCH 0256/3326] mm, hugetlbfs: introduce ->split() to vm_operations_struct commit 31383c6865a578834dd953d9dbc88e6b19fe3997 upstream. Patch series "device-dax: fix unaligned munmap handling" When device-dax is operating in huge-page mode we want it to behave like hugetlbfs and fail attempts to split vmas into unaligned ranges. It would be messy to teach the munmap path about device-dax alignment constraints in the same (hstate) way that hugetlbfs communicates this constraint. Instead, these patches introduce a new ->split() vm operation. This patch (of 2): The device-dax interface has similar constraints as hugetlbfs in that it requires the munmap path to unmap in huge page aligned units. Rather than add more custom vma handling code in __split_vma() introduce a new vm operation to perform this vma specific check. 
Link: http://lkml.kernel.org/r/151130418135.4029.6783191281930729710.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: dee410792419 ("/dev/dax, core: file operations and dax-mmap") Signed-off-by: Dan Williams Cc: Jeff Moyer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/mm.h | 1 + mm/hugetlb.c | 8 ++++++++ mm/mmap.c | 8 +++++--- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 43edf659453b26..c96a8b769e3dbc 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -367,6 +367,7 @@ enum page_entry_size { struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); + int (*split)(struct vm_area_struct * area, unsigned long addr); int (*mremap)(struct vm_area_struct * area); int (*fault)(struct vm_fault *vmf); int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2d2ff5e8bf2bc0..a2233d722ff94b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3125,6 +3125,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma) } } +static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) +{ + if (addr & ~(huge_page_mask(hstate_vma(vma)))) + return -EINVAL; + return 0; +} + /* * We cannot handle pagefaults against hugetlb pages at all. They cause * handle_mm_fault() to try to instantiate regular-sized pages in the @@ -3141,6 +3148,7 @@ const struct vm_operations_struct hugetlb_vm_ops = { .fault = hugetlb_vm_op_fault, .open = hugetlb_vm_op_open, .close = hugetlb_vm_op_close, + .split = hugetlb_vm_op_split, }; static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, diff --git a/mm/mmap.c b/mm/mmap.c index 680506faceae91..476e810cf10032 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2540,9 +2540,11 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *new; int err; - if (is_vm_hugetlb_page(vma) && (addr & - ~(huge_page_mask(hstate_vma(vma))))) - return -EINVAL; + if (vma->vm_ops && vma->vm_ops->split) { + err = vma->vm_ops->split(vma, addr); + if (err) + return err; + } new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!new) From 61586a2286140935fe711b3410c53ee1141633b2 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 29 Nov 2017 16:10:32 -0800 Subject: [PATCH 0257/3326] device-dax: implement ->split() to catch invalid munmap attempts commit 9702cffdbf2129516db679e4467db81e1cd287da upstream. Similar to how device-dax enforces that the 'address', 'offset', and 'len' parameters to mmap() be aligned to the device's fundamental alignment, the same constraints apply to munmap(). Implement ->split() to fail munmap calls that violate the alignment constraint. Otherwise, we later fail VM_BUG_ON checks in the unmap_page_range() path with crash signatures of the form: vma ffff8800b60c8a88 start 00007f88c0000000 end 00007f88c0e00000 next (null) prev (null) mm ffff8800b61150c0 prot 8000000000000027 anon_vma (null) vm_ops ffffffffa0091240 pgoff 0 file ffff8800b638ef80 private_data (null) flags: 0x380000fb(read|write|shared|mayread|maywrite|mayexec|mayshare|softdirty|mixedmap|hugepage) ------------[ cut here ]------------ kernel BUG at mm/huge_memory.c:2014! [..] RIP: 0010:__split_huge_pud+0x12a/0x180 [..] Call Trace: unmap_page_range+0x245/0xa40 ? __vma_adjust+0x301/0x990 unmap_vmas+0x4c/0xa0 unmap_region+0xae/0x120 ? 
__vma_rb_erase+0x11a/0x230 do_munmap+0x276/0x410 vm_munmap+0x6a/0xa0 SyS_munmap+0x1d/0x30 Link: http://lkml.kernel.org/r/151130418681.4029.7118245855057952010.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: dee410792419 ("/dev/dax, core: file operations and dax-mmap") Signed-off-by: Dan Williams Reported-by: Jeff Moyer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/dax/device.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/dax/device.c b/drivers/dax/device.c index e9f3b3e4bbf458..375b99bca002c9 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -427,9 +427,21 @@ static int dev_dax_fault(struct vm_fault *vmf) return dev_dax_huge_fault(vmf, PE_SIZE_PTE); } +static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr) +{ + struct file *filp = vma->vm_file; + struct dev_dax *dev_dax = filp->private_data; + struct dax_region *dax_region = dev_dax->region; + + if (!IS_ALIGNED(addr, dax_region->align)) + return -EINVAL; + return 0; +} + static const struct vm_operations_struct dax_vm_ops = { .fault = dev_dax_fault, .huge_fault = dev_dax_huge_fault, + .split = dev_dax_split, }; static int dax_mmap(struct file *filp, struct vm_area_struct *vma) From 40aa9d2998ca55d9011e75a0fa526ed52e0a4bce Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 29 Nov 2017 16:10:35 -0800 Subject: [PATCH 0258/3326] mm: introduce get_user_pages_longterm commit 2bb6d2837083de722bfdc369cb0d76ce188dd9b4 upstream. Patch series "introduce get_user_pages_longterm()", v2. Here is a new get_user_pages api for cases where a driver intends to keep an elevated page count indefinitely. This is distinct from usages like iov_iter_get_pages where the elevated page counts are transient. The iov_iter_get_pages cases immediately turn around and submit the pages to a device driver which will put_page when the i/o operation completes (under kernel control). In the longterm case userspace is responsible for dropping the page reference at some undefined point in the future. This is untenable for filesystem-dax case where the filesystem is in control of the lifetime of the block / page and needs reasonable limits on how long it can wait for pages in a mapping to become idle. Fixing filesystems to actually wait for dax pages to be idle before blocks from a truncate/hole-punch operation are repurposed is saved for a later patch series. Also, allowing longterm registration of dax mappings is a future patch series that introduces a "map with lease" semantic where the kernel can revoke a lease and force userspace to drop its page references. I have also tagged these for -stable to purposely break cases that might assume that longterm memory registrations for filesystem-dax mappings were supported by the kernel. The behavior regression this policy change implies is one of the reasons we maintain the "dax enabled. Warning: EXPERIMENTAL, use at your own risk" notification when mounting a filesystem in dax mode. It is worth noting the device-dax interface does not suffer the same constraints since it does not support file space management operations like hole-punch. This patch (of 4): Until there is a solution to the dma-to-dax vs truncate problem it is not safe to allow long standing memory registrations against filesytem-dax vmas. Device-dax vmas do not have this problem and are explicitly allowed. This is temporary until a "memory registration with layout-lease" mechanism can be implemented for the affected sub-systems (RDMA and V4L2). 
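For illustration only (hypothetical driver code, not from this series), a subsystem that keeps pages pinned for a userspace-controlled lifetime would switch to the new helper and treat its error return as "this mapping is not eligible for long-term pinning":

    /* Hypothetical long-term pinning sketch; caller holds mmap_sem for read. */
    static long mydrv_pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
                                      struct page **pages)
    {
            long pinned;

            pinned = get_user_pages_longterm(uaddr & PAGE_MASK, nr_pages,
                                             FOLL_WRITE, pages, NULL);
            if (pinned < 0)
                    return pinned;    /* e.g. -EOPNOTSUPP for a filesystem-DAX vma */
            if (pinned != nr_pages) {
                    while (pinned--)
                            put_page(pages[pinned]);
                    return -EFAULT;
            }
            return 0;
    }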
[akpm@linux-foundation.org: use kcalloc()] Link: http://lkml.kernel.org/r/151068939435.7446.13560129395419350737.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: 3565fce3a659 ("mm, x86: get_user_pages() for dax mappings") Signed-off-by: Dan Williams Suggested-by: Christoph Hellwig Cc: Doug Ledford Cc: Hal Rosenstock Cc: Inki Dae Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jeff Moyer Cc: Joonyoung Shim Cc: Kyungmin Park Cc: Mauro Carvalho Chehab Cc: Mel Gorman Cc: Ross Zwisler Cc: Sean Hefty Cc: Seung-Woo Kim Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/fs.h | 14 ++++++++++ include/linux/mm.h | 13 ++++++++++ mm/gup.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+) diff --git a/include/linux/fs.h b/include/linux/fs.h index 885266aae2d7d5..9c6c51b7a64459 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3175,6 +3175,20 @@ static inline bool vma_is_dax(struct vm_area_struct *vma) return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); } +static inline bool vma_is_fsdax(struct vm_area_struct *vma) +{ + struct inode *inode; + + if (!vma->vm_file) + return false; + if (!vma_is_dax(vma)) + return false; + inode = file_inode(vma->vm_file); + if (inode->i_mode == S_IFCHR) + return false; /* device-dax */ + return true; +} + static inline int iocb_flags(struct file *file) { int res = 0; diff --git a/include/linux/mm.h b/include/linux/mm.h index c96a8b769e3dbc..db647d428100c2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1368,6 +1368,19 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); +#ifdef CONFIG_FS_DAX +long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas); +#else +static inline long get_user_pages_longterm(unsigned long start, + unsigned long nr_pages, unsigned int gup_flags, + struct page **pages, struct vm_area_struct **vmas) +{ + return get_user_pages(start, nr_pages, gup_flags, pages, vmas); +} +#endif /* CONFIG_FS_DAX */ + int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); diff --git a/mm/gup.c b/mm/gup.c index b2b4d4263768d8..165ba2174c7584 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1095,6 +1095,70 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, } EXPORT_SYMBOL(get_user_pages); +#ifdef CONFIG_FS_DAX +/* + * This is the same as get_user_pages() in that it assumes we are + * operating on the current task's mm, but it goes further to validate + * that the vmas associated with the address range are suitable for + * longterm elevated page reference counts. For example, filesystem-dax + * mappings are subject to the lifetime enforced by the filesystem and + * we need guarantees that longterm users like RDMA and V4L2 only + * establish mappings that have a kernel enforced revocation mechanism. + * + * "longterm" == userspace controlled elevated page count lifetime. + * Contrast this to iov_iter_get_pages() usages which are transient. 
+ */ +long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas_arg) +{ + struct vm_area_struct **vmas = vmas_arg; + struct vm_area_struct *vma_prev = NULL; + long rc, i; + + if (!pages) + return -EINVAL; + + if (!vmas) { + vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *), + GFP_KERNEL); + if (!vmas) + return -ENOMEM; + } + + rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas); + + for (i = 0; i < rc; i++) { + struct vm_area_struct *vma = vmas[i]; + + if (vma == vma_prev) + continue; + + vma_prev = vma; + + if (vma_is_fsdax(vma)) + break; + } + + /* + * Either get_user_pages() failed, or the vma validation + * succeeded, in either case we don't need to put_page() before + * returning. + */ + if (i >= rc) + goto out; + + for (i = 0; i < rc; i++) + put_page(pages[i]); + rc = -EOPNOTSUPP; +out: + if (vmas != vmas_arg) + kfree(vmas); + return rc; +} +EXPORT_SYMBOL(get_user_pages_longterm); +#endif /* CONFIG_FS_DAX */ + /** * populate_vma_page_range() - populate a range of pages in the vma. * @vma: target vma From daac045736edf0e4ec21d7b39b3633537246f9c8 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 29 Nov 2017 16:10:39 -0800 Subject: [PATCH 0259/3326] mm: fail get_vaddr_frames() for filesystem-dax mappings commit b7f0554a56f21fb3e636a627450a9add030889be upstream. Until there is a solution to the dma-to-dax vs truncate problem it is not safe to allow V4L2, Exynos, and other frame vector users to create long standing / irrevocable memory registrations against filesytem-dax vmas. [dan.j.williams@intel.com: add comment for vma_is_fsdax() check in get_vaddr_frames(), per Jan] Link: http://lkml.kernel.org/r/151197874035.26211.4061781453123083667.stgit@dwillia2-desk3.amr.corp.intel.com Link: http://lkml.kernel.org/r/151068939985.7446.15684639617389154187.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: 3565fce3a659 ("mm, x86: get_user_pages() for dax mappings") Signed-off-by: Dan Williams Reviewed-by: Jan Kara Cc: Inki Dae Cc: Seung-Woo Kim Cc: Joonyoung Shim Cc: Kyungmin Park Cc: Mauro Carvalho Chehab Cc: Mel Gorman Cc: Vlastimil Babka Cc: Christoph Hellwig Cc: Doug Ledford Cc: Hal Rosenstock Cc: Jason Gunthorpe Cc: Jeff Moyer Cc: Ross Zwisler Cc: Sean Hefty Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/frame_vector.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mm/frame_vector.c b/mm/frame_vector.c index 2f98df0d460eef..297c7238f7d409 100644 --- a/mm/frame_vector.c +++ b/mm/frame_vector.c @@ -53,6 +53,18 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, ret = -EFAULT; goto out; } + + /* + * While get_vaddr_frames() could be used for transient (kernel + * controlled lifetime) pinning of memory pages all current + * users establish long term (userspace controlled lifetime) + * page pinning. Treat get_vaddr_frames() like + * get_user_pages_longterm() and disallow it for filesystem-dax + * mappings. + */ + if (vma_is_fsdax(vma)) + return -EOPNOTSUPP; + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { vec->got_ref = true; vec->is_pfns = false; From e13ee3368ee1cbf4d2a9a70af20cdf519fd8bb3c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 29 Nov 2017 16:10:43 -0800 Subject: [PATCH 0260/3326] v4l2: disable filesystem-dax mapping support commit b70131de648c2b997d22f4653934438013f407a1 upstream. 
V4L2 memory registrations are incompatible with filesystem-dax that needs the ability to revoke dma access to a mapping at will, or otherwise allow the kernel to wait for completion of DMA. The filesystem-dax implementation breaks the traditional solution of truncate of active file backed mappings since there is no page-cache page we can orphan to sustain ongoing DMA. If v4l2 wants to support long lived DMA mappings it needs to arrange to hold a file lease or use some other mechanism so that the kernel can coordinate revoking DMA access when the filesystem needs to truncate mappings. Link: http://lkml.kernel.org/r/151068940499.7446.12846708245365671207.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: 3565fce3a659 ("mm, x86: get_user_pages() for dax mappings") Signed-off-by: Dan Williams Reported-by: Jan Kara Reviewed-by: Jan Kara Cc: Mauro Carvalho Chehab Cc: Christoph Hellwig Cc: Doug Ledford Cc: Hal Rosenstock Cc: Inki Dae Cc: Jason Gunthorpe Cc: Jeff Moyer Cc: Joonyoung Shim Cc: Kyungmin Park Cc: Mel Gorman Cc: Ross Zwisler Cc: Sean Hefty Cc: Seung-Woo Kim Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/videobuf-dma-sg.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 0b5c43f7e020da..f412429cf5ba58 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", data, size, dma->nr_pages); - err = get_user_pages(data & PAGE_MASK, dma->nr_pages, + err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages, flags, dma->pages, NULL); if (err != dma->nr_pages) { dma->nr_pages = (err >= 0) ? err : 0; - dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages); + dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err, + dma->nr_pages); return err < 0 ? err : -EINVAL; } return 0; From aab681eb8f3bfe2950c8493f39cef6ae34b731b3 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 29 Nov 2017 16:10:47 -0800 Subject: [PATCH 0261/3326] IB/core: disable memory registration of filesystem-dax vmas commit 5f1d43de54164dcfb9bfa542fcc92c1e1a1b6c1d upstream. Until there is a solution to the dma-to-dax vs truncate problem it is not safe to allow RDMA to create long standing memory registrations against filesytem-dax vmas. 
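For illustration (hypothetical userspace snippet; it assumes the error from get_user_pages_longterm() propagates unchanged through the uverbs registration path), a client that maps a file on a DAX filesystem would now see the memory registration refused instead of silently creating an irrevocable pin:

    /* Hypothetical libibverbs usage: the user-visible effect of this change. */
    #include <infiniband/verbs.h>
    #include <sys/mman.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static struct ibv_mr *try_reg(struct ibv_pd *pd, int fsdax_fd, size_t len)
    {
            void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                             fsdax_fd, 0);
            struct ibv_mr *mr;

            if (buf == MAP_FAILED)
                    return NULL;
            mr = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);
            if (!mr)
                    fprintf(stderr, "reg_mr failed: %s\n", strerror(errno));
            return mr;
    }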
Link: http://lkml.kernel.org/r/151068941011.7446.7766030590347262502.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: 3565fce3a659 ("mm, x86: get_user_pages() for dax mappings") Signed-off-by: Dan Williams Reported-by: Christoph Hellwig Reviewed-by: Christoph Hellwig Acked-by: Jason Gunthorpe Acked-by: Doug Ledford Cc: Sean Hefty Cc: Hal Rosenstock Cc: Jeff Moyer Cc: Ross Zwisler Cc: Inki Dae Cc: Jan Kara Cc: Joonyoung Shim Cc: Kyungmin Park Cc: Mauro Carvalho Chehab Cc: Mel Gorman Cc: Seung-Woo Kim Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/umem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 21e60b1e2ff41b..130606c3b07c15 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, sg_list_start = umem->sg_head.sgl; while (npages) { - ret = get_user_pages(cur_base, + ret = get_user_pages_longterm(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof (struct page *)), gup_flags, page_list, vma_list); From c16a65582a72d3adea145b9b0a737c6c35ce89fc Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 29 Nov 2017 16:10:51 -0800 Subject: [PATCH 0262/3326] exec: avoid RLIMIT_STACK races with prlimit() commit 04e35f4495dd560db30c25efca4eecae8ec8c375 upstream. While the defense-in-depth RLIMIT_STACK limit on setuid processes was protected against races from other threads calling setrlimit(), I missed protecting it against races from external processes calling prlimit(). This adds locking around the change and makes sure that rlim_max is set too. Link: http://lkml.kernel.org/r/20171127193457.GA11348@beast Fixes: 64701dee4178e ("exec: Use sane stack rlimit under secureexec") Signed-off-by: Kees Cook Reported-by: Ben Hutchings Reported-by: Brad Spengler Acked-by: Serge Hallyn Cc: James Morris Cc: Andy Lutomirski Cc: Oleg Nesterov Cc: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/exec.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/exec.c b/fs/exec.c index 3e14ba25f678bf..4726c777dd387b 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1340,10 +1340,15 @@ void setup_new_exec(struct linux_binprm * bprm) * avoid bad behavior from the prior rlimits. This has to * happen before arch_pick_mmap_layout(), which examines * RLIMIT_STACK, but after the point of no return to avoid - * needing to clean up the change on failure. + * races from other threads changing the limits. This also + * must be protected from races with prlimit() calls. */ + task_lock(current->group_leader); if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM) current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM; + if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM) + current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM; + task_unlock(current->group_leader); } arch_pick_mmap_layout(current->mm); From 8a0bb9ebaa8b8faee61f095757662fe5d7fd8da6 Mon Sep 17 00:00:00 2001 From: chenjie Date: Wed, 29 Nov 2017 16:10:54 -0800 Subject: [PATCH 0263/3326] mm/madvise.c: fix madvise() infinite loop under special circumstances commit 6ea8d958a2c95a1d514015d4e29ba21a8c0a1a91 upstream. MADVISE_WILLNEED has always been a noop for DAX (formerly XIP) mappings. 
Unfortunately madvise_willneed() doesn't communicate this information properly to the generic madvise syscall implementation. The calling convention is quite subtle there. madvise_vma() is supposed to either return an error or update &prev otherwise the main loop will never advance to the next vma and it will keep looping for ever without a way to get out of the kernel. It seems this has been broken since introduction. Nobody has noticed because nobody seems to be using MADVISE_WILLNEED on these DAX mappings. [mhocko@suse.com: rewrite changelog] Link: http://lkml.kernel.org/r/20171127115318.911-1-guoxuenan@huawei.com Fixes: fe77ba6f4f97 ("[PATCH] xip: madvice/fadvice: execute in place") Signed-off-by: chenjie Signed-off-by: guoxuenan Acked-by: Michal Hocko Cc: Minchan Kim Cc: zhangyi (F) Cc: Miao Xie Cc: Mike Rapoport Cc: Shaohua Li Cc: Andrea Arcangeli Cc: Mel Gorman Cc: Kirill A. Shutemov Cc: David Rientjes Cc: Anshuman Khandual Cc: Rik van Riel Cc: Carsten Otte Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/madvise.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index 375cf32087e4a2..751e97aa22106f 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -276,15 +276,14 @@ static long madvise_willneed(struct vm_area_struct *vma, { struct file *file = vma->vm_file; + *prev = vma; #ifdef CONFIG_SWAP if (!file) { - *prev = vma; force_swapin_readahead(vma, start, end); return 0; } if (shmem_mapping(file->f_mapping)) { - *prev = vma; force_shm_swapin_readahead(vma, start, end, file->f_mapping); return 0; @@ -299,7 +298,6 @@ static long madvise_willneed(struct vm_area_struct *vma, return 0; } - *prev = vma; start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; if (end > vma->vm_end) end = vma->vm_end; From 13167cf417ddf105d2822eebe775547ed35d1a08 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Wed, 29 Nov 2017 16:11:12 -0800 Subject: [PATCH 0264/3326] mm: migrate: fix an incorrect call of prep_transhuge_page() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 40a899ed16486455f964e46d1af31fd4fded21c1 upstream. In https://lkml.org/lkml/2017/11/20/411, Andrea reported that during memory hotplug/hot remove prep_transhuge_page() is called incorrectly on non-THP pages for migration, when THP is on but THP migration is not enabled. This leads to a bad state of target pages for migration. By inspecting the code, if called on a non-THP, prep_transhuge_page() will 1) change the value of the mapping of (page + 2), since it is used for THP deferred list; 2) change the lru value of (page + 1), since it is used for THP's dtor. Both can lead to data corruption of these two pages. Andrea said: "Pragmatically and from the point of view of the memory_hotplug subsys, the effect is a kernel crash when pages are being migrated during a memory hot remove offline and migration target pages are found in a bad state" This patch fixes it by only calling prep_transhuge_page() when we are certain that the target page is THP. 
Link: http://lkml.kernel.org/r/20171121021855.50525-1-zi.yan@sent.com Fixes: 8135d8926c08 ("mm: memory_hotplug: memory hotremove supports thp migration") Signed-off-by: Zi Yan Reported-by: Andrea Reale Cc: Naoya Horiguchi Cc: Michal Hocko Cc: "Jérôme Glisse" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/migrate.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 895ec0c4942e68..a2246cf670badb 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -54,7 +54,7 @@ static inline struct page *new_page_nodemask(struct page *page, new_page = __alloc_pages_nodemask(gfp_mask, order, preferred_nid, nodemask); - if (new_page && PageTransHuge(page)) + if (new_page && PageTransHuge(new_page)) prep_transhuge_page(new_page); return new_page; From d983b6251c5535b5c5eb02ce48bf8eeb7417ef65 Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Wed, 29 Nov 2017 16:11:15 -0800 Subject: [PATCH 0265/3326] mm, memcg: fix mem_cgroup_swapout() for THPs commit d08afa149acfd00871484ada6dabc3880524cd1c upstream. Commit d6810d730022 ("memcg, THP, swap: make mem_cgroup_swapout() support THP") changed mem_cgroup_swapout() to support transparent huge page (THP). However the patch missed one location which should be changed for correctly handling THPs. The resulting bug will cause the memory cgroups whose THPs were swapped out to become zombies on deletion. Link: http://lkml.kernel.org/r/20171128161941.20931-1-shakeelb@google.com Fixes: d6810d730022 ("memcg, THP, swap: make mem_cgroup_swapout() support THP") Signed-off-by: Shakeel Butt Acked-by: Johannes Weiner Acked-by: Michal Hocko Cc: Huang Ying Cc: Vladimir Davydov Cc: Greg Thelen Signed-off-by: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 661f046ad3181f..53f7c919b9160b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6044,7 +6044,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) memcg_check_events(memcg, page); if (!mem_cgroup_is_root(memcg)) - css_put(&memcg->css); + css_put_many(&memcg->css, nr_entries); } /** From fa2a9894733bdeaa93c750735b7fdc581ef10024 Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Wed, 29 Nov 2017 16:11:19 -0800 Subject: [PATCH 0266/3326] fs/fat/inode.c: fix sb_rdonly() change commit b6e8e12c0aeb5fbf1bf46c84d58cc93aedede385 upstream. Commit bc98a42c1f7d ("VFS: Convert sb->s_flags & MS_RDONLY to sb_rdonly(sb)") converted fat_remount():new_rdonly from a bool to an int. However fat_remount() depends upon the compiler's conversion of a non-zero integer into boolean `true'. Fix it by switching `new_rdonly' back into a bool. 
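To make the reasoning concrete, a generic illustration (standalone snippet, not the FAT code itself) of why the implicit bool conversion matters when a flag bit is tested and then compared against a 0/1 predicate such as sb_rdonly():

    /* Illustrative only: an int keeps the raw bit value, a bool normalizes it. */
    #include <stdbool.h>

    #define SOME_FLAG 0x4   /* any flag bit other than bit 0 */

    static bool remount_changes_flag(unsigned int flags, bool currently_set)
    {
            int  as_int  = flags & SOME_FLAG;   /* 0 or 4 */
            bool as_bool = flags & SOME_FLAG;   /* 0 or 1 */

            /*
             * "as_int != currently_set" would report a change even when both
             * sides mean "set" (4 != 1); the bool comparison behaves as intended.
             */
            (void)as_int;
            return as_bool != currently_set;
    }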
Link: http://lkml.kernel.org/r/87mv3d5x51.fsf@mail.parknet.co.jp Fixes: bc98a42c1f7d0f8 ("VFS: Convert sb->s_flags & MS_RDONLY to sb_rdonly(sb)") Signed-off-by: OGAWA Hirofumi Cc: Joe Perches Cc: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/fat/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 30c52394a7adbc..c7a4dee206b902 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -779,7 +779,7 @@ static void __exit fat_destroy_inodecache(void) static int fat_remount(struct super_block *sb, int *flags, char *data) { - int new_rdonly; + bool new_rdonly; struct msdos_sb_info *sbi = MSDOS_SB(sb); *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME); From 793a07bef9eb557ebad28cdf9ca30ba41d082c9f Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Wed, 29 Nov 2017 16:11:23 -0800 Subject: [PATCH 0267/3326] autofs: revert "autofs: take more care to not update last_used on path walk" commit 43694d4bf843ddd34519e8e9de983deefeada699 upstream. While commit 092a53452bb7 ("autofs: take more care to not update last_used on path walk") helped (partially) resolve a problem where automounts were not expiring due to aggressive accesses from user space it has a side effect for very large environments. This change helps with the expire problem by making the expire more aggressive but, for very large environments, that means more mount requests from clients. When there are a lot of clients that can mean fairly significant server load increases. It turns out I put the last_used in this position to solve this very problem and failed to update my own thinking of the autofs expire policy. So the patch being reverted introduces a regression which should be fixed. 
Link: http://lkml.kernel.org/r/151174729420.6162.1832622523537052460.stgit@pluto.themaw.net Fixes: 092a53452b ("autofs: take more care to not update last_used on path walk") Signed-off-by: Ian Kent Reviewed-by: NeilBrown Cc: Al Viro Cc: Colin Walters Cc: David Howells Cc: Ondrej Holy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/autofs4/root.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index d79ced92586140..82e8f6edfb48d0 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -281,8 +281,8 @@ static int autofs4_mount_wait(const struct path *path, bool rcu_walk) pr_debug("waiting for mount name=%pd\n", path->dentry); status = autofs4_wait(sbi, path, NFY_MOUNT); pr_debug("mount wait done status=%d\n", status); - ino->last_used = jiffies; } + ino->last_used = jiffies; return status; } @@ -321,21 +321,16 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path) */ if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) { struct dentry *parent = dentry->d_parent; + struct autofs_info *ino; struct dentry *new; new = d_lookup(parent, &dentry->d_name); if (!new) return NULL; - if (new == dentry) - dput(new); - else { - struct autofs_info *ino; - - ino = autofs4_dentry_ino(new); - ino->last_used = jiffies; - dput(path->dentry); - path->dentry = new; - } + ino = autofs4_dentry_ino(new); + ino->last_used = jiffies; + dput(path->dentry); + path->dentry = new; } return path->dentry; } From f354f4ffe6a371a4ae27a682cea56a7e4e1d74d9 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Wed, 29 Nov 2017 16:11:26 -0800 Subject: [PATCH 0268/3326] autofs: revert "autofs: fix AT_NO_AUTOMOUNT not being honored" commit 5d38f049cee1e1c4a7ac55aa79d37d01ddcc3860 upstream. Commit 42f461482178 ("autofs: fix AT_NO_AUTOMOUNT not being honored") allowed the fstatat(2) system call to properly honor the AT_NO_AUTOMOUNT flag but introduced a semantic change. In order to honor AT_NO_AUTOMOUNT a semantic change was made to the negative dentry case for stat family system calls in follow_automount(). This changed the unconditional triggering of an automount in this case to no longer be done and an error returned instead. This has caused more problems than I expected so reverting the change is needed. In a discussion with Neil Brown it was concluded that the automount(8) daemon can implement this change without kernel modifications. So that will be done instead and the autofs module documentation updated with a description of the problem and what needs to be done by module users for this specific case. Link: http://lkml.kernel.org/r/151174730120.6162.3848002191530283984.stgit@pluto.themaw.net Fixes: 42f4614821 ("autofs: fix AT_NO_AUTOMOUNT not being honored") Signed-off-by: Ian Kent Cc: Neil Brown Cc: Al Viro Cc: David Howells Cc: Colin Walters Cc: Ondrej Holy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/namei.c | 15 +++------------ include/linux/fs.h | 3 ++- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/fs/namei.c b/fs/namei.c index ed8b9488a890c2..62a0db6e6725e7 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1129,18 +1129,9 @@ static int follow_automount(struct path *path, struct nameidata *nd, * of the daemon to instantiate them before they can be used. 
*/ if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | - LOOKUP_OPEN | LOOKUP_CREATE | - LOOKUP_AUTOMOUNT))) { - /* Positive dentry that isn't meant to trigger an - * automount, EISDIR will allow it to be used, - * otherwise there's no mount here "now" so return - * ENOENT. - */ - if (path->dentry->d_inode) - return -EISDIR; - else - return -ENOENT; - } + LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) && + path->dentry->d_inode) + return -EISDIR; if (path->dentry->d_sb->s_user_ns != &init_user_ns) return -EACCES; diff --git a/include/linux/fs.h b/include/linux/fs.h index 9c6c51b7a64459..440281f8564d8a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3069,7 +3069,8 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat) static inline int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat, int flags) { - return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS); + return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT, + stat, STATX_BASIC_STATS); } static inline int vfs_fstat(int fd, struct kstat *stat) { From be75ad849b9f3b0512e976a81e31a0beea1419ab Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 29 Nov 2017 16:11:30 -0800 Subject: [PATCH 0269/3326] mm/hugetlb: fix NULL-pointer dereference on 5-level paging machine commit f4f0a3d85b50a65a348e2b8635041d6b30f01deb upstream. I made a mistake during converting hugetlb code to 5-level paging: in huge_pte_alloc() we have to use p4d_alloc(), not p4d_offset(). Otherwise it leads to crash -- NULL-pointer dereference in pud_alloc() if p4d table is not yet allocated. It only can happen in 5-level paging mode. In 4-level paging mode p4d_offset() always returns pgd, so we are fine. Link: http://lkml.kernel.org/r/20171122121921.64822-1-kirill.shutemov@linux.intel.com Fixes: c2febafc6773 ("mm: convert generic code to 5-level paging") Signed-off-by: Kirill A. Shutemov Acked-by: Vlastimil Babka Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/hugetlb.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a2233d722ff94b..c539941671b4e7 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4625,7 +4625,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pte_t *pte = NULL; pgd = pgd_offset(mm, addr); - p4d = p4d_offset(pgd, addr); + p4d = p4d_alloc(mm, pgd, addr); + if (!p4d) + return NULL; pud = pud_alloc(mm, p4d, addr); if (pud) { if (sz == PUD_SIZE) { From 28580e75734b8bfc10d0c23ba026737c71c551c8 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 17 Nov 2017 14:50:46 -0500 Subject: [PATCH 0270/3326] btrfs: clear space cache inode generation always commit 8e138e0d92c6c9d3d481674fb14e3439b495be37 upstream. We discovered a box that had double allocations, and suspected the space cache may be to blame. While auditing the write out path I noticed that if we've already setup the space cache we will just carry on. This means that any error we hit after cache_save_setup before we go to actually write the cache out we won't reset the inode generation, so whatever was already written will be considered correct, except it'll be stale. Fix this by _always_ resetting the generation on the block group inode, this way we only ever have valid or invalid cache. With this patch I was no longer able to reproduce cache corruption with dm-log-writes and my bpf error injection tool. 
Signed-off-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/extent-tree.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 08698105fa4a86..e4774c02d922ea 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3526,13 +3526,6 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, goto again; } - /* We've already setup this transaction, go ahead and exit */ - if (block_group->cache_generation == trans->transid && - i_size_read(inode)) { - dcs = BTRFS_DC_SETUP; - goto out_put; - } - /* * We want to set the generation to 0, that way if anything goes wrong * from here on out we know not to trust this cache when we load up next @@ -3556,6 +3549,13 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, } WARN_ON(ret); + /* We've already setup this transaction, go ahead and exit */ + if (block_group->cache_generation == trans->transid && + i_size_read(inode)) { + dcs = BTRFS_DC_SETUP; + goto out_put; + } + if (i_size_read(inode) > 0) { ret = btrfs_check_trunc_cache_free_space(fs_info, &fs_info->global_block_rsv); From db77ab54a5e2f740d1ae39326f90c06d5c5a26d4 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 3 Nov 2017 08:00:10 -0400 Subject: [PATCH 0271/3326] nfsd: Fix stateid races between OPEN and CLOSE commit 15ca08d3299682dc49bad73251677b2c5017ef08 upstream. Open file stateids can linger on the nfs4_file list of stateids even after they have been closed. In order to avoid reusing such a stateid, and confusing the client, we need to recheck the nfs4_stid's type after taking the mutex. Otherwise, we risk reusing an old stateid that was already closed, which will confuse clients that expect new stateids to conform to RFC7530 Sections 9.1.4.2 and 16.2.5 or RFC5661 Sections 8.2.2 and 18.2.4. Signed-off-by: Trond Myklebust Signed-off-by: J. 
Bruce Fields Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/nfs4state.c | 67 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 59 insertions(+), 8 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d386d569edbc41..e5e66e50505236 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3512,7 +3512,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) /* ignore lock owners */ if (local->st_stateowner->so_is_open_owner == 0) continue; - if (local->st_stateowner == &oo->oo_owner) { + if (local->st_stateowner != &oo->oo_owner) + continue; + if (local->st_stid.sc_type == NFS4_OPEN_STID) { ret = local; atomic_inc(&ret->st_stid.sc_count); break; @@ -3521,6 +3523,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) return ret; } +static __be32 +nfsd4_verify_open_stid(struct nfs4_stid *s) +{ + __be32 ret = nfs_ok; + + switch (s->sc_type) { + default: + break; + case NFS4_CLOSED_STID: + case NFS4_CLOSED_DELEG_STID: + ret = nfserr_bad_stateid; + break; + case NFS4_REVOKED_DELEG_STID: + ret = nfserr_deleg_revoked; + } + return ret; +} + +/* Lock the stateid st_mutex, and deal with races with CLOSE */ +static __be32 +nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp) +{ + __be32 ret; + + mutex_lock(&stp->st_mutex); + ret = nfsd4_verify_open_stid(&stp->st_stid); + if (ret != nfs_ok) + mutex_unlock(&stp->st_mutex); + return ret; +} + +static struct nfs4_ol_stateid * +nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) +{ + struct nfs4_ol_stateid *stp; + for (;;) { + spin_lock(&fp->fi_lock); + stp = nfsd4_find_existing_open(fp, open); + spin_unlock(&fp->fi_lock); + if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok) + break; + nfs4_put_stid(&stp->st_stid); + } + return stp; +} + static struct nfs4_openowner * alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, struct nfsd4_compound_state *cstate) @@ -3565,6 +3613,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) mutex_init(&stp->st_mutex); mutex_lock(&stp->st_mutex); +retry: spin_lock(&oo->oo_owner.so_client->cl_lock); spin_lock(&fp->fi_lock); @@ -3589,7 +3638,11 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) spin_unlock(&fp->fi_lock); spin_unlock(&oo->oo_owner.so_client->cl_lock); if (retstp) { - mutex_lock(&retstp->st_mutex); + /* Handle races with CLOSE */ + if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { + nfs4_put_stid(&retstp->st_stid); + goto retry; + } /* To keep mutex tracking happy */ mutex_unlock(&stp->st_mutex); stp = retstp; @@ -4410,9 +4463,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf status = nfs4_check_deleg(cl, open, &dp); if (status) goto out; - spin_lock(&fp->fi_lock); - stp = nfsd4_find_existing_open(fp, open); - spin_unlock(&fp->fi_lock); + stp = nfsd4_find_and_lock_existing_open(fp, open); } else { open->op_file = NULL; status = nfserr_bad_stateid; @@ -4426,7 +4477,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf */ if (stp) { /* Stateid was found, this is an OPEN upgrade */ - mutex_lock(&stp->st_mutex); status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); if (status) { mutex_unlock(&stp->st_mutex); @@ -5317,7 +5367,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) bool unhashed; LIST_HEAD(reaplist); - s->st_stid.sc_type = NFS4_CLOSED_STID; spin_lock(&clp->cl_lock); unhashed = unhash_open_stateid(s, &reaplist); @@ -5357,10 +5406,12 @@ nfsd4_close(struct 
svc_rqst *rqstp, struct nfsd4_compound_state *cstate, nfsd4_bump_seqid(cstate, status); if (status) goto out; + + stp->st_stid.sc_type = NFS4_CLOSED_STID; nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); - mutex_unlock(&stp->st_mutex); nfsd4_close_open_stateid(stp); + mutex_unlock(&stp->st_mutex); /* put reference from nfs4_preprocess_seqid_op */ nfs4_put_stid(&stp->st_stid); From 73cfeab6755ccd442b689ea15769b7e9342c471e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 3 Nov 2017 08:00:11 -0400 Subject: [PATCH 0272/3326] nfsd: Fix another OPEN stateid race commit d8a1a000555ecd1b824ac1ed6df8fe364dfbbbb0 upstream. If nfsd4_process_open2() is initialising a new stateid, and yet the call to nfs4_get_vfs_file() fails for some reason, then we must declare the stateid closed, and unhash it before dropping the mutex. Right now, we unhash the stateid after dropping the mutex, and without changing the stateid type, meaning that another OPEN could theoretically look it up and attempt to use it. Reported-by: Andrew W Elble Signed-off-by: Trond Myklebust Signed-off-by: J. Bruce Fields Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/nfs4state.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index e5e66e50505236..30f35bc4dcf62a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4452,6 +4452,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf struct nfs4_ol_stateid *stp = NULL; struct nfs4_delegation *dp = NULL; __be32 status; + bool new_stp = false; /* * Lookup file; if found, lookup stateid and check open request, @@ -4471,11 +4472,19 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf goto out; } + if (!stp) { + stp = init_open_stateid(fp, open); + if (!open->op_stp) + new_stp = true; + } + /* * OPEN the file, or upgrade an existing OPEN. * If truncate fails, the OPEN fails. + * + * stp is already locked. */ - if (stp) { + if (!new_stp) { /* Stateid was found, this is an OPEN upgrade */ status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); if (status) { @@ -4483,22 +4492,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf goto out; } } else { - /* stp is returned locked. */ - stp = init_open_stateid(fp, open); - /* See if we lost the race to some other thread */ - if (stp->st_access_bmap != 0) { - status = nfs4_upgrade_open(rqstp, fp, current_fh, - stp, open); - if (status) { - mutex_unlock(&stp->st_mutex); - goto out; - } - goto upgrade_out; - } status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); if (status) { - mutex_unlock(&stp->st_mutex); + stp->st_stid.sc_type = NFS4_CLOSED_STID; release_open_stateid(stp); + mutex_unlock(&stp->st_mutex); goto out; } @@ -4507,7 +4505,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf if (stp->st_clnt_odstate == open->op_odstate) open->op_odstate = NULL; } -upgrade_out: + nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); mutex_unlock(&stp->st_mutex); From b78da96d2882bea2ec7184dc1eba3e07e829364d Mon Sep 17 00:00:00 2001 From: Naofumi Honda Date: Thu, 9 Nov 2017 10:57:16 -0500 Subject: [PATCH 0273/3326] nfsd: fix panic in posix_unblock_lock called from nfs4_laundromat commit 64ebe12494fd5d193f014ce38e1fd83cc57883c8 upstream. From kernel 4.9, my two nfsv4 servers sometimes suffer from "panic: unable to handle kernel page request" in posix_unblock_lock() called from nfs4_laundromat(). 
These panics diseappear if we revert the commit "nfsd: add a LRU list for blocked locks". The cause appears to be a typo in nfs4_laundromat(), which is also present in nfs4_state_shutdown_net(). Fixes: 7919d0a27f1e "nfsd: add a LRU list for blocked locks" Cc: jlayton@redhat.com Reveiwed-by: Jeff Layton Signed-off-by: J. Bruce Fields Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/nfs4state.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 30f35bc4dcf62a..a439a70177a46e 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4732,7 +4732,7 @@ nfs4_laundromat(struct nfsd_net *nn) spin_unlock(&nn->blocked_locks_lock); while (!list_empty(&reaplist)) { - nbl = list_first_entry(&nn->blocked_locks_lru, + nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, nbl_lru); list_del_init(&nbl->nbl_lru); posix_unblock_lock(&nbl->nbl_lock); @@ -7152,7 +7152,7 @@ nfs4_state_shutdown_net(struct net *net) spin_unlock(&nn->blocked_locks_lock); while (!list_empty(&reaplist)) { - nbl = list_first_entry(&nn->blocked_locks_lru, + nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, nbl_lru); list_del_init(&nbl->nbl_lru); posix_unblock_lock(&nbl->nbl_lock); From 721872a1997c4b8aae80ed2e6ef9dc05c53383f6 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Fri, 10 Nov 2017 11:04:52 +0100 Subject: [PATCH 0274/3326] crypto: algif_aead - skip SGL entries with NULL page commit 8e1fa89aa8bc2870009b4486644e4a58f2e2a4f5 upstream. The TX SGL may contain SGL entries that are assigned a NULL page. This may happen if a multi-stage AIO operation is performed where the data for each stage is pointed to by one SGL entry. Upon completion of that stage, af_alg_pull_tsgl will assign NULL to the SGL entry. The NULL cipher used to copy the AAD from TX SGL to the destination buffer, however, cannot handle the case where the SGL starts with an SGL entry having a NULL page. Thus, the code needs to advance the start pointer into the SGL to the first non-NULL entry. This fixes a crash visible on Intel x86 32 bit using the libkcapi test suite. 
Fixes: 72548b093ee38 ("crypto: algif_aead - copy AAD from src to dst") Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/algif_aead.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 516b38c3a16955..ac8f101ab29c65 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -101,10 +101,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, struct aead_tfm *aeadc = pask->private; struct crypto_aead *tfm = aeadc->aead; struct crypto_skcipher *null_tfm = aeadc->null_tfm; - unsigned int as = crypto_aead_authsize(tfm); + unsigned int i, as = crypto_aead_authsize(tfm); struct af_alg_async_req *areq; - struct af_alg_tsgl *tsgl; - struct scatterlist *src; + struct af_alg_tsgl *tsgl, *tmp; + struct scatterlist *rsgl_src, *tsgl_src = NULL; int err = 0; size_t used = 0; /* [in] TX bufs to be en/decrypted */ size_t outlen = 0; /* [out] RX bufs produced by kernel */ @@ -178,7 +178,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, } processed = used + ctx->aead_assoclen; - tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list); + list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) { + for (i = 0; i < tsgl->cur; i++) { + struct scatterlist *process_sg = tsgl->sg + i; + + if (!(process_sg->length) || !sg_page(process_sg)) + continue; + tsgl_src = process_sg; + break; + } + if (tsgl_src) + break; + } + if (processed && !tsgl_src) { + err = -EFAULT; + goto free; + } /* * Copy of AAD from source to destination @@ -194,7 +209,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, */ /* Use the RX SGL as source (and destination) for crypto op. */ - src = areq->first_rsgl.sgl.sg; + rsgl_src = areq->first_rsgl.sgl.sg; if (ctx->enc) { /* @@ -207,7 +222,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, * v v * RX SGL: AAD || PT || Tag */ - err = crypto_aead_copy_sgl(null_tfm, tsgl->sg, + err = crypto_aead_copy_sgl(null_tfm, tsgl_src, areq->first_rsgl.sgl.sg, processed); if (err) goto free; @@ -225,7 +240,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, */ /* Copy AAD || CT to RX SGL buffer for in-place operation. */ - err = crypto_aead_copy_sgl(null_tfm, tsgl->sg, + err = crypto_aead_copy_sgl(null_tfm, tsgl_src, areq->first_rsgl.sgl.sg, outlen); if (err) goto free; @@ -257,11 +272,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, areq->tsgl); } else /* no RX SGL present (e.g. authentication only) */ - src = areq->tsgl; + rsgl_src = areq->tsgl; } /* Initialize the crypto operation */ - aead_request_set_crypt(&areq->cra_u.aead_req, src, + aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src, areq->first_rsgl.sgl.sg, used, ctx->iv); aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen); aead_request_set_tfm(&areq->cra_u.aead_req, tfm); From 7f21961a2d7bfe45f734eab5b0ce6a40ee128b17 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Fri, 10 Nov 2017 13:20:55 +0100 Subject: [PATCH 0275/3326] crypto: af_alg - remove locking in async callback commit 7d2c3f54e6f646887d019faa45f35d6fe9fe82ce upstream. The code paths protected by the socket-lock do not use or modify the socket in a non-atomic fashion. The actions pertaining the socket do not even need to be handled as an atomic operation. Thus, the socket-lock can be safely ignored. 
This fixes a bug regarding scheduling in atomic as the callback function may be invoked in interrupt context. In addition, the sock_hold is moved before the AIO encrypt/decrypt operation to ensure that the socket is always present. This avoids a tiny race window where the socket is unprotected and yet used by the AIO operation. Finally, the release of resources for a crypto operation is moved into a common function of af_alg_free_resources. Fixes: e870456d8e7c8 ("crypto: algif_skcipher - overhaul memory management") Fixes: d887c52d6ae43 ("crypto: algif_aead - overhaul memory management") Reported-by: Romain Izard Signed-off-by: Stephan Mueller Tested-by: Romain Izard Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/af_alg.c | 21 ++++++++++++++------- crypto/algif_aead.c | 23 ++++++++++++----------- crypto/algif_skcipher.c | 23 ++++++++++++----------- include/crypto/if_alg.h | 1 + 4 files changed, 39 insertions(+), 29 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 337cf382718ee3..a72659f452a5bb 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1047,6 +1047,18 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page, } EXPORT_SYMBOL_GPL(af_alg_sendpage); +/** + * af_alg_free_resources - release resources required for crypto request + */ +void af_alg_free_resources(struct af_alg_async_req *areq) +{ + struct sock *sk = areq->sk; + + af_alg_free_areq_sgls(areq); + sock_kfree_s(sk, areq, areq->areqlen); +} +EXPORT_SYMBOL_GPL(af_alg_free_resources); + /** * af_alg_async_cb - AIO callback handler * @@ -1063,18 +1075,13 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err) struct kiocb *iocb = areq->iocb; unsigned int resultlen; - lock_sock(sk); - /* Buffer size written by crypto operation. */ resultlen = areq->outlen; - af_alg_free_areq_sgls(areq); - sock_kfree_s(sk, areq, areq->areqlen); - __sock_put(sk); + af_alg_free_resources(areq); + sock_put(sk); iocb->ki_complete(iocb, err ? err : resultlen, 0); - - release_sock(sk); } EXPORT_SYMBOL_GPL(af_alg_async_cb); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index ac8f101ab29c65..d0b45145cb307b 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -283,12 +283,23 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { /* AIO operation */ + sock_hold(sk); areq->iocb = msg->msg_iocb; aead_request_set_callback(&areq->cra_u.aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_async_cb, areq); err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) : crypto_aead_decrypt(&areq->cra_u.aead_req); + + /* AIO operation in progress */ + if (err == -EINPROGRESS || err == -EBUSY) { + /* Remember output size that will be generated. */ + areq->outlen = outlen; + + return -EIOCBQUEUED; + } + + sock_put(sk); } else { /* Synchronous operation */ aead_request_set_callback(&areq->cra_u.aead_req, @@ -300,19 +311,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, &ctx->completion); } - /* AIO operation in progress */ - if (err == -EINPROGRESS) { - sock_hold(sk); - - /* Remember output size that will be generated. */ - areq->outlen = outlen; - - return -EIOCBQUEUED; - } free: - af_alg_free_areq_sgls(areq); - sock_kfree_s(sk, areq, areq->areqlen); + af_alg_free_resources(areq); return err ? 
err : outlen; } diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 8ae4170aaeb4f2..30ee2a8e8f4292 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -117,6 +117,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { /* AIO operation */ + sock_hold(sk); areq->iocb = msg->msg_iocb; skcipher_request_set_callback(&areq->cra_u.skcipher_req, CRYPTO_TFM_REQ_MAY_SLEEP, @@ -124,6 +125,16 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, err = ctx->enc ? crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); + + /* AIO operation in progress */ + if (err == -EINPROGRESS || err == -EBUSY) { + /* Remember output size that will be generated. */ + areq->outlen = len; + + return -EIOCBQUEUED; + } + + sock_put(sk); } else { /* Synchronous operation */ skcipher_request_set_callback(&areq->cra_u.skcipher_req, @@ -137,19 +148,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, &ctx->completion); } - /* AIO operation in progress */ - if (err == -EINPROGRESS) { - sock_hold(sk); - - /* Remember output size that will be generated. */ - areq->outlen = len; - - return -EIOCBQUEUED; - } free: - af_alg_free_areq_sgls(areq); - sock_kfree_s(sk, areq, areq->areqlen); + af_alg_free_resources(areq); return err ? err : len; } diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 75ec9c662268bc..aeec003a566b56 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -255,6 +255,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize); ssize_t af_alg_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); +void af_alg_free_resources(struct af_alg_async_req *areq); void af_alg_async_cb(struct crypto_async_request *_req, int err); unsigned int af_alg_poll(struct file *file, struct socket *sock, poll_table *wait); From 861eae231b0437427676a4ed323d31097002d5a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ondrej=20Mosn=C3=A1=C4=8Dek?= Date: Thu, 23 Nov 2017 13:49:06 +0100 Subject: [PATCH 0276/3326] crypto: skcipher - Fix skcipher_walk_aead_common commit c14ca8386539a298c1c19b003fe55e37d0f0e89c upstream. The skcipher_walk_aead_common function calls scatterwalk_copychunks on the input and output walks to skip the associated data. If the AD end at an SG list entry boundary, then after these calls the walks will still be pointing to the end of the skipped region. These offsets are later checked for alignment in skcipher_walk_next, so the skcipher_walk may detect the alignment incorrectly. This patch fixes it by calling scatterwalk_done after the copychunks calls to ensure that the offsets refer to the right SG list entry. 
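A simplified model of the off-by-one-segment state (struct seg and struct cursor are stand-ins invented for this sketch, not the scatterwalk API): after skipping assoclen bytes the offset may legally equal the current segment's length, and the extra completion step is what folds the cursor onto the start of the next segment before any alignment check runs.

#include <stdio.h>

struct seg { unsigned int len; };

struct cursor {
        const struct seg *seg;
        unsigned int off;
};

/* Roll an offset that has reached the end of a segment over to the
 * beginning of the next one (assumes another segment follows). */
static void cursor_normalize(struct cursor *c)
{
        while (c->off >= c->seg->len) {
                c->off -= c->seg->len;
                c->seg++;
        }
}

int main(void)
{
        struct seg sgl[2] = { { .len = 16 }, { .len = 64 } };
        struct cursor c = { .seg = &sgl[0], .off = 16 };        /* 16 bytes of AAD skipped */

        cursor_normalize(&c);
        printf("segment %td, offset %u\n", c.seg - sgl, c.off); /* 1, 0 */
        return 0;
}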
Fixes: b286d8b1a690 ("crypto: skcipher - Add skcipher walk interface") Signed-off-by: Ondrej Mosnacek Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/skcipher.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crypto/skcipher.c b/crypto/skcipher.c index d5692e35fab1f0..778e0ff42bfa80 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -522,6 +522,9 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); + scatterwalk_done(&walk->in, 0, walk->total); + scatterwalk_done(&walk->out, 0, walk->total); + walk->iv = req->iv; walk->oiv = req->iv; From 84779085fa10014b9e8208d7e71b54bced73075c Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Thu, 2 Nov 2017 13:03:42 +0300 Subject: [PATCH 0277/3326] lockd: lost rollback of set_grace_period() in lockd_down_net() commit 3a2b19d1ee5633f76ae8a88da7bc039a5d1732aa upstream. Commit efda760fe95ea ("lockd: fix lockd shutdown race") is incorrect, it removes lockd_manager and disarm grace_period_end for init_net only. If nfsd was started from another net namespace lockd_up_net() calls set_grace_period() that adds lockd_manager into per-netns list and queues grace_period_end delayed work. These action should be reverted in lockd_down_net(). Otherwise it can lead to double list_add on after restart nfsd in netns, and to use-after-free if non-disarmed delayed work will be executed after netns destroy. Fixes: efda760fe95e ("lockd: fix lockd shutdown race") Signed-off-by: Vasily Averin Signed-off-by: J. Bruce Fields Signed-off-by: Greg Kroah-Hartman --- fs/lockd/svc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index f04ecfc7ece0c9..45e96549ebd2ab 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -274,6 +274,8 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net) if (ln->nlmsvc_users) { if (--ln->nlmsvc_users == 0) { nlm_shutdown_hosts_net(net); + cancel_delayed_work_sync(&ln->grace_period_end); + locks_end_grace(&ln->lockd_manager); svc_shutdown_net(serv, net); dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net); } From b367ea94535dbdede5e00110339b3e4c9bdc617d Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 24 Nov 2017 16:23:15 +0100 Subject: [PATCH 0278/3326] s390: revert ELF_ET_DYN_BASE base changes commit 345f8f34bb473241d62803951c18a844dd705f8d upstream. This reverts commit a73dc5370e153ac63718d850bddf0c9aa9d871e6. Reducing the base address for 31-bit PIE executables from (STACK_TOP/3)*2 to 4MB broke several compat programs which use -fpie to move the executable out of the lower 16MB. Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/include/asm/elf.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 9a3cb3983c0140..1a61b1b997f2a0 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -194,13 +194,14 @@ struct arch_elf_state { #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE -/* - * This is the base location for PIE (ET_DYN with INTERP) loads. On - * 64-bit, this is raised to 4GB to leave the entire 32-bit address - * space open for things that want to use the area for 32-bit pointers. - */ -#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \ - 0x100000000UL) +/* This is the location that an ET_DYN program is loaded if exec'ed. 
Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. 64-bit + tasks are aligned to 4GB. */ +#define ELF_ET_DYN_BASE (is_compat_task() ? \ + (STACK_TOP / 3 * 2) : \ + (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. */ From 4479ade38b14f53673b0843308fe24344b37cba5 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 16 Nov 2017 09:50:19 +0100 Subject: [PATCH 0279/3326] drm: omapdrm: Fix DPI on platforms using the DSI VDDS commit bf25dac38f71d392a31ec074f55cbc941f1eaf1d upstream. Commit d178e034d565 ("drm: omapdrm: Move FEAT_DPI_USES_VDDS_DSI feature to dpi code") replaced usage of platform data version with SoC matching to configure DPI VDDS. The SoC match entries were incorrect, they should have matched on the machine name instead of the SoC family. Fix it. The result was observed on OpenPandora with OMAP3530 where the panel only had the Blue channel and Red&Green were missing. It was not observed on GTA04 with DM3730. Fixes: d178e034d565 ("drm: omapdrm: Move FEAT_DPI_USES_VDDS_DSI feature to dpi code") Signed-off-by: Laurent Pinchart Reported-by: H. Nikolaus Schaller Tested-by: H. Nikolaus Schaller Signed-off-by: Tomi Valkeinen Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/omapdrm/dss/dpi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index daf286fc8a4082..ca1e3b489540fe 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll) } static const struct soc_device_attribute dpi_soc_devices[] = { - { .family = "OMAP3[456]*" }, - { .family = "[AD]M37*" }, + { .machine = "OMAP3[456]*" }, + { .machine = "[AD]M37*" }, { /* sentinel */ } }; From a3ed24cd28f2db6f6f5b2009565a2463c0df4af0 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 20 Nov 2017 11:51:40 +0200 Subject: [PATCH 0280/3326] omapdrm: hdmi4: Correct the SoC revision matching commit 23970e150a0a49f9a966c46e5d22fed06226098f upstream. I believe the intention of the commit 2c9fc9bf45f8 ("drm: omapdrm: Move FEAT_HDMI_* features to hdmi4 driver") was to identify omap4430 ES1.x, omap4430 ES2.x and other OMAP4 revisions, like omap4460. By using family=OMAP4 in the match the code will treat omap4460 ES1.x in the same way as it would treat omap4430 ES1.x. This breaks HDMI audio on OMAP4460 devices (PandaES for example). Correct the match rule so we do not get a false positive match.
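A small user-space illustration of the over-match (fnmatch() stands in for the kernel's glob matching, and treating the family as a prefix pattern is a simplification made only for this sketch):

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
        const char *machine = "OMAP4460", *revision = "ES1.1";

        /* broad family-style match: also hits OMAP4460 ES1.x (false positive) */
        int by_family  = !fnmatch("OMAP4*",   machine, 0) &&
                         !fnmatch("ES1.?",    revision, 0);

        /* machine-string match: correctly misses anything but OMAP4430 */
        int by_machine = !fnmatch("OMAP4430", machine, 0) &&
                         !fnmatch("ES1.?",    revision, 0);

        printf("family match: %d, machine match: %d\n", by_family, by_machine);
        return 0;
}

Matching on the machine string keeps the ES1.x/ES2.x quirks confined to the 4430, which is what the hunk below does.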
Fixes: 2c9fc9bf45f8 ("drm: omapdrm: Move FEAT_HDMI_* features to hdmi4 driver") Signed-off-by: Peter Ujfalusi Reviewed-by: Laurent Pinchart Signed-off-by: Tomi Valkeinen Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 365cf07daa017e..c3453f3bd603f4 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -889,25 +889,36 @@ struct hdmi4_features { bool audio_use_mclk; }; -static const struct hdmi4_features hdmi4_es1_features = { +static const struct hdmi4_features hdmi4430_es1_features = { .cts_swmode = false, .audio_use_mclk = false, }; -static const struct hdmi4_features hdmi4_es2_features = { +static const struct hdmi4_features hdmi4430_es2_features = { .cts_swmode = true, .audio_use_mclk = false, }; -static const struct hdmi4_features hdmi4_es3_features = { +static const struct hdmi4_features hdmi4_features = { .cts_swmode = true, .audio_use_mclk = true, }; static const struct soc_device_attribute hdmi4_soc_devices[] = { - { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features }, - { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features }, - { .family = "OMAP4", .data = &hdmi4_es3_features }, + { + .machine = "OMAP4430", + .revision = "ES1.?", + .data = &hdmi4430_es1_features, + }, + { + .machine = "OMAP4430", + .revision = "ES2.?", + .data = &hdmi4430_es2_features, + }, + { + .family = "OMAP4", + .data = &hdmi4_features, + }, { /* sentinel */ } }; From 69af22696bc737dfe3444d2ece5db000fbff5c35 Mon Sep 17 00:00:00 2001 From: John Johansen Date: Wed, 22 Nov 2017 07:33:38 -0800 Subject: [PATCH 0281/3326] apparmor: fix oops in audit_signal_cb hook commit b12cbb21586277f72533769832c24cc6c1d60ab3 upstream. The apparmor_audit_data struct ordering got messed up during a merge conflict, resulting in the signal integer and peer pointer being in a union instead of a struct. For most of the 4.13 and 4.14 life cycle, this was hidden by commit 651e28c5537a ("apparmor: add base infastructure for socket mediation") which fixed the apparmor_audit_data struct when its data was added. When that commit was reverted in -rc7 the signal audit bug was exposed, and unfortunately it never showed up in any of the testing until after 4.14 was released. Shaun Khan, Zephaniah E. Loss-Cutler-Hull filed nearly simultaneous bug reports (with different oopes, the smaller of which is included below). Full credit goes to Tetsuo Handa for jumping on this as well and noticing the audit data struct problem and reporting it. [ 76.178568] BUG: unable to handle kernel paging request at ffffffff0eee3bc0 [ 76.178579] IP: audit_signal_cb+0x6c/0xe0 [ 76.178581] PGD 1a640a067 P4D 1a640a067 PUD 0 [ 76.178586] Oops: 0000 [#1] PREEMPT SMP [ 76.178589] Modules linked in: fuse rfcomm bnep usblp uvcvideo btusb btrtl btbcm btintel bluetooth ecdh_generic ip6table_filter ip6_tables xt_tcpudp nf_conntrack_ipv4 nf_defrag_ipv4 xt_conntrack nf_conntrack iptable_filter ip_tables x_tables intel_rapl joydev wmi_bmof serio_raw iwldvm iwlwifi shpchp kvm_intel kvm irqbypass autofs4 algif_skcipher nls_iso8859_1 nls_cp437 crc32_pclmul ghash_clmulni_intel [ 76.178620] CPU: 0 PID: 10675 Comm: pidgin Not tainted 4.14.0-f1-dirty #135 [ 76.178623] Hardware name: Hewlett-Packard HP EliteBook Folio 9470m/18DF, BIOS 68IBD Ver. 
F.62 10/22/2015 [ 76.178625] task: ffff9c7a94c31dc0 task.stack: ffffa09b02a4c000 [ 76.178628] RIP: 0010:audit_signal_cb+0x6c/0xe0 [ 76.178631] RSP: 0018:ffffa09b02a4fc08 EFLAGS: 00010292 [ 76.178634] RAX: ffffa09b02a4fd60 RBX: ffff9c7aee0741f8 RCX: 0000000000000000 [ 76.178636] RDX: ffffffffee012290 RSI: 0000000000000006 RDI: ffff9c7a9493d800 [ 76.178638] RBP: ffffa09b02a4fd40 R08: 000000000000004d R09: ffffa09b02a4fc46 [ 76.178641] R10: ffffa09b02a4fcb8 R11: ffff9c7ab44f5072 R12: ffffa09b02a4fd40 [ 76.178643] R13: ffffffff9e447be0 R14: ffff9c7a94c31dc0 R15: 0000000000000001 [ 76.178646] FS: 00007f8b11ba2a80(0000) GS:ffff9c7afea00000(0000) knlGS:0000000000000000 [ 76.178648] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 76.178650] CR2: ffffffff0eee3bc0 CR3: 00000003d5209002 CR4: 00000000001606f0 [ 76.178652] Call Trace: [ 76.178660] common_lsm_audit+0x1da/0x780 [ 76.178665] ? d_absolute_path+0x60/0x90 [ 76.178669] ? aa_check_perms+0xcd/0xe0 [ 76.178672] aa_check_perms+0xcd/0xe0 [ 76.178675] profile_signal_perm.part.0+0x90/0xa0 [ 76.178679] aa_may_signal+0x16e/0x1b0 [ 76.178686] apparmor_task_kill+0x51/0x120 [ 76.178690] security_task_kill+0x44/0x60 [ 76.178695] group_send_sig_info+0x25/0x60 [ 76.178699] kill_pid_info+0x36/0x60 [ 76.178703] SYSC_kill+0xdb/0x180 [ 76.178707] ? preempt_count_sub+0x92/0xd0 [ 76.178712] ? _raw_write_unlock_irq+0x13/0x30 [ 76.178716] ? task_work_run+0x6a/0x90 [ 76.178720] ? exit_to_usermode_loop+0x80/0xa0 [ 76.178723] entry_SYSCALL_64_fastpath+0x13/0x94 [ 76.178727] RIP: 0033:0x7f8b0e58b767 [ 76.178729] RSP: 002b:00007fff19efd4d8 EFLAGS: 00000206 ORIG_RAX: 000000000000003e [ 76.178732] RAX: ffffffffffffffda RBX: 0000557f3e3c2050 RCX: 00007f8b0e58b767 [ 76.178735] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 000000000000263b [ 76.178737] RBP: 0000000000000000 R08: 0000557f3e3c2270 R09: 0000000000000001 [ 76.178739] R10: 000000000000022d R11: 0000000000000206 R12: 0000000000000000 [ 76.178741] R13: 0000000000000001 R14: 0000557f3e3c13c0 R15: 0000000000000000 [ 76.178745] Code: 48 8b 55 18 48 89 df 41 b8 20 00 08 01 5b 5d 48 8b 42 10 48 8b 52 30 48 63 48 4c 48 8b 44 c8 48 31 c9 48 8b 70 38 e9 f4 fd 00 00 <48> 8b 14 d5 40 27 e5 9e 48 c7 c6 7d 07 19 9f 48 89 df e8 fd 35 [ 76.178794] RIP: audit_signal_cb+0x6c/0xe0 RSP: ffffa09b02a4fc08 [ 76.178796] CR2: ffffffff0eee3bc0 [ 76.178799] ---[ end trace 514af9529297f1a3 ]--- Fixes: cd1dbf76b23d ("apparmor: add the ability to mediate signals") Reported-by: Zephaniah E. Loss-Cutler-Hull Reported-by: Shuah Khan Suggested-by: Tetsuo Handa Tested-by: Ivan Kozik Tested-by: Zephaniah E. 
Loss-Cutler-Hull Tested-by: Christian Boltz Tested-by: Shuah Khan Signed-off-by: John Johansen Signed-off-by: Greg Kroah-Hartman --- security/apparmor/include/audit.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h index 620e811696592d..4ac09511871702 100644 --- a/security/apparmor/include/audit.h +++ b/security/apparmor/include/audit.h @@ -121,17 +121,19 @@ struct apparmor_audit_data { /* these entries require a custom callback fn */ struct { struct aa_label *peer; - struct { - const char *target; - kuid_t ouid; - } fs; + union { + struct { + const char *target; + kuid_t ouid; + } fs; + int signal; + }; }; struct { struct aa_profile *profile; const char *ns; long pos; } iface; - int signal; struct { int rlim; unsigned long max; From e1b4e001b22a12b67c890f4611bcf55285fd0fc5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 20 Nov 2017 17:41:29 +0000 Subject: [PATCH 0282/3326] arm64: module-plts: factor out PLT generation code for ftrace commit 7e8b9c1d2e2f5f45db7d40b50d14f606097c25de upstream. To allow the ftrace trampoline code to reuse the PLT entry routines, factor it out and move it into asm/module.h. Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/module.h | 44 +++++++++++++++++++++++++++++++++ arch/arm64/kernel/module-plts.c | 38 ++-------------------------- 2 files changed, 46 insertions(+), 36 deletions(-) diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 19bd97671bb8d4..11d4aaee82e11e 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -45,4 +45,48 @@ extern u64 module_alloc_base; #define module_alloc_base ((u64)_etext - MODULES_VSIZE) #endif +struct plt_entry { + /* + * A program that conforms to the AArch64 Procedure Call Standard + * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or + * IP1 (x17) may be inserted at any branch instruction that is + * exposed to a relocation that supports long branches. Since that + * is exactly what we are dealing with here, we are free to use x16 + * as a scratch register in the PLT veneers. + */ + __le32 mov0; /* movn x16, #0x.... 
*/ + __le32 mov1; /* movk x16, #0x...., lsl #16 */ + __le32 mov2; /* movk x16, #0x...., lsl #32 */ + __le32 br; /* br x16 */ +}; + +static inline struct plt_entry get_plt_entry(u64 val) +{ + /* + * MOVK/MOVN/MOVZ opcode: + * +--------+------------+--------+-----------+-------------+---------+ + * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] | + * +--------+------------+--------+-----------+-------------+---------+ + * + * Rd := 0x10 (x16) + * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32) + * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ) + * sf := 1 (64-bit variant) + */ + return (struct plt_entry){ + cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5), + cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5), + cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5), + cpu_to_le32(0xd61f0200) + }; +} + +static inline bool plt_entries_equal(const struct plt_entry *a, + const struct plt_entry *b) +{ + return a->mov0 == b->mov0 && + a->mov1 == b->mov1 && + a->mov2 == b->mov2; +} + #endif /* __ASM_MODULE_H */ diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index d05dbe658409b2..ebff6c155cac29 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -11,21 +11,6 @@ #include #include -struct plt_entry { - /* - * A program that conforms to the AArch64 Procedure Call Standard - * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or - * IP1 (x17) may be inserted at any branch instruction that is - * exposed to a relocation that supports long branches. Since that - * is exactly what we are dealing with here, we are free to use x16 - * as a scratch register in the PLT veneers. - */ - __le32 mov0; /* movn x16, #0x.... */ - __le32 mov1; /* movk x16, #0x...., lsl #16 */ - __le32 mov2; /* movk x16, #0x...., lsl #32 */ - __le32 br; /* br x16 */ -}; - static bool in_init(const struct module *mod, void *loc) { return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size; @@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, int i = pltsec->plt_num_entries; u64 val = sym->st_value + rela->r_addend; - /* - * MOVK/MOVN/MOVZ opcode: - * +--------+------------+--------+-----------+-------------+---------+ - * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] | - * +--------+------------+--------+-----------+-------------+---------+ - * - * Rd := 0x10 (x16) - * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32) - * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ) - * sf := 1 (64-bit variant) - */ - plt[i] = (struct plt_entry){ - cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5), - cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5), - cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5), - cpu_to_le32(0xd61f0200) - }; + plt[i] = get_plt_entry(val); /* * Check if the entry we just created is a duplicate. Given that the * relocations are sorted, this will be the last entry we allocated. * (if one exists). */ - if (i > 0 && - plt[i].mov0 == plt[i - 1].mov0 && - plt[i].mov1 == plt[i - 1].mov1 && - plt[i].mov2 == plt[i - 1].mov2) + if (i > 0 && plt_entries_equal(plt + i, plt + i - 1)) return (u64)&plt[i - 1]; pltsec->plt_num_entries++; From e9b2a6680fb6173819598cba411522d37cb4585a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 20 Nov 2017 17:41:30 +0000 Subject: [PATCH 0283/3326] arm64: ftrace: emit ftrace-mod.o contents through code commit be0f272bfc83797f70d44faca86954df62e2bbc0 upstream. 
When building the arm64 kernel with both CONFIG_ARM64_MODULE_PLTS and CONFIG_DYNAMIC_FTRACE enabled, the ftrace-mod.o object file is built with the kernel and contains a trampoline that is linked into each module, so that modules can be loaded far away from the kernel and still reach the ftrace entry point in the core kernel with an ordinary relative branch, as is emitted by the compiler instrumentation code dynamic ftrace relies on. In order to be able to build out of tree modules, this object file needs to be included into the linux-headers or linux-devel packages, which is undesirable, as it makes arm64 a special case (although a precedent does exist for 32-bit PPC). Given that the trampoline essentially consists of a PLT entry, let's not bother with a source or object file for it, and simply patch it in whenever the trampoline is being populated, using the existing PLT support routines. Signed-off-by: Ard Biesheuvel Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/Makefile | 3 --- arch/arm64/include/asm/module.h | 2 +- arch/arm64/kernel/Makefile | 3 --- arch/arm64/kernel/ftrace-mod.S | 18 ------------------ arch/arm64/kernel/ftrace.c | 14 ++++++++------ arch/arm64/kernel/module-plts.c | 12 ++++++++++++ arch/arm64/kernel/module.lds | 1 + 7 files changed, 22 insertions(+), 31 deletions(-) delete mode 100644 arch/arm64/kernel/ftrace-mod.S diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 939b310913cf38..3eb4397150df8c 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -77,9 +77,6 @@ endif ifeq ($(CONFIG_ARM64_MODULE_PLTS),y) KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds -ifeq ($(CONFIG_DYNAMIC_FTRACE),y) -KBUILD_LDFLAGS_MODULE += $(objtree)/arch/arm64/kernel/ftrace-mod.o -endif endif # Default value diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 11d4aaee82e11e..4f766178fa6ff3 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -32,7 +32,7 @@ struct mod_arch_specific { struct mod_plt_sec init; /* for CONFIG_DYNAMIC_FTRACE */ - void *ftrace_trampoline; + struct plt_entry *ftrace_trampoline; }; #endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 0029e13adb596a..2f5ff2a65db3f4 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -63,6 +63,3 @@ extra-y += $(head-y) vmlinux.lds ifeq ($(CONFIG_DEBUG_EFI),y) AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\"" endif - -# will be included by each individual module but not by the core kernel itself -extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o diff --git a/arch/arm64/kernel/ftrace-mod.S b/arch/arm64/kernel/ftrace-mod.S deleted file mode 100644 index 00c4025be4ff83..00000000000000 --- a/arch/arm64/kernel/ftrace-mod.S +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (C) 2017 Linaro Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include - - .section ".text.ftrace_trampoline", "ax" - .align 3 -0: .quad 0 -__ftrace_trampoline: - ldr x16, 0b - br x16 -ENDPROC(__ftrace_trampoline) diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index c13b1fca0e5baf..50986e388d2b27 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (offset < -SZ_128M || offset >= SZ_128M) { #ifdef CONFIG_ARM64_MODULE_PLTS - unsigned long *trampoline; + struct plt_entry trampoline; struct module *mod; /* @@ -104,22 +104,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) * is added in the future, but for now, the pr_err() below * deals with a theoretical issue only. */ - trampoline = (unsigned long *)mod->arch.ftrace_trampoline; - if (trampoline[0] != addr) { - if (trampoline[0] != 0) { + trampoline = get_plt_entry(addr); + if (!plt_entries_equal(mod->arch.ftrace_trampoline, + &trampoline)) { + if (!plt_entries_equal(mod->arch.ftrace_trampoline, + &(struct plt_entry){})) { pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); return -EINVAL; } /* point the trampoline to our ftrace entry point */ module_disable_ro(mod); - trampoline[0] = addr; + *mod->arch.ftrace_trampoline = trampoline; module_enable_ro(mod, true); /* update trampoline before patching in the branch */ smp_wmb(); } - addr = (unsigned long)&trampoline[1]; + addr = (unsigned long)(void *)mod->arch.ftrace_trampoline; #else /* CONFIG_ARM64_MODULE_PLTS */ return -EINVAL; #endif /* CONFIG_ARM64_MODULE_PLTS */ diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index ebff6c155cac29..ea640f92fe5ada 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -120,6 +120,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned long core_plts = 0; unsigned long init_plts = 0; Elf64_Sym *syms = NULL; + Elf_Shdr *tramp = NULL; int i; /* @@ -131,6 +132,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.core.plt = sechdrs + i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt")) mod->arch.init.plt = sechdrs + i; + else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && + !strcmp(secstrings + sechdrs[i].sh_name, + ".text.ftrace_trampoline")) + tramp = sechdrs + i; else if (sechdrs[i].sh_type == SHT_SYMTAB) syms = (Elf64_Sym *)sechdrs[i].sh_addr; } @@ -181,5 +186,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.init.plt_num_entries = 0; mod->arch.init.plt_max_entries = init_plts; + if (tramp) { + tramp->sh_type = SHT_NOBITS; + tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + tramp->sh_addralign = __alignof__(struct plt_entry); + tramp->sh_size = sizeof(struct plt_entry); + } + return 0; } diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds index f7c9781a9d48b4..22e36a21c11345 100644 --- a/arch/arm64/kernel/module.lds +++ b/arch/arm64/kernel/module.lds @@ -1,4 +1,5 @@ SECTIONS { .plt (NOLOAD) : { BYTE(0) } .init.plt (NOLOAD) : { BYTE(0) } + .text.ftrace_trampoline (NOLOAD) : { BYTE(0) } } From 3bfbf2b1bc18f76dc88064b4d6354b2e4424478c Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Wed, 22 Nov 2017 23:02:07 +0530 Subject: [PATCH 0284/3326] powerpc/powernv: Fix kexec crashes caused by tlbie tracing commit a3961f824cdbe7eb431254dc7d8f6f6767f474aa upstream. 
Rebooting into a new kernel with kexec fails in trace_tlbie() which is called from native_hpte_clear(). This happens if the running kernel has CONFIG_LOCKDEP enabled. With lockdep enabled, the tracepoints always execute few RCU checks regardless of whether tracing is on or off. We are already in the last phase of kexec sequence in real mode with HILE_BE set. At this point the RCU check ends up in RCU_LOCKDEP_WARN and causes kexec to fail. Fix this by not calling trace_tlbie() from native_hpte_clear(). mpe: It's not safe to call trace points at this point in the kexec path, even if we could avoid the RCU checks/warnings. The only solution is to not call them. Fixes: 0428491cba92 ("powerpc/mm: Trace tlbie(l) instructions") Signed-off-by: Mahesh Salgaonkar Reported-by: Aneesh Kumar K.V Suggested-by: Michael Ellerman Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/hash_native_64.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 3848af167df9de..640cf566e98653 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -47,7 +47,8 @@ DEFINE_RAW_SPINLOCK(native_tlbie_lock); -static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) +static inline unsigned long ___tlbie(unsigned long vpn, int psize, + int apsize, int ssize) { unsigned long va; unsigned int penc; @@ -100,7 +101,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) : "memory"); break; } - trace_tlbie(0, 0, va, 0, 0, 0, 0); + return va; +} + +static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) +{ + unsigned long rb; + + rb = ___tlbie(vpn, psize, apsize, ssize); + trace_tlbie(0, 0, rb, 0, 0, 0, 0); } static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) @@ -652,7 +661,7 @@ static void native_hpte_clear(void) if (hpte_v & HPTE_V_VALID) { hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); hptep->v = 0; - __tlbie(vpn, psize, apsize, ssize); + ___tlbie(vpn, psize, apsize, ssize); } } From c12e358867d2656b10ccccc36240d6ce93e0e497 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Nov 2017 14:51:02 +1100 Subject: [PATCH 0285/3326] powerpc/kexec: Fix kexec/kdump in P9 guest kernels commit 2621e945fbf1d6df5f3f0ba7be5bae3d2cf9b6a5 upstream. The code that cleans up the IAMR/AMOR before kexec'ing failed to remember that when we're running as a guest AMOR is not writable, it's hypervisor privileged. They symptom is that the kexec stops before entering purgatory and nothing else is seen on the console. If you examine the state of the system all threads will be in the 0x700 program check handler. Fix it by making the write to AMOR dependent on HV mode. Fixes: 1e2a516e89fc ("powerpc/kexec: Fix radix to hash kexec due to IAMR/AMOR") Reported-by: Yilin Zhang Debugged-by: David Gibson Signed-off-by: Michael Ellerman Acked-by: Balbir Singh Reviewed-by: David Gibson Tested-by: David Gibson Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/misc_64.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 8ac0bd2bddb0c9..3280953a82cf63 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -623,7 +623,9 @@ BEGIN_FTR_SECTION * NOTE, we rely on r0 being 0 from above. 
*/ mtspr SPRN_IAMR,r0 +BEGIN_FTR_SECTION_NESTED(42) mtspr SPRN_AMOR,r0 +END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) /* save regs for local vars on new stack. From 6c4eaffbad3701f037f1d60ca892dcbe7641ff74 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Sun, 5 Nov 2017 16:11:30 +0200 Subject: [PATCH 0286/3326] KVM: x86: pvclock: Handle first-time write to pvclock-page contains random junk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 51c4b8bba674cfd2260d173602c4dac08e4c3a99 upstream. When the guest passes KVM its pvclock-page GPA via WRMSR to MSR_KVM_SYSTEM_TIME / MSR_KVM_SYSTEM_TIME_NEW, KVM doesn't initialize the pvclock-page to some start values. It just requests a clock-update which will happen before entering the guest. The clock-update logic will call kvm_setup_pvclock_page() to update the pvclock-page with info. However, kvm_setup_pvclock_page() *wrongly* assumes that the version field is initialized to an even number. This is wrong because at first-time write the field could be any value. The fix simply makes sure that if the first-time version field is odd, it is incremented once more to make it even, and only then does the standard logic start. This follows the same logic as done in other pvclock shared pages (see kvm_write_wall_clock() and record_steal_time()). Signed-off-by: Liran Alon Reviewed-by: Nikita Leshenko Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 03869eb7fcd67b..181106080e41c8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1830,6 +1830,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v) */ BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); + if (guest_hv_clock.version & 1) + ++guest_hv_clock.version; /* first time write, random junk */ + vcpu->hv_clock.version = guest_hv_clock.version + 1; kvm_write_guest_cached(v->kvm, &vcpu->pv_time, &vcpu->hv_clock, From 15cc35bc76f95d22d1e3e42617f6103568f19931 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Sun, 5 Nov 2017 16:56:32 +0200 Subject: [PATCH 0287/3326] KVM: x86: Exit to user-mode on #UD intercept when emulator requires MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 61cb57c9ed631c95b54f8e9090c89d18b3695b3c upstream. Instruction emulation after trapping a #UD exception can result in an MMIO access, for example when emulating a MOVBE on a processor that doesn't support the instruction. In this case, the #UD vmexit handler must exit to user mode, but there wasn't any code to do so. Add it for both VMX and SVM.
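Returning to the pvclock change above: the version field follows a seqcount-style even/odd protocol, and the fix only has to make sure the very first update starts from an even base. A minimal sketch of that convention (plain C with an invented helper, not the KVM code):

#include <stdint.h>
#include <stdio.h>

/* Begin an update of a pvclock-style shared page: readers retry while the
 * version is odd, so the pre-update value must be even. */
static uint32_t begin_update(uint32_t guest_version)
{
        if (guest_version & 1)          /* first-time write, random junk */
                ++guest_version;
        return guest_version + 1;       /* odd while the update is in flight */
}

int main(void)
{
        uint32_t junk = 0x1dead1;       /* odd, uninitialized-looking value */
        uint32_t v = begin_update(junk);

        printf("version is odd during the update: %u\n", v & 1u);
        v++;                            /* publish: back to an even value */
        printf("version is even after the update: %d\n", !(v & 1u));
        return 0;
}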
Signed-off-by: Liran Alon Reviewed-by: Nikita Leshenko Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Wanpeng Li Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/svm.c | 2 ++ arch/x86/kvm/vmx.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index ca209a4a783418..17fb6c6d939adb 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2189,6 +2189,8 @@ static int ud_interception(struct vcpu_svm *svm) int er; er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); + if (er == EMULATE_USER_EXIT) + return 0; if (er != EMULATE_DONE) kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 21cad7068cbfac..b21113bcf227b2 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5914,6 +5914,8 @@ static int handle_exception(struct kvm_vcpu *vcpu) return 1; } er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); + if (er == EMULATE_USER_EXIT) + return 0; if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; From bb98bd9733b5b54b0de67e171b0057dcc6fb98de Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 10 Nov 2017 10:49:38 +0100 Subject: [PATCH 0288/3326] KVM: x86: inject exceptions produced by x86_decode_insn MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 6ea6e84309ca7e0e850b3083e6b09344ee15c290 upstream. Sometimes, a processor might execute an instruction while another processor is updating the page tables for that instruction's code page, but before the TLB shootdown completes. The interesting case happens if the page is in the TLB. In general, the processor will succeed in executing the instruction and nothing bad happens. However, what if the instruction is an MMIO access? If *that* happens, KVM invokes the emulator, and the emulator gets the updated page tables. If the update side had marked the code page as non present, the page table walk then will fail and so will x86_decode_insn. Unfortunately, even though kvm_fetch_guest_virt is correctly returning X86EMUL_PROPAGATE_FAULT, x86_decode_insn's caller treats the failure as a fatal error if the instruction cannot simply be reexecuted (as is the case for MMIO). And this in fact happened sometimes when rebooting Windows 2012r2 guests. Just checking ctxt->have_exception and injecting the exception if true is enough to fix the case. Thanks to Eduardo Habkost for helping in the debugging of this issue. Reported-by: Yanan Fu Cc: Eduardo Habkost Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 181106080e41c8..4195cbcdb3102a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5708,6 +5708,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; + if (ctxt->have_exception && inject_emulated_exception(vcpu)) + return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); From 47b35d190b4676953a5ce25572e031d842264194 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Fri, 17 Nov 2017 11:52:49 +0000 Subject: [PATCH 0289/3326] KVM: lapic: Split out x2apic ldr calculation commit e872fa94662d0644057c7c80b3071bdb9249e5ab upstream. 
Split out the ldr calculation from kvm_apic_set_x2apic_id since we're about to reuse it in the following patch. Signed-off-by: Dr. David Alan Gilbert Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/lapic.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 36c90d631096d8..4991e9e5161198 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -266,9 +266,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id) recalculate_apic_map(apic->vcpu->kvm); } +static inline u32 kvm_apic_calc_x2apic_ldr(u32 id) +{ + return ((id >> 4) << 16) | (1 << (id & 0xf)); +} + static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id) { - u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf)); + u32 ldr = kvm_apic_calc_x2apic_ldr(id); WARN_ON_ONCE(id != apic->vcpu->vcpu_id); From 81b3019c35d650e7570ff016ba0732ba6d62c466 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Fri, 17 Nov 2017 11:52:50 +0000 Subject: [PATCH 0290/3326] KVM: lapic: Fixup LDR on load in x2apic commit 12806ba937382fdfdbad62a399aa2dce65c10fcd upstream. In x2apic mode the LDR is fixed based on the ID rather than separately loadable like it was before x2. When kvm_apic_set_state is called, the base is set, and if it has the X2APIC_ENABLE flag set then the LDR is calculated; however that value gets overwritten by the memcpy a few lines below overwriting it with the value that came from userland. The symptom is a lack of EOI after loading the state (e.g. after a QEMU migration) and is due to the EOI bitmap being wrong due to the incorrect LDR. This was seen with a Win2016 guest under Qemu with irqchip=split whose USB mouse didn't work after a VM migration. This corresponds to RH bug: https://bugzilla.redhat.com/show_bug.cgi?id=1502591 Reported-by: Yiqian Wei Signed-off-by: Dr. David Alan Gilbert [Applied fixup from Liran Alon. - Paolo] Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/lapic.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 4991e9e5161198..ef03efba1c2324 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2201,6 +2201,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, { if (apic_x2apic_mode(vcpu->arch.apic)) { u32 *id = (u32 *)(s->regs + APIC_ID); + u32 *ldr = (u32 *)(s->regs + APIC_LDR); if (vcpu->kvm->arch.x2apic_format) { if (*id != vcpu->vcpu_id) @@ -2211,6 +2212,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, else *id <<= 24; } + + /* In x2APIC mode, the LDR is fixed and based on the id */ + if (set) + *ldr = kvm_apic_calc_x2apic_ldr(*id); } return 0; From 4162b2973c1c6e49d7b193e572dec691233115c3 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Mon, 27 Nov 2017 11:28:50 +0100 Subject: [PATCH 0291/3326] mmc: sdhci: Avoid swiotlb buffer being full commit 250dcd11466e06df64b92520e2c56bdae453581b upstream. The commit de3ee99b097d ("mmc: Delete bounce buffer handling") deletes the bounce buffer handling, but also causes the max_req_size for sdhci to be increased, in case when max_segs == 1. This causes errors for sdhci-pci Ricoh variant, about the swiotlb buffer to become full. Fix the issue, by taking IO_TLB_SEGSIZE and IO_TLB_SHIFT into account when deciding the max_req_size for sdhci. 
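A back-of-the-envelope version of the new sdhci limit from the previous patch (the two constants are assumed to mirror the kernel's defaults of 2 KiB swiotlb slabs and 128 slabs per mapping):

#include <stdio.h>

#define IO_TLB_SHIFT    11      /* 2 KiB swiotlb slab size (assumed) */
#define IO_TLB_SEGSIZE  128     /* max contiguous slabs per mapping (assumed) */

int main(void)
{
        unsigned int max_req_size = 524288;     /* SDMA boundary, 512 KiB */
        unsigned int swiotlb_max  = (1u << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

        /* cap the request size by the largest contiguous swiotlb mapping */
        if (swiotlb_max < max_req_size)
                max_req_size = swiotlb_max;

        printf("max_req_size = %u bytes (%u KiB)\n",
               max_req_size, max_req_size / 1024);
        return 0;
}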
Reported-by: Jiri Slaby Fixes: de3ee99b097d ("mmc: Delete bounce buffer handling") Signed-off-by: Ulf Hansson Tested-by: Jiri Slaby Acked-by: Adrian Hunter Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 0d5fcca18c9ec1..6152e83ff93528 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -3650,23 +3651,30 @@ int sdhci_setup_host(struct sdhci_host *host) spin_lock_init(&host->lock); + /* + * Maximum number of sectors in one transfer. Limited by SDMA boundary + * size (512KiB). Note some tuning modes impose a 4MiB limit, but this + * is less anyway. + */ + mmc->max_req_size = 524288; + /* * Maximum number of segments. Depends on if the hardware * can do scatter/gather or not. */ - if (host->flags & SDHCI_USE_ADMA) + if (host->flags & SDHCI_USE_ADMA) { mmc->max_segs = SDHCI_MAX_SEGS; - else if (host->flags & SDHCI_USE_SDMA) + } else if (host->flags & SDHCI_USE_SDMA) { mmc->max_segs = 1; - else /* PIO */ + if (swiotlb_max_segment()) { + unsigned int max_req_size = (1 << IO_TLB_SHIFT) * + IO_TLB_SEGSIZE; + mmc->max_req_size = min(mmc->max_req_size, + max_req_size); + } + } else { /* PIO */ mmc->max_segs = SDHCI_MAX_SEGS; - - /* - * Maximum number of sectors in one transfer. Limited by SDMA boundary - * size (512KiB). Note some tuning modes impose a 4MiB limit, but this - * is less anyway. - */ - mmc->max_req_size = 524288; + } /* * Maximum segment size. Could be one segment with the maximum number From 0d8d2b6373c19f85bab6af1702563b50e17b2cf6 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 21 Nov 2017 15:42:27 +0200 Subject: [PATCH 0292/3326] mmc: block: Fix missing blk_put_request() commit 34c089e806793a66e450b11bd167db6047399fcd upstream. Ensure blk_get_request() is paired with blk_put_request(). Fixes: 0493f6fe5bde ("mmc: block: Move boot partition locking into a driver op") Fixes: 627c3ccfb46a ("mmc: debugfs: Move block debugfs into block module") Signed-off-by: Adrian Hunter Reviewed-by: Linus Walleij Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/core/block.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 2ad7b5c691569e..e17994318562de 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -207,6 +207,7 @@ static ssize_t power_ro_lock_store(struct device *dev, req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; blk_execute_rq(mq->queue, NULL, req, 0); ret = req_to_mmc_queue_req(req)->drv_op_result; + blk_put_request(req); if (!ret) { pr_info("%s: Locking boot partition ro until next power on\n", @@ -2321,6 +2322,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) *val = ret; ret = 0; } + blk_put_request(req); return ret; } @@ -2351,6 +2353,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; blk_execute_rq(mq->queue, NULL, req, 0); err = req_to_mmc_queue_req(req)->drv_op_result; + blk_put_request(req); if (err) { pr_err("FAILED %d\n", err); goto out_free; From 957c27d45611ad4ef924840346bc338b8f3c870b Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 21 Nov 2017 15:42:28 +0200 Subject: [PATCH 0293/3326] mmc: block: Check return value of blk_get_request() commit fb8e456e547ed2c699f64665bd8a3b9bde7b9728 upstream. 
blk_get_request() can fail, always check the return value. Fixes: 0493f6fe5bde ("mmc: block: Move boot partition locking into a driver op") Fixes: 3ecd8cf23f88 ("mmc: block: move multi-ioctl() to use block layer") Fixes: 614f0388f580 ("mmc: block: move single ioctl() commands to block requests") Fixes: 627c3ccfb46a ("mmc: debugfs: Move block debugfs into block module") Signed-off-by: Adrian Hunter Reviewed-by: Linus Walleij Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/core/block.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index e17994318562de..a20e29abb374b5 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -204,6 +204,10 @@ static ssize_t power_ro_lock_store(struct device *dev, /* Dispatch locking to the block layer */ req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM); + if (IS_ERR(req)) { + count = PTR_ERR(req); + goto out_put; + } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; blk_execute_rq(mq->queue, NULL, req, 0); ret = req_to_mmc_queue_req(req)->drv_op_result; @@ -220,7 +224,7 @@ static ssize_t power_ro_lock_store(struct device *dev, set_disk_ro(part_md->disk, 1); } } - +out_put: mmc_blk_put(md); return count; } @@ -581,6 +585,10 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, req = blk_get_request(mq->queue, idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto cmd_done; + } idatas[0] = idata; req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; req_to_mmc_queue_req(req)->drv_op_data = idatas; @@ -644,6 +652,10 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, req = blk_get_request(mq->queue, idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto cmd_err; + } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; req_to_mmc_queue_req(req)->drv_op_data = idata; req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; @@ -2315,6 +2327,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) /* Ask the block layer about the card status */ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) + return PTR_ERR(req); req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; blk_execute_rq(mq->queue, NULL, req, 0); ret = req_to_mmc_queue_req(req)->drv_op_result; @@ -2349,6 +2363,10 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) /* Ask the block layer for the EXT CSD */ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_free; + } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; blk_execute_rq(mq->queue, NULL, req, 0); From 94497458fbb519dbc230e5156f58d4b3a49ba0c1 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 21 Nov 2017 15:42:29 +0200 Subject: [PATCH 0294/3326] mmc: core: Do not leave the block driver in a suspended state commit ebe7dd45cf49e3b49cacbaace17f9f878f21fbea upstream. The block driver must be resumed if the mmc bus fails to suspend the card. 
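The shape of the fix is the usual unwind-on-failure pattern; a tiny sketch with invented helper names (not the real mmc bus ops):

#include <stdio.h>

static int  block_suspend(void) { printf("block: suspended\n"); return 0; }
static void block_resume(void)  { printf("block: resumed\n"); }
static int  card_suspend(void)  { printf("card: suspend failed\n"); return -5; /* -EIO */ }

static int bus_suspend(void)
{
        int ret = block_suspend();

        if (ret)
                return ret;

        ret = card_suspend();
        if (ret)
                block_resume();         /* roll back the stage that did succeed */

        return ret;
}

int main(void)
{
        return bus_suspend() ? 1 : 0;
}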
Signed-off-by: Adrian Hunter Reviewed-by: Linus Walleij Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/core/bus.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 301246513a3709..7f428e387de3f2 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -157,6 +157,9 @@ static int mmc_bus_suspend(struct device *dev) return ret; ret = host->bus_ops->suspend(host); + if (ret) + pm_generic_resume(dev); + return ret; } From 64d1df6f018e728774bdf5d1aeb576d1c8d1872d Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 21 Nov 2017 15:42:30 +0200 Subject: [PATCH 0295/3326] mmc: block: Ensure that debugfs files are removed commit f9f0da98819503b06b35e61869d18cf3a8cd3323 upstream. The card is not necessarily being removed, but the debugfs files must be removed when the driver is removed, otherwise they will continue to exist after unbinding the card from the driver. e.g. # echo "mmc1:0001" > /sys/bus/mmc/drivers/mmcblk/unbind # cat /sys/kernel/debug/mmc1/mmc1\:0001/ext_csd [ 173.634584] BUG: unable to handle kernel NULL pointer dereference at 0000000000000050 [ 173.643356] IP: mmc_ext_csd_open+0x5e/0x170 A complication is that the debugfs_root may have already been removed, so check for that too. Fixes: 627c3ccfb46a ("mmc: debugfs: Move block debugfs into block module") Signed-off-by: Adrian Hunter Reviewed-by: Linus Walleij Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/core/block.c | 44 ++++++++++++++++++++++++++++++++------ drivers/mmc/core/debugfs.c | 1 + 2 files changed, 38 insertions(+), 7 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index a20e29abb374b5..ccb516f18d72e2 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -119,6 +119,10 @@ struct mmc_blk_data { struct device_attribute force_ro; struct device_attribute power_ro_lock; int area_type; + + /* debugfs files (only in main mmc_blk_data) */ + struct dentry *status_dentry; + struct dentry *ext_csd_dentry; }; static DEFINE_MUTEX(open_lock); @@ -2417,7 +2421,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = { .llseek = default_llseek, }; -static int mmc_blk_add_debugfs(struct mmc_card *card) +static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) { struct dentry *root; @@ -2427,28 +2431,53 @@ static int mmc_blk_add_debugfs(struct mmc_card *card) root = card->debugfs_root; if (mmc_card_mmc(card) || mmc_card_sd(card)) { - if (!debugfs_create_file("status", S_IRUSR, root, card, - &mmc_dbg_card_status_fops)) + md->status_dentry = + debugfs_create_file("status", S_IRUSR, root, card, + &mmc_dbg_card_status_fops); + if (!md->status_dentry) return -EIO; } if (mmc_card_mmc(card)) { - if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, - &mmc_dbg_ext_csd_fops)) + md->ext_csd_dentry = + debugfs_create_file("ext_csd", S_IRUSR, root, card, + &mmc_dbg_ext_csd_fops); + if (!md->ext_csd_dentry) return -EIO; } return 0; } +static void mmc_blk_remove_debugfs(struct mmc_card *card, + struct mmc_blk_data *md) +{ + if (!card->debugfs_root) + return; + + if (!IS_ERR_OR_NULL(md->status_dentry)) { + debugfs_remove(md->status_dentry); + md->status_dentry = NULL; + } + + if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) { + debugfs_remove(md->ext_csd_dentry); + md->ext_csd_dentry = NULL; + } +} #else -static int mmc_blk_add_debugfs(struct mmc_card *card) +static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) { return 0; } 
+static void mmc_blk_remove_debugfs(struct mmc_card *card, + struct mmc_blk_data *md) +{ +} + #endif /* CONFIG_DEBUG_FS */ static int mmc_blk_probe(struct mmc_card *card) @@ -2488,7 +2517,7 @@ static int mmc_blk_probe(struct mmc_card *card) } /* Add two debugfs entries */ - mmc_blk_add_debugfs(card); + mmc_blk_add_debugfs(card, md); pm_runtime_set_autosuspend_delay(&card->dev, 3000); pm_runtime_use_autosuspend(&card->dev); @@ -2514,6 +2543,7 @@ static void mmc_blk_remove(struct mmc_card *card) { struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + mmc_blk_remove_debugfs(card, md); mmc_blk_remove_parts(card, md); pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 01e459a34f3321..0f4a7d7b262614 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -314,4 +314,5 @@ void mmc_add_card_debugfs(struct mmc_card *card) void mmc_remove_card_debugfs(struct mmc_card *card) { debugfs_remove_recursive(card->debugfs_root); + card->debugfs_root = NULL; } From e6aa6389263d0db52ea674ed45eb591d21585eca Mon Sep 17 00:00:00 2001 From: Bastian Stender Date: Tue, 28 Nov 2017 09:24:06 +0100 Subject: [PATCH 0296/3326] mmc: core: prepend 0x to pre_eol_info entry in sysfs commit 80a780a167d9267c72867b806142bd6ec69ba123 upstream. The sysfs entry "pre_eol_info" was missing the 0x prefix to identify it as hex formatted. Fixes: 46bc5c408e4e ("mmc: core: Export device lifetime information through sysfs") Signed-off-by: Bastian Stender Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/core/mmc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 36217ad5e9b1fb..b935ad895701ed 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -780,7 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv); -MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info); +MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info); MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n", card->ext_csd.device_life_time_est_typ_a, card->ext_csd.device_life_time_est_typ_b); From 1ef1173c4864c06b6104f386c2694aa740cf02ee Mon Sep 17 00:00:00 2001 From: Bastian Stender Date: Tue, 28 Nov 2017 09:24:07 +0100 Subject: [PATCH 0297/3326] mmc: core: prepend 0x to OCR entry in sysfs commit c892b0d81705c566f575e489efc3c50762db1bde upstream. The sysfs entry "ocr" was missing the 0x prefix to identify it as hex formatted. 
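A two-line demonstration of the ambiguity being fixed here and in the previous patch (the OCR value below is made up):

#include <stdio.h>

int main(void)
{
        unsigned int ocr = 0x00300080;  /* plausible-looking OCR value */

        printf("%08x\n", ocr);          /* "00300080" - reads like decimal */
        printf("0x%08x\n", ocr);        /* "0x00300080" - unambiguously hex */
        return 0;
}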
Fixes: 5fb06af7a33b ("mmc: core: Extend sysfs with OCR register") Signed-off-by: Bastian Stender [Ulf: Amended change to also cover SD-cards] Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/core/mmc.c | 2 +- drivers/mmc/core/sd.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index b935ad895701ed..bad5c1bf4ed9f8 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -790,7 +790,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n", MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size); MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult); MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors); -MMC_DEV_ATTR(ocr, "%08x\n", card->ocr); +MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr); MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en); static ssize_t mmc_fwrev_show(struct device *dev, diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 4fd1620b732d09..eb9de213496799 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -675,7 +675,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); -MMC_DEV_ATTR(ocr, "%08x\n", card->ocr); +MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr); static ssize_t mmc_dsr_show(struct device *dev, From 450d75882552d59bb03b1ccb0999e98f5b227532 Mon Sep 17 00:00:00 2001 From: Lv Zheng Date: Tue, 26 Sep 2017 16:54:09 +0800 Subject: [PATCH 0298/3326] ACPI / EC: Fix regression related to PM ops support in ECDT device commit a64a62ce9a380213dc9e192f762266d70c9b40ec upstream. On platforms (ASUS X550ZE and possibly all ASUS X series) with valid ECDT EC but invalid DSDT EC, EC PM ops won't be invoked as ECDT EC is not an ACPI device. Thus the following commit actually removed post-resume acpi_ec_enable_event() invocation for such platforms, and triggered a regression on them that after being resumed, EC (actually should be ECDT) driver stops handling EC events: Commit: c2b46d679b30c5c0d7eb47a21085943242bdd8dc Subject: ACPI / EC: Add PM operations to improve event handling for resume process Notice that the root cause actually is "ECDT is not an ACPI device" rather than "the timing of acpi_ec_enable_event() invocation", this patch fixes this issue by enumerating ECDT EC as an ACPI device. Due to the existence of the noirq stage, the ability of tuning the timing of acpi_ec_enable_event() invocation is still meaningful. This patch is a little bit different from the posted fix by moving acpi_config_boot_ec() from acpi_ec_ecdt_start() to acpi_ec_add() to make sure that EC event handling won't be stopped as long as the ACPI EC driver is bound. Thus the following sequence shouldn't disable EC event handling: unbind,suspend,resume,bind. Fixes: c2b46d679b30 (ACPI / EC: Add PM operations to improve event handling for resume process) Link: https://bugzilla.kernel.org/show_bug.cgi?id=196847 Reported-by: Luya Tshimbalanga Tested-by: Luya Tshimbalanga Signed-off-by: Lv Zheng Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/ec.c | 69 ++++++++++++++++++++++++------------- drivers/acpi/internal.h | 1 + drivers/acpi/scan.c | 21 +++++++++++ include/acpi/acpi_bus.h | 1 + include/acpi/acpi_drivers.h | 1 + 5 files changed, 69 insertions(+), 24 deletions(-) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 82b3ce5e937e8b..df842465634a9d 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1597,32 +1597,41 @@ static int acpi_ec_add(struct acpi_device *device) { struct acpi_ec *ec = NULL; int ret; + bool is_ecdt = false; + acpi_status status; strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_EC_CLASS); - ec = acpi_ec_alloc(); - if (!ec) - return -ENOMEM; - if (ec_parse_device(device->handle, 0, ec, NULL) != - AE_CTRL_TERMINATE) { + if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) { + is_ecdt = true; + ec = boot_ec; + } else { + ec = acpi_ec_alloc(); + if (!ec) + return -ENOMEM; + status = ec_parse_device(device->handle, 0, ec, NULL); + if (status != AE_CTRL_TERMINATE) { ret = -EINVAL; goto err_alloc; + } } if (acpi_is_boot_ec(ec)) { - boot_ec_is_ecdt = false; - /* - * Trust PNP0C09 namespace location rather than ECDT ID. - * - * But trust ECDT GPE rather than _GPE because of ASUS quirks, - * so do not change boot_ec->gpe to ec->gpe. - */ - boot_ec->handle = ec->handle; - acpi_handle_debug(ec->handle, "duplicated.\n"); - acpi_ec_free(ec); - ec = boot_ec; - ret = acpi_config_boot_ec(ec, ec->handle, true, false); + boot_ec_is_ecdt = is_ecdt; + if (!is_ecdt) { + /* + * Trust PNP0C09 namespace location rather than + * ECDT ID. But trust ECDT GPE rather than _GPE + * because of ASUS quirks, so do not change + * boot_ec->gpe to ec->gpe. + */ + boot_ec->handle = ec->handle; + acpi_handle_debug(ec->handle, "duplicated.\n"); + acpi_ec_free(ec); + ec = boot_ec; + } + ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt); } else ret = acpi_ec_setup(ec, true); if (ret) @@ -1635,8 +1644,10 @@ static int acpi_ec_add(struct acpi_device *device) ret = !!request_region(ec->command_addr, 1, "EC cmd"); WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); - /* Reprobe devices depending on the EC */ - acpi_walk_dep_device_list(ec->handle); + if (!is_ecdt) { + /* Reprobe devices depending on the EC */ + acpi_walk_dep_device_list(ec->handle); + } acpi_handle_debug(ec->handle, "enumerated.\n"); return 0; @@ -1692,6 +1703,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context) static const struct acpi_device_id ec_device_ids[] = { {"PNP0C09", 0}, + {ACPI_ECDT_HID, 0}, {"", 0}, }; @@ -1764,11 +1776,14 @@ static int __init acpi_ec_ecdt_start(void) * Note: ec->handle can be valid if this function is called after * acpi_ec_add(), hence the fast path. */ - if (boot_ec->handle != ACPI_ROOT_OBJECT) - handle = boot_ec->handle; - else if (!acpi_ec_ecdt_get_handle(&handle)) - return -ENODEV; - return acpi_config_boot_ec(boot_ec, handle, true, true); + if (boot_ec->handle == ACPI_ROOT_OBJECT) { + if (!acpi_ec_ecdt_get_handle(&handle)) + return -ENODEV; + boot_ec->handle = handle; + } + + /* Register to ACPI bus with PM ops attached */ + return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC); } #if 0 @@ -2020,6 +2035,12 @@ int __init acpi_ec_init(void) /* Drivers must be started after acpi_ec_query_init() */ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); + /* + * Register ECDT to ACPI bus only when PNP0C09 probe fails. 
This is + * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT + * settings but invalid DSDT settings. + * https://bugzilla.kernel.org/show_bug.cgi?id=196847 + */ ecdt_fail = acpi_ec_ecdt_start(); return ecdt_fail && dsdt_fail ? -ENODEV : 0; } diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 4361c4415b4f4c..ede83d38beed50 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -115,6 +115,7 @@ bool acpi_device_is_present(const struct acpi_device *adev); bool acpi_device_is_battery(struct acpi_device *adev); bool acpi_device_is_first_physical_node(struct acpi_device *adev, const struct device *dev); +int acpi_bus_register_early_device(int type); /* -------------------------------------------------------------------------- Device Matching and Notification diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 602f8ff212f2c6..2f2f50322ffb7f 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1024,6 +1024,9 @@ static void acpi_device_get_busid(struct acpi_device *device) case ACPI_BUS_TYPE_SLEEP_BUTTON: strcpy(device->pnp.bus_id, "SLPF"); break; + case ACPI_BUS_TYPE_ECDT_EC: + strcpy(device->pnp.bus_id, "ECDT"); + break; default: acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer); /* Clean up trailing underscores (if any) */ @@ -1304,6 +1307,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, case ACPI_BUS_TYPE_SLEEP_BUTTON: acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF); break; + case ACPI_BUS_TYPE_ECDT_EC: + acpi_add_id(pnp, ACPI_ECDT_HID); + break; } } @@ -2049,6 +2055,21 @@ void acpi_bus_trim(struct acpi_device *adev) } EXPORT_SYMBOL_GPL(acpi_bus_trim); +int acpi_bus_register_early_device(int type) +{ + struct acpi_device *device = NULL; + int result; + + result = acpi_add_single_object(&device, NULL, + type, ACPI_STA_DEFAULT); + if (result) + return result; + + device->flags.match_driver = true; + return device_attach(&device->dev); +} +EXPORT_SYMBOL_GPL(acpi_bus_register_early_device); + static int acpi_bus_scan_fixed(void) { int result = 0; diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index fa1505292f6cda..324a04df3785b9 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -105,6 +105,7 @@ enum acpi_bus_device_type { ACPI_BUS_TYPE_THERMAL, ACPI_BUS_TYPE_POWER_BUTTON, ACPI_BUS_TYPE_SLEEP_BUTTON, + ACPI_BUS_TYPE_ECDT_EC, ACPI_BUS_DEVICE_TYPE_COUNT }; diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index 29c691265b4935..14499757338f65 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -58,6 +58,7 @@ #define ACPI_VIDEO_HID "LNXVIDEO" #define ACPI_BAY_HID "LNXIOBAY" #define ACPI_DOCK_HID "LNXDOCK" +#define ACPI_ECDT_HID "LNXEC" /* Quirk for broken IBM BIOSes */ #define ACPI_SMBUS_IBM_HID "SMBUSIBM" From 1ad53c57966a1a295c0ea5a8a7d6e85d3d75253a Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Mon, 27 Nov 2017 20:46:22 +0100 Subject: [PATCH 0299/3326] eeprom: at24: fix reading from 24MAC402/24MAC602 commit 644a1f19c6c8393d0c4168a5adf79056da6822eb upstream. Chip datasheet mentions that word addresses other than the actual start position of the MAC delivers undefined results. So fix this. Current implementation doesn't work due to this wrong offset. 
Fixes: 0b813658c115 ("eeprom: at24: add support for at24mac series") Signed-off-by: Heiner Kallweit Signed-off-by: Bartosz Golaszewski Signed-off-by: Greg Kroah-Hartman --- drivers/misc/eeprom/at24.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 764ff5df0dbc34..bfd1a6f4ebf697 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -365,7 +365,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf, memset(msg, 0, sizeof(msg)); msg[0].addr = client->addr; msg[0].buf = addrbuf; - addrbuf[0] = 0x90 + offset; + /* EUI-48 starts from 0x9a, EUI-64 from 0x98 */ + addrbuf[0] = 0xa0 - at24->chip.byte_len + offset; msg[0].len = 1; msg[1].addr = client->addr; msg[1].flags = I2C_M_RD; From 656155d07658e84ac6ab6f6eceac67b807d6fedb Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 27 Nov 2017 22:06:13 +0100 Subject: [PATCH 0300/3326] eeprom: at24: correctly set the size for at24mac402 commit 5478e478eee3b096b8d998d4ed445da30da2dfbc upstream. There's an ilog2() expansion in AT24_DEVICE_MAGIC() which rounds down the actual size of EUI-48 byte array in at24mac402 eeproms to 4 from 6, making it impossible to read it all. Fix it by manually adjusting the value in probe(). This patch contains a temporary fix that is suitable for stable branches. Eventually we'll probably remove the call to ilog2() while converting the magic values to actual structs. Fixes: 0b813658c115 ("eeprom: at24: add support for at24mac series") Signed-off-by: Bartosz Golaszewski Reviewed-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/misc/eeprom/at24.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index bfd1a6f4ebf697..e0d018e8bb8e14 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -632,6 +632,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) dev_warn(&client->dev, "page_size looks suspicious (no power of 2)!\n"); + /* + * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while + * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4. + * + * Eventually we'll get rid of the magic values altoghether in favor of + * real structs, but for now just manually set the right size. + */ + if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4) + chip.byte_len = 6; + /* Use I2C operations unless we're stuck with SMBus extensions. */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { if (chip.flags & AT24_FLAG_ADDR16) From 52d9bb958f080f0a548139676dbf6a87a40af2a0 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 24 Nov 2017 07:47:50 +0100 Subject: [PATCH 0301/3326] eeprom: at24: check at24_read/write arguments commit d9bcd462daf34aebb8de9ad7f76de0198bb5a0f0 upstream. So far we completely rely on the caller to provide valid arguments. To be on the safe side perform an own sanity check. 
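A minimal userspace sketch of the added range check, with made-up sizes; the real driver compares against at24->chip.byte_len in at24_read() and at24_write().

#include <stdio.h>
#include <errno.h>

/* Reject accesses that would run past the end of the device. */
static int check_access(size_t off, size_t count, size_t byte_len)
{
        if (!count)
                return 0;               /* nothing to transfer */
        if (off + count > byte_len)
                return -EINVAL;         /* would run past the EEPROM */
        return 0;
}

int main(void)
{
        printf("%d\n", check_access(0, 256, 256));      /* 0: fits exactly */
        printf("%d\n", check_access(250, 16, 256));     /* -22: out of range */
        return 0;
}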
Signed-off-by: Heiner Kallweit Signed-off-by: Bartosz Golaszewski Signed-off-by: Greg Kroah-Hartman --- drivers/misc/eeprom/at24.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index e0d018e8bb8e14..372b2060fbba1e 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -507,6 +507,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count) if (unlikely(!count)) return count; + if (off + count > at24->chip.byte_len) + return -EINVAL; + /* * Read data from chip, protecting against concurrent updates * from this host, but not from other I2C masters. @@ -539,6 +542,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) if (unlikely(!count)) return -EINVAL; + if (off + count > at24->chip.byte_len) + return -EINVAL; + /* * Write data to chip, protecting against concurrent updates * from this host, but not from other I2C masters. From 9c71f896bb8f3b7bb228ece70341b26b2ff32ae9 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 22 Nov 2017 12:28:17 +0100 Subject: [PATCH 0302/3326] i2c: i801: Fix Failed to allocate irq -2147483648 error commit 6e0c9507bf51e1517a80ad0ac171e5402528fcef upstream. On Apollo Lake devices the BIOS does not set up IRQ routing for the i801 SMBUS controller IRQ, so we end up with dev->irq set to IRQ_NOTCONNECTED. Detect this and do not try to use the irq in this case silencing: i801_smbus 0000:00:1f.1: Failed to allocate irq -2147483648: -107 BugLink: https://communities.intel.com/thread/114759 Signed-off-by: Hans de Goede Reviewed-by: Jean Delvare Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-i801.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 9e12a53ef7b8cf..8eac00efadc1ad 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1617,6 +1617,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) /* Default timeout in interrupt mode: 200 ms */ priv->adapter.timeout = HZ / 5; + if (dev->irq == IRQ_NOTCONNECTED) + priv->features &= ~FEATURE_IRQ; + if (priv->features & FEATURE_IRQ) { u16 pcictl, pcists; From 6e8aca2095790b90217b759a0d5e2ec5138bbfd2 Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Thu, 23 Nov 2017 09:08:57 +0530 Subject: [PATCH 0303/3326] cxl: Check if vphb exists before iterating over AFU devices commit 12841f87b7a8ceb3d54f171660f72a86941bfcb3 upstream. During an eeh a kernel-oops is reported if no vPHB is allocated to the AFU. This happens as during AFU init, an error in creation of vPHB is a non-fatal error. Hence afu->phb should always be checked for NULL before iterating over it for the virtual AFU pci devices. This patch fixes the kenel-oops by adding a NULL pointer check for afu->phb before it is dereferenced. 
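The fix is a plain guard in front of every walk of afu->phb. A simplified stand-alone sketch of the pattern, using stand-in types rather than the real cxl structures:

#include <stdio.h>

struct phb { int nr_devices; };              /* stand-in for the vPHB */
struct afu { struct phb *phb; };             /* phb may be NULL */

static int count_devices(const struct afu *afu)
{
        if (afu->phb == NULL)   /* vPHB creation failed, nothing to walk */
                return 0;
        return afu->phb->nr_devices;
}

int main(void)
{
        struct phb phb = { .nr_devices = 2 };
        struct afu with_phb = { .phb = &phb };
        struct afu without_phb = { .phb = NULL };

        printf("%d %d\n", count_devices(&with_phb), count_devices(&without_phb));
        return 0;
}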
Fixes: 9e8df8a21963 ("cxl: EEH support") Signed-off-by: Vaibhav Jain Acked-by: Andrew Donnellan Acked-by: Frederic Barrat Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cxl/pci.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 3ba04f371380d2..81093f8157a9e7 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -2043,6 +2043,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu, /* There should only be one entry, but go through the list * anyway */ + if (afu->phb == NULL) + return result; + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { if (!afu_dev->driver) continue; @@ -2084,8 +2087,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, * Tell the AFU drivers; but we don't care what they * say, we're going away. */ - if (afu->phb != NULL) - cxl_vphb_error_detected(afu, state); + cxl_vphb_error_detected(afu, state); } return PCI_ERS_RESULT_DISCONNECT; } @@ -2225,6 +2227,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) if (cxl_afu_select_best_mode(afu)) goto err; + if (afu->phb == NULL) + continue; + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { /* Reset the device context. * TODO: make this less disruptive @@ -2287,6 +2292,9 @@ static void cxl_pci_resume(struct pci_dev *pdev) for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; + if (afu->phb == NULL) + continue; + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { if (afu_dev->driver && afu_dev->driver->err_handler && afu_dev->driver->err_handler->resume) From f5b41b366273ff51c7a62643f8667784eb349fba Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Fri, 24 Nov 2017 15:14:25 -0800 Subject: [PATCH 0304/3326] bcache: Fix building error on MIPS commit cf33c1ee5254c6a430bc1538232b49c3ea13e613 upstream. This patch try to fix the building error on MIPS. The reason is MIPS has already defined the PTR macro, which conflicts with the PTR macro in include/uapi/linux/bcache.h. 
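Only the macro name changes; the packed key layout stays the same. A stand-alone sketch of that packing, with the field extraction written out by hand for illustration (the kernel header provides its own PTR_* accessors):

#include <stdio.h>
#include <stdint.h>

/* Same packing as the renamed macro: device in the top bits, bucket
 * offset in the middle, generation in the low byte. */
#define MAKE_PTR(gen, offset, dev) \
        ((((uint64_t)(dev)) << 51) | ((uint64_t)(offset)) << 8 | (gen))

int main(void)
{
        uint64_t p = MAKE_PTR(0x2a, 0x12345, 3);

        printf("ptr    = 0x%016llx\n", (unsigned long long)p);
        printf("gen    = 0x%llx\n", (unsigned long long)(p & 0xff));
        printf("offset = 0x%llx\n",
               (unsigned long long)((p >> 8) & ((1ULL << 43) - 1)));
        printf("dev    = 0x%llx\n", (unsigned long long)(p >> 51));
        return 0;
}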
[fixed by mlyle: corrected a line-length issue] Signed-off-by: Huacai Chen Reviewed-by: Michael Lyle Signed-off-by: Michael Lyle Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/alloc.c | 2 +- drivers/md/bcache/extents.c | 2 +- drivers/md/bcache/journal.c | 2 +- include/uapi/linux/bcache.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index c9934139d6096d..934b1fce4ce1f0 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -480,7 +480,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, if (b == -1) goto err; - k->ptr[i] = PTR(ca->buckets[b].gen, + k->ptr[i] = MAKE_PTR(ca->buckets[b].gen, bucket_to_sector(c, b), ca->sb.nr_this_dev); diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index 41c238fc373380..f9d391711595fb 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey return false; for (i = 0; i < KEY_PTRS(l); i++) - if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || + if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) return false; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 02a98ddb592d3b..03cc0722ae4860 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -507,7 +507,7 @@ static void journal_reclaim(struct cache_set *c) continue; ja->cur_idx = next; - k->ptr[n++] = PTR(0, + k->ptr[n++] = MAKE_PTR(0, bucket_to_sector(c, ca->sb.d[ja->cur_idx]), ca->sb.nr_this_dev); } diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index 90fc490f973f9e..821f71a2e48fa6 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -91,7 +91,7 @@ PTR_FIELD(PTR_GEN, 0, 8) #define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1) -#define PTR(gen, offset, dev) \ +#define MAKE_PTR(gen, offset, dev) \ ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen) /* Bkey utility code */ From 260c6625c85e7e1a5db0b5ece9944a4010c9bade Mon Sep 17 00:00:00 2001 From: Coly Li Date: Mon, 30 Oct 2017 14:46:31 -0700 Subject: [PATCH 0305/3326] bcache: only permit to recovery read error when cache device is clean commit d59b23795933678c9638fd20c942d2b4f3cd6185 upstream. When bcache does read I/Os, for example in writeback or writethrough mode, if a read request on the cache device fails, bcache will try to recover the request by reading from the cached device. If the data on the cached device is not synced with the cache device, the requester will get stale data. For critical storage systems like databases, providing stale data from recovery may result in application-level data corruption, which is unacceptable. With this patch, for a failed read request in writeback or writethrough mode, recovery of a recoverable read request only happens when the cache device is clean. That is to say, all data on the cached device is up to date. For other cache modes in bcache, read requests will never hit cached_dev_read_error(), so they don't need this patch. Please note, because the cache mode can be switched arbitrarily at run time, a writethrough mode might be switched from a writeback mode. Therefore checking dc->has_dirty in writethrough mode still makes sense. Changelog: V4: Fix parens error pointed out by Michael Lyle.
v3: By response from Kent Overstreet, he thinks recovering stale data is a bug to fix, and an option to permit it is unnecessary, so in this version the sysfs file is removed. v2: rename sysfs entry from allow_stale_data_on_failure to allow_stale_data_on_failure, and fix the confusing commit log. v1: initial patch posted. [small change to patch comment spelling by mlyle] Signed-off-by: Coly Li Signed-off-by: Michael Lyle Reported-by: Arne Wolf Reviewed-by: Michael Lyle Cc: Kent Overstreet Cc: Nix Cc: Kai Krakow Cc: Eric Wheeler Cc: Junhui Tang Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/request.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 3475d6628e219f..14c269241b3437 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -698,8 +698,16 @@ static void cached_dev_read_error(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); struct bio *bio = &s->bio.bio; + struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - if (s->recoverable) { + /* + * If cache device is dirty (dc->has_dirty is non-zero), then + * recovery a failed read request from cached device may get a + * stale data back. So read failure recovery is only permitted + * when cache device is clean. + */ + if (s->recoverable && + (dc && !atomic_read(&dc->has_dirty))) { /* Retry from the backing device: */ trace_bcache_read_retry(s->orig_bio); From 1cafc451a955c44ec7eba47bae340d9ca5ccb483 Mon Sep 17 00:00:00 2001 From: Rui Hua Date: Fri, 24 Nov 2017 15:14:26 -0800 Subject: [PATCH 0306/3326] bcache: recover data from backing when data is clean commit e393aa2446150536929140739f09c6ecbcbea7f0 upstream. When we send a read request and hit clean data in the cache device, there is a situation called a cache read race in bcache (see the comment at the tail of cache_lookup(); the following explanation is just copied from there): The bucket we're reading from might be reused while our bio is in flight, and we could then end up reading the wrong data. We guard against this by checking (in bch_cache_read_endio()) if the pointer is stale again; if so, we treat it as an error (s->iop.error = -EINTR) and reread from the backing device (but we don't pass that error up anywhere) It should be noted that a cache read race happens under normal circumstances, not only when the SSD fails; it is counted and shown in /sys/fs/bcache/XXX/internal/cache_read_races. Without this patch, when we use writeback mode, we will never reread from the backing device when a cache read race happens, until the whole cache device is clean, because the condition (s->recoverable && (dc && !atomic_read(&dc->has_dirty))) is false in cached_dev_read_error(). In this situation, s->iop.error (= -EINTR) will be passed up, and the user will finally receive -EINTR when the bio ends, which is not suitable and looks weird to the upper application. In this patch, we use s->read_dirty_data to judge whether the read request hit dirty data in the cache device; it is safe to reread data from the backing device when the read request hit clean data. This can not only handle the cache read race, but also recover data when a read request from the cache device fails.
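Reduced to its core, the decision this patch changes looks like the sketch below (simplified, not the real struct search): recovery is keyed off whether the request touched dirty cache data, rather than whether the whole cache device is clean.

#include <stdbool.h>
#include <stdio.h>

static bool can_retry_from_backing(bool recoverable, bool read_dirty_data)
{
        return recoverable && !read_dirty_data;
}

int main(void)
{
        /* clean data hit: safe to reread from the backing device */
        printf("%d\n", can_retry_from_backing(true, false));   /* 1 */
        /* dirty data hit: the backing device may be stale, fail the bio */
        printf("%d\n", can_retry_from_backing(true, true));    /* 0 */
        return 0;
}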
[edited by mlyle to fix up whitespace, commit log title, comment spelling] Fixes: d59b23795933 ("bcache: only permit to recovery read error when cache device is clean") Signed-off-by: Hua Rui Reviewed-by: Michael Lyle Reviewed-by: Coly Li Signed-off-by: Michael Lyle Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/request.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 14c269241b3437..14d13cab5cda4e 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -698,16 +698,15 @@ static void cached_dev_read_error(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); struct bio *bio = &s->bio.bio; - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); /* - * If cache device is dirty (dc->has_dirty is non-zero), then - * recovery a failed read request from cached device may get a - * stale data back. So read failure recovery is only permitted - * when cache device is clean. + * If read request hit dirty data (s->read_dirty_data is true), + * then recovery a failed read request from cached device may + * get a stale data back. So read failure recovery is only + * permitted when read request hit clean data in cache device, + * or when cache read race happened. */ - if (s->recoverable && - (dc && !atomic_read(&dc->has_dirty))) { + if (s->recoverable && !s->read_dirty_data) { /* Retry from the backing device: */ trace_bcache_read_retry(s->orig_bio); From de120fc9624162a3507091d6b92dd9037440c9d8 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Mon, 27 Nov 2017 17:31:00 +0100 Subject: [PATCH 0307/3326] hwmon: (jc42) optionally try to disable the SMBUS timeout commit 68615eb01f82256c19e41967bfb3eef902f77033 upstream. With a nxp,se97 chip on an atmel sama5d31 board, the I2C adapter driver is not always capable of avoiding the 25-35 ms timeout as specified by the SMBUS protocol. This may cause silent corruption of the last bit of any transfer, e.g. a one is read instead of a zero if the sensor chip times out. This also affects the eeprom half of the nxp-se97 chip, where this silent corruption was originally noticed. Other I2C adapters probably suffer similar issues, e.g. bit-banging comes to mind as risky... The SMBUS register in the nxp chip is not a standard Jedec register, but it is not special to the nxp chips either, at least the atmel chips have the same mechanism. Therefore, do not special case this on the manufacturer, it is opt-in via the device property anyway. Signed-off-by: Peter Rosin Acked-by: Rob Herring Signed-off-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- .../devicetree/bindings/hwmon/jc42.txt | 4 ++++ drivers/hwmon/jc42.c | 21 +++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/Documentation/devicetree/bindings/hwmon/jc42.txt b/Documentation/devicetree/bindings/hwmon/jc42.txt index 07a250498fbb4c..f569db58f64a10 100644 --- a/Documentation/devicetree/bindings/hwmon/jc42.txt +++ b/Documentation/devicetree/bindings/hwmon/jc42.txt @@ -34,6 +34,10 @@ Required properties: - reg: I2C address +Optional properties: +- smbus-timeout-disable: When set, the smbus timeout function will be disabled. + This is not supported on all chips. 
+ Example: temp-sensor@1a { diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 5f11dc014ed619..e5234f953a6d16 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -22,6 +22,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include #include #include #include @@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = { #define JC42_REG_TEMP 0x05 #define JC42_REG_MANID 0x06 #define JC42_REG_DEVICEID 0x07 +#define JC42_REG_SMBUS 0x22 /* NXP and Atmel, possibly others? */ /* Status bits in temperature register */ #define JC42_ALARM_CRIT_BIT 15 @@ -75,6 +77,9 @@ static const unsigned short normal_i2c[] = { #define GT_MANID 0x1c68 /* Giantec */ #define GT_MANID2 0x132d /* Giantec, 2nd mfg ID */ +/* SMBUS register */ +#define SMBUS_STMOUT BIT(7) /* SMBus time-out, active low */ + /* Supported chips */ /* Analog Devices */ @@ -495,6 +500,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id) data->extended = !!(cap & JC42_CAP_RANGE); + if (device_property_read_bool(dev, "smbus-timeout-disable")) { + int smbus; + + /* + * Not all chips support this register, but from a + * quick read of various datasheets no chip appears + * incompatible with the below attempt to disable + * the timeout. And the whole thing is opt-in... + */ + smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS); + if (smbus < 0) + return smbus; + i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS, + smbus | SMBUS_STMOUT); + } + config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG); if (config < 0) return config; From 5e19169a88be15d6c845e93d62e448ee06a14b2f Mon Sep 17 00:00:00 2001 From: Jeff Lien Date: Tue, 21 Nov 2017 10:44:37 -0600 Subject: [PATCH 0308/3326] nvme-pci: add quirk for delay before CHK RDY for WDC SN200 commit 8c97eeccf0ad8783c057830119467b877bdfced7 upstream. And increase the existing delay to cover this device as well. Signed-off-by: Jeff Lien Signed-off-by: Christoph Hellwig Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/nvme.h | 2 +- drivers/nvme/host/pci.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index d3f3c444751570..044af553204ca4 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -108,7 +108,7 @@ static inline struct nvme_request *nvme_req(struct request *req) * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was * found empirically. 
*/ -#define NVME_QUIRK_DELAY_AMOUNT 2000 +#define NVME_QUIRK_DELAY_AMOUNT 2300 enum nvme_ctrl_state { NVME_CTRL_NEW, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 3f5a04c586cefd..75539f7c58b9a4 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2519,6 +2519,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ From 23506bc7fdb2313c63d8811967fcf82d5764b973 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 14 Nov 2017 17:19:29 -0500 Subject: [PATCH 0309/3326] Revert "drm/radeon: dont switch vt on suspend" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 18c437caa5b18a235dd65cec224eab54bebcee65 upstream. Fixes distorted colors on some cards on resume from suspend. This reverts commit b9729b17a414f99c61f4db9ac9f9ed987fa0cbfe. Bug: https://bugs.freedesktop.org/show_bug.cgi?id=98832 Bug: https://bugs.freedesktop.org/show_bug.cgi?id=99163 Bug: https://bugzilla.kernel.org/show_bug.cgi?id=107001 Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/radeon/radeon_fb.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index fd25361ac681bb..4ef967d1a9dec9 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper, } info->par = rfbdev; - info->skip_vt_switch = true; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { From 2d8b3a9b7853dbbf12c853674ef4403481ab1513 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 30 Sep 2017 11:13:28 +0300 Subject: [PATCH 0310/3326] drm/amdgpu: potential uninitialized variable in amdgpu_vce_ring_parse_cs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 40a9960b046290939b56ce8e51f365258f27f264 upstream. We shifted some code around in commit 9cca0b8e5df0 ("drm/amdgpu: move amdgpu_cs_sysvm_access_required into find_mapping") and now my static checker complains that "r" might not be initialized at the end of the function. I've reviewed the code, and that seems possible, but it's also possible I may have missed something. 
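A reduced illustration of the class of bug the initialization guards against; this is not the real command-stream parser, just the shape of the problem: if the loop body never runs, an uninitialized return value would be used.

#include <stdio.h>

static int parse(const int *cmds, int n)
{
        int r = 0;      /* without "= 0", n == 0 would return garbage */
        int i;

        for (i = 0; i < n; i++) {
                r = cmds[i] ? 0 : -1;   /* stand-in for per-command checks */
                if (r)
                        break;
        }
        return r;
}

int main(void)
{
        int cmds[] = { 1, 1, 0 };

        printf("%d\n", parse(cmds, 3));         /* -1: a check failed */
        printf("%d\n", parse(cmds, 0));         /* 0: nothing to parse */
        return 0;
}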
Reviewed-by: Christian König Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index c855366521abc5..9fc3d387eae3eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -647,7 +647,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) uint32_t allocated = 0; uint32_t tmp, handle = 0; uint32_t *size = &tmp; - int i, r, idx = 0; + int i, r = 0, idx = 0; p->job->vm = NULL; ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); From f182df2eacbe52100b82e193a7917fdbb4312dd1 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 30 Sep 2017 11:14:13 +0300 Subject: [PATCH 0311/3326] drm/amdgpu: Potential uninitialized variable in amdgpu_vm_update_directories() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 78aa02c713fcf19e9bc8511ab61a5fd6c877cc01 upstream. After commit ea09729c9302 ("drm/amdgpu: rework page directory filling v2") then it becomes a lot harder to verify that "r" is initialized. My static checker complains and so I've reviewed the code. It does look like it might be buggy... Anyway, it doesn't hurt to set "r" to zero at the start. Reviewed-by: Christian König Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index bd20ff01851227..4fb5e14d4879c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1201,7 +1201,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent) int amdgpu_vm_update_directories(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - int r; + int r = 0; r = amdgpu_vm_update_level(adev, vm, &vm->root, 0); if (r) From 402b1de75ce06404c96cf803e1fe0b5c685f7797 Mon Sep 17 00:00:00 2001 From: Ken Wang Date: Fri, 29 Sep 2017 15:41:43 +0800 Subject: [PATCH 0312/3326] drm/amdgpu: correct reference clock value on vega10 commit 76d6172b6fab16455af4b67bb18a3f66011592f8 upstream. Old value from bringup was wrong. Signed-off-by: Ken Wang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f2c3a49f73a005..3e59c766722cfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev) } static u32 soc15_get_xclk(struct amdgpu_device *adev) { - if (adev->asic_type == CHIP_VEGA10) - return adev->clock.spll.reference_freq/4; - else - return adev->clock.spll.reference_freq; + return adev->clock.spll.reference_freq; } From a00b7ea3df52da50097d22c9eb3b0eba3e504838 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 31 Oct 2017 09:36:13 +0100 Subject: [PATCH 0313/3326] drm/amdgpu: fix error handling in amdgpu_bo_do_create MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a695e43712242c354748e9bae5d137d4337a7694 upstream. 
The bo structure is freed up in case of an error, so we can't do any accounting if that happens. Signed-off-by: Christian König Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9e495da0bb03c8..ffe4839803623b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -391,6 +391,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, resv, &amdgpu_ttm_bo_destroy); + if (unlikely(r != 0)) + return r; + bytes_moved = atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved; if (adev->mc.visible_vram_size < adev->mc.real_vram_size && @@ -400,9 +403,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, else amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0); - if (unlikely(r != 0)) - return r; - if (kernel) bo->tbo.priority = 1; From ecd08e684cd1d138bf894e632e0d141f5e7c4140 Mon Sep 17 00:00:00 2001 From: ozeng Date: Tue, 6 Jun 2017 10:53:28 -0500 Subject: [PATCH 0314/3326] drm/amdgpu: Properly allocate VM invalidate eng v2 commit c5066129af4436ab0da8eefe4289774a5409706d upstream. v1: Properly allocate TLB invalidation engine to avoid conflict. v2: Added comments to codes Signed-off-by: Oak Zeng Reviewed-by: Alex Deucher Acked-by: Christian Konig Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index d04d0b12321203..6dc0f6e346e7fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -395,7 +395,16 @@ static int gmc_v9_0_early_init(void *handle) static int gmc_v9_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 }; + /* + * The latest engine allocation on gfx9 is: + * Engine 0, 1: idle + * Engine 2, 3: firmware + * Engine 4~13: amdgpu ring, subject to change when ring number changes + * Engine 14~15: idle + * Engine 16: kfd tlb invalidation + * Engine 17: Gart flushes + */ + unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; unsigned i; for(i = 0; i < adev->num_rings; ++i) { @@ -408,9 +417,9 @@ static int gmc_v9_0_late_init(void *handle) ring->funcs->vmhub); } - /* Engine 17 is used for GART flushes */ + /* Engine 16 is used for KFD and 17 for GART flushes */ for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) - BUG_ON(vm_inv_eng[i] > 17); + BUG_ON(vm_inv_eng[i] > 16); return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); } From d48221f3399764748f706cc1126b5d6465e27d87 Mon Sep 17 00:00:00 2001 From: Ken Wang Date: Wed, 8 Nov 2017 14:48:50 +0800 Subject: [PATCH 0315/3326] drm/amdgpu: Remove check which is not valid for certain VBIOS commit ab6613b7eaefe85dadfc86025e901c55d71c0379 upstream. Fixes vbios fetching on certain headless boards. 
Signed-off-by: Ken Wang Acked-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index c21adf60a7f200..057e1ecd83cec5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size) return false; } - tmp = bios[0x18] | (bios[0x19] << 8); - if (bios[tmp + 0x14] != 0x0) { - DRM_INFO("Not an x86 BIOS ROM\n"); - return false; - } - bios_header_start = bios[0x48] | (bios[0x49] << 8); if (!bios_header_start) { DRM_INFO("Can't locate bios header\n"); From 45d1765032b645a2e86a606f7d173d4bf97a9810 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 4 Sep 2017 20:58:45 +0200 Subject: [PATCH 0316/3326] drm/ttm: fix ttm_bo_cleanup_refs_or_queue once more MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 378e2d5b504fe0231c557751e58b80fcf717cc20 upstream. With shared reservation objects __ttm_bo_reserve() can easily fail even on destroyed BOs. This prevents correct handling when we need to individualize the reservation object. Fix this by individualizing the object before even trying to reserve it. Signed-off-by: Christian König Acked-by: Chunming Zhou Signed-off-by: Alex Deucher Signed-off-by: Lyude Paul Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/ttm/ttm_bo.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 180ce629641618..bee77d31895b3d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -440,28 +440,29 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) struct ttm_bo_global *glob = bo->glob; int ret; + ret = ttm_bo_individualize_resv(bo); + if (ret) { + /* Last resort, if we fail to allocate memory for the + * fences block for the BO to become idle + */ + reservation_object_wait_timeout_rcu(bo->resv, true, false, + 30 * HZ); + spin_lock(&glob->lru_lock); + goto error; + } + spin_lock(&glob->lru_lock); ret = __ttm_bo_reserve(bo, false, true, NULL); - if (!ret) { - if (!ttm_bo_wait(bo, false, true)) { + if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) { ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); + if (bo->resv != &bo->ttm_resv) + reservation_object_unlock(&bo->ttm_resv); ttm_bo_cleanup_memtype_use(bo); - return; } - ret = ttm_bo_individualize_resv(bo); - if (ret) { - /* Last resort, if we fail to allocate memory for the - * fences block for the BO to become idle and free it. 
- */ - spin_unlock(&glob->lru_lock); - ttm_bo_wait(bo, true, true); - ttm_bo_cleanup_memtype_use(bo); - return; - } ttm_bo_flush_all_fences(bo); /* @@ -474,11 +475,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ttm_bo_add_to_lru(bo); } - if (bo->resv != &bo->ttm_resv) - reservation_object_unlock(&bo->ttm_resv); __ttm_bo_unreserve(bo); } + if (bo->resv != &bo->ttm_resv) + reservation_object_unlock(&bo->ttm_resv); +error: kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); spin_unlock(&glob->lru_lock); From 509536b227fb35ed34d144ff4280aa78605ab85e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 4 Sep 2017 21:02:45 +0200 Subject: [PATCH 0317/3326] dma-buf: make reservation_object_copy_fences rcu save MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 39e16ba16c147e662bf9fbcee9a99d70d420382f upstream. Stop requiring that the src reservation object is locked for this operation. Acked-by: Chunming Zhou Signed-off-by: Christian König Signed-off-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/1504551766-5093-1-git-send-email-deathsimple@vodafone.de Signed-off-by: Lyude Paul Signed-off-by: Greg Kroah-Hartman --- drivers/dma-buf/reservation.c | 56 ++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 14 deletions(-) diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index dec3a815455d67..b44d9d7db347b4 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence); * @dst: the destination reservation object * @src: the source reservation object * -* Copy all fences from src to dst. Both src->lock as well as dst-lock must be -* held. +* Copy all fences from src to dst. dst-lock must be held. 
*/ int reservation_object_copy_fences(struct reservation_object *dst, struct reservation_object *src) @@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst, size_t size; unsigned i; - src_list = reservation_object_get_list(src); + rcu_read_lock(); + src_list = rcu_dereference(src->fence); +retry: if (src_list) { - size = offsetof(typeof(*src_list), - shared[src_list->shared_count]); + unsigned shared_count = src_list->shared_count; + + size = offsetof(typeof(*src_list), shared[shared_count]); + rcu_read_unlock(); + dst_list = kmalloc(size, GFP_KERNEL); if (!dst_list) return -ENOMEM; - dst_list->shared_count = src_list->shared_count; - dst_list->shared_max = src_list->shared_count; - for (i = 0; i < src_list->shared_count; ++i) - dst_list->shared[i] = - dma_fence_get(src_list->shared[i]); + rcu_read_lock(); + src_list = rcu_dereference(src->fence); + if (!src_list || src_list->shared_count > shared_count) { + kfree(dst_list); + goto retry; + } + + dst_list->shared_count = 0; + dst_list->shared_max = shared_count; + for (i = 0; i < src_list->shared_count; ++i) { + struct dma_fence *fence; + + fence = rcu_dereference(src_list->shared[i]); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence->flags)) + continue; + + if (!dma_fence_get_rcu(fence)) { + kfree(dst_list); + src_list = rcu_dereference(src->fence); + goto retry; + } + + if (dma_fence_is_signaled(fence)) { + dma_fence_put(fence); + continue; + } + + dst_list->shared[dst_list->shared_count++] = fence; + } } else { dst_list = NULL; } + new = dma_fence_get_rcu_safe(&src->fence_excl); + rcu_read_unlock(); + kfree(dst->staged); dst->staged = NULL; src_list = reservation_object_get_list(dst); - old = reservation_object_get_excl(dst); - new = reservation_object_get_excl(src); - - dma_fence_get(new); preempt_disable(); write_seqcount_begin(&dst->seq); From 99b0ea0391d1b49ed4a944a07ad57ee448b0541c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 13 Oct 2017 17:24:31 +0200 Subject: [PATCH 0318/3326] drm/amdgpu: reserve root PD while releasing it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 2642cf110d08a403f585a051e4cbf45a90b3adea upstream. Otherwise somebody could try to evict it at the same time and try to use half torn down structures. 
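The ordering can be modelled with an ordinary lock: tear down the shared structures only while holding the object's lock, so a concurrent user that also takes the lock can never observe them half freed. A simplified pthread sketch of that idea, plain userspace code rather than the real amdgpu/ttm reservation objects:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct page_dir {
        pthread_mutex_t lock;   /* stand-in for the BO reservation */
        int *tables;            /* stand-in for the page-table levels */
};

static void teardown(struct page_dir *pd)
{
        pthread_mutex_lock(&pd->lock);  /* "reserve" the root first */
        free(pd->tables);
        pd->tables = NULL;
        pthread_mutex_unlock(&pd->lock);
}

int main(void)
{
        struct page_dir pd;

        pthread_mutex_init(&pd.lock, NULL);
        pd.tables = calloc(4, sizeof(*pd.tables));
        teardown(&pd);
        printf("tables freed under the lock\n");
        return 0;
}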
Signed-off-by: Christian König Reviewed-and-Tested-by: Michel Dänzer Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Lyude Paul Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 4fb5e14d4879c9..863c6dd0123a2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2586,7 +2586,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdgpu_bo_va_mapping *mapping, *tmp; bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; - int i; + struct amdgpu_bo *root; + int i, r; amd_sched_entity_fini(vm->entity.sched, &vm->entity); @@ -2609,7 +2610,15 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_vm_free_mapping(adev, vm, mapping, NULL); } - amdgpu_vm_free_levels(&vm->root); + root = amdgpu_bo_ref(vm->root.bo); + r = amdgpu_bo_reserve(root, true); + if (r) { + dev_err(adev->dev, "Leaking page tables because BO reservation failed\n"); + } else { + amdgpu_vm_free_levels(&vm->root); + amdgpu_bo_unreserve(root); + } + amdgpu_bo_unref(&root); dma_fence_put(vm->last_dir_update); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) amdgpu_vm_free_reserved_vmid(adev, vm, i); From 6cd3e4a3e697cca84f27a0af68ba59a6ab00892f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Fri, 3 Nov 2017 16:00:35 +0100 Subject: [PATCH 0319/3326] drm/ttm: Always and only destroy bo->ttm_resv in ttm_bo_release_list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit e1fc12c5d9ad06a2a74e97a91f1b0c5f4c723b50 upstream. Fixes a use-after-free due to a race condition in ttm_bo_cleanup_refs_and_unlock, which allows one task to reserve a BO and destroy its ttm_resv while another task is waiting for it to signal in reservation_object_wait_timeout_rcu. 
v2: * Always initialize bo->ttm_resv in ttm_bo_init_reserved (Christian König) Fixes: 0d2bd2ae045d "drm/ttm: fix memory leak while individualizing BOs" Reviewed-by: Chunming Zhou # v1 Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher Signed-off-by: Lyude Paul Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/ttm/ttm_bo.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index bee77d31895b3d..c088703777e262 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref) ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); dma_fence_put(bo->moving); - if (bo->resv == &bo->ttm_resv) - reservation_object_fini(&bo->ttm_resv); + reservation_object_fini(&bo->ttm_resv); mutex_destroy(&bo->wu_mutex); if (bo->destroy) bo->destroy(bo); @@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) if (bo->resv == &bo->ttm_resv) return 0; - reservation_object_init(&bo->ttm_resv); BUG_ON(!reservation_object_trylock(&bo->ttm_resv)); r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv); - if (r) { + if (r) reservation_object_unlock(&bo->ttm_resv); - reservation_object_fini(&bo->ttm_resv); - } return r; } @@ -459,6 +455,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) spin_unlock(&glob->lru_lock); if (bo->resv != &bo->ttm_resv) reservation_object_unlock(&bo->ttm_resv); + ttm_bo_cleanup_memtype_use(bo); return; } @@ -1205,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, lockdep_assert_held(&bo->resv->lock.base); } else { bo->resv = &bo->ttm_resv; - reservation_object_init(&bo->ttm_resv); } + reservation_object_init(&bo->ttm_resv); atomic_inc(&bo->glob->bo_count); drm_vma_node_reset(&bo->vma_node); bo->priority = 0; From 7c33c32d39b9d11338624d4a7bad4e75bf64fbf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 10 Oct 2017 16:33:22 +0300 Subject: [PATCH 0320/3326] drm/vblank: Fix flip event vblank count MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 632c6e4edef17c40bba3be67c980d959790d142f upstream. On machines where the vblank interrupt fires some time after the start of vblank (or we just manage to race with the vblank interrupt handler) we will currently stuff a stale vblank counter value into the flip event, and thus we'll prematurely complete the flip. Switch over to drm_crtc_accurate_vblank_count() to make sure we have an up to date counter value, crucially also remember to add the +1 so that the delayed vblank interrupt won't complete the flip prematurely. 
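The arithmetic itself is small; the sketch below only illustrates it with made-up counter values: an event armed during vblank N is meant to complete at vblank N + 1, so stamping it with the current count alone lets a late interrupt for vblank N complete it one frame early.

#include <stdio.h>

int main(void)
{
        unsigned int current_vblank = 41;       /* hypothetical accurate count */
        unsigned int event_sequence = current_vblank + 1;

        printf("armed during vblank %u, completes at vblank %u\n",
               current_vblank, event_sequence);
        return 0;
}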
Cc: Daniel Vetter Suggested-by: Daniel Vetter Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171010133322.24029-1-ville.syrjala@linux.intel.com Reviewed-by: Daniel Vetter #irc Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_vblank.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 70f2b9593edcb1..f14e6c92e3329c 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -869,7 +869,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, assert_spin_locked(&dev->event_lock); e->pipe = pipe; - e->event.sequence = drm_vblank_count(dev, pipe); + e->event.sequence = drm_crtc_accurate_vblank_count(crtc) + 1; e->event.crtc_id = crtc->base.id; list_add_tail(&e->base.link, &dev->vblank_event_list); } From a5851fdb17dda5f0854417f15a0c9c833a92dcc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 23 Oct 2017 18:25:40 +0300 Subject: [PATCH 0321/3326] drm/vblank: Tune drm_crtc_accurate_vblank_count() WARN down to a debug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a111fbc4c44d2981f1a8fef64418685be5e30280 upstream. Since commit 632c6e4edef1 ("drm/vblank: Fix flip event vblank count") even drivers that don't implement accurate vblank timestamps will end up using drm_crtc_accurate_vblank_count(). That leads to a WARN every time drm_crtc_arm_vblank_event() gets called. The could be as often as every frame for each active crtc. Considering drm_crtc_accurate_vblank_count() is never any worse than the drm_vblank_count() we used previously, let's just skip the WARN unless DRM_UT_VBL is enabled. That way people won't be bothered by this unless they're debugging vblank code. And let's also change it to WARN_ONCE() so that even when you're debugging vblank code you won't get drowned by constant WARNs. Cc: Daniel Vetter Cc: "Szyprowski, Marek" Cc: Andrzej Hajda Reported-by: Andrzej Hajda Fixes: 632c6e4edef1 ("drm/vblank: Fix flip event vblank count") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171023152540.15364-1-ville.syrjala@linux.intel.com Acked-by: Benjamin Gaignard Reviewed-by: Daniel Vetter Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_vblank.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index f14e6c92e3329c..17e8ef9a1c1124 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -311,8 +311,8 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc) u32 vblank; unsigned long flags; - WARN(!dev->driver->get_vblank_timestamp, - "This function requires support for accurate vblank timestamps."); + WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp, + "This function requires support for accurate vblank timestamps."); spin_lock_irqsave(&dev->vblank_time_lock, flags); From e3c5870862d95ff198a1902c3d7bd2a548b89ea7 Mon Sep 17 00:00:00 2001 From: Jyri Sarha Date: Thu, 12 Oct 2017 12:19:46 +0300 Subject: [PATCH 0322/3326] drm/tilcdc: Precalculate total frametime in tilcdc_crtc_set_mode() commit ce99f7206c9105851d97202ed08c058af6f11ac4 upstream. We need the total frame refresh time to check if we are too close to vertical sync when updating the two framebuffer DMA registers and risk a collision. 
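The total frame time follows directly from the mode timings. A stand-alone sketch of the same calculation, using a standard 720p timing as the example (htotal 1650, vtotal 750, 74.25 MHz pixel clock):

#include <stdio.h>

/* Microseconds per frame from the full timings and the pixel clock in
 * kHz: 1000 * htotal * vtotal / clock. */
static unsigned int mode_hvtotal_us(unsigned int htotal, unsigned int vtotal,
                                    unsigned int clock_khz)
{
        return (unsigned int)(1000ULL * htotal * vtotal / clock_khz);
}

int main(void)
{
        printf("%u us per frame\n", mode_hvtotal_us(1650, 750, 74250));
        return 0;
}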
This new method is more accurate than the previous one, which was based on the mode's vrefresh value; that value is itself inaccurate or may not even be initialized. Reported-by: Kevin Hao Fixes: 11abbc9f39e0 ("drm/tilcdc: Set framebuffer DMA address to HW only if CRTC is enabled") Signed-off-by: Jyri Sarha Reviewed-by: Tomi Valkeinen Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 406fe4544b83d3..06d6e785c92091 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "tilcdc_drv.h" #include "tilcdc_regs.h" @@ -48,6 +49,7 @@ struct tilcdc_crtc { unsigned int lcd_fck_rate; ktime_t last_vblank; + unsigned int hvtotal_us; struct drm_framebuffer *curr_fb; struct drm_framebuffer *next_fb; @@ -292,6 +294,12 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc) LCDC_V2_CORE_CLK_EN); } +uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode) +{ + return (uint) div_u64(1000llu * mode->htotal * mode->vtotal, + mode->clock); +} + static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) { struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); @@ -459,6 +467,9 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) drm_framebuffer_reference(fb); crtc->hwmode = crtc->state->adjusted_mode; + + tilcdc_crtc->hvtotal_us = + tilcdc_mode_hvtotal(&crtc->hwmode); } static void tilcdc_crtc_enable(struct drm_crtc *crtc) @@ -648,7 +659,7 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, - 1000000 / crtc->hwmode.vrefresh); + tilcdc_crtc->hvtotal_us); tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) From 8153a0fc1212157a4bf8b303cbf51993ff87aa7c Mon Sep 17 00:00:00 2001 From: Roman Kapl Date: Mon, 30 Oct 2017 11:56:13 +0100 Subject: [PATCH 0323/3326] drm/radeon: fix atombios on big endian commit 4f626a4ac8f57ddabf06d03870adab91e463217f upstream. The function for byteswapping the data sent to/from atombios was buggy for num_bytes not divisible by four. The function must be aware of the fact that after byte-swapping the u32 units, valid bytes might end up after the num_bytes boundary. This patch was tested on kernel 3.12 and allowed us to successfully use DisplayPort on a Radeon SI card. Namely it fixed the link training and EDID readout. The function is patched both in radeon and amd drivers, since the functions and the fixes are identical. Signed-off-by: Roman Kapl Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 38 ++++++++++---------- drivers/gpu/drm/radeon/atombios_dp.c | 38 ++++++++++---------- 2 files changed, 36 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index ce443586a0c71c..cc4e18dcd8b6f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) return true; } -/* Atom needs data in little endian format - * so swap as appropriate when copying data to - * or from atom. Note that atom operates on - * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying + * data to or from atom. Note that atom operates on dw units. + * + * Use to_le=true when sending data to atom and provide at least + * ALIGN(num_bytes,4) bytes in the dst buffer. + * + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4) + * byes in the src buffer. */ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ - u32 *dst32, *src32; + u32 src_tmp[5], dst_tmp[5]; int i; + u8 align_num_bytes = ALIGN(num_bytes, 4); - memcpy(src_tmp, src, num_bytes); - src32 = (u32 *)src_tmp; - dst32 = (u32 *)dst_tmp; if (to_le) { - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = cpu_to_le32(src32[i]); - memcpy(dst, dst_tmp, num_bytes); + memcpy(src_tmp, src, num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = cpu_to_le32(src_tmp[i]); + memcpy(dst, dst_tmp, align_num_bytes); } else { - u8 dws = num_bytes & ~3; - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = le32_to_cpu(src32[i]); - memcpy(dst, dst_tmp, dws); - if (num_bytes % 4) { - for (i = 0; i < (num_bytes % 4); i++) - dst[dws+i] = dst_tmp[dws+i]; - } + memcpy(src_tmp, src, align_num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = le32_to_cpu(src_tmp[i]); + memcpy(dst, dst_tmp, num_bytes); } #else memcpy(dst, src, num_bytes); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 432cb46f6a34a1..fd7682bf335dcc 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -45,34 +45,32 @@ static char *pre_emph_names[] = { /***** radeon AUX functions *****/ -/* Atom needs data in little endian format - * so swap as appropriate when copying data to - * or from atom. Note that atom operates on - * dw units. +/* Atom needs data in little endian format so swap as appropriate when copying + * data to or from atom. Note that atom operates on dw units. + * + * Use to_le=true when sending data to atom and provide at least + * ALIGN(num_bytes,4) bytes in the dst buffer. + * + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4) + * byes in the src buffer. 
*/ void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ - u32 *dst32, *src32; + u32 src_tmp[5], dst_tmp[5]; int i; + u8 align_num_bytes = ALIGN(num_bytes, 4); - memcpy(src_tmp, src, num_bytes); - src32 = (u32 *)src_tmp; - dst32 = (u32 *)dst_tmp; if (to_le) { - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = cpu_to_le32(src32[i]); - memcpy(dst, dst_tmp, num_bytes); + memcpy(src_tmp, src, num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = cpu_to_le32(src_tmp[i]); + memcpy(dst, dst_tmp, align_num_bytes); } else { - u8 dws = num_bytes & ~3; - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = le32_to_cpu(src32[i]); - memcpy(dst, dst_tmp, dws); - if (num_bytes % 4) { - for (i = 0; i < (num_bytes % 4); i++) - dst[dws+i] = dst_tmp[dws+i]; - } + memcpy(src_tmp, src, align_num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = le32_to_cpu(src_tmp[i]); + memcpy(dst, dst_tmp, num_bytes); } #else memcpy(dst, src, num_bytes); From 87139913c0e2d0a7270868a7a36d549dfd3fad5d Mon Sep 17 00:00:00 2001 From: Jonathan Liu Date: Mon, 7 Aug 2017 21:55:45 +1000 Subject: [PATCH 0324/3326] drm/panel: simple: Add missing panel_simple_unprepare() calls commit f3621a8eb59a913612c8e6e37d81f16b649f8b6c upstream. During panel removal or system shutdown panel_simple_disable() is called which disables the panel backlight but the panel is still powered due to missing calls to panel_simple_unprepare(). Fixes: d02fd93e2cd8 ("drm/panel: simple - Disable panel on shutdown") Signed-off-by: Jonathan Liu Signed-off-by: Thierry Reding Link: https://patchwork.freedesktop.org/patch/msgid/20170807115545.27747-1-net147@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/panel/panel-simple.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 474fa759e06ec5..234af81fb3d016 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -369,6 +369,7 @@ static int panel_simple_remove(struct device *dev) drm_panel_remove(&panel->base); panel_simple_disable(&panel->base); + panel_simple_unprepare(&panel->base); if (panel->ddc) put_device(&panel->ddc->dev); @@ -384,6 +385,7 @@ static void panel_simple_shutdown(struct device *dev) struct panel_simple *panel = dev_get_drvdata(dev); panel_simple_disable(&panel->base); + panel_simple_unprepare(&panel->base); } static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = { From f5e69000aa80880491ca4a613679bd21c60b048b Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Tue, 15 Aug 2017 15:14:25 +0100 Subject: [PATCH 0325/3326] drm/hisilicon: Ensure LDI regs are properly configured. commit a2f042430784d86eb2b7a6d2a869f552da30edba upstream. This patch fixes the following soft lockup: BUG: soft lockup - CPU#0 stuck for 23s! [weston:307] On weston idle-timeout the IP is powered down and reset asserted. On weston resume we get a massive vblank IRQ storm due to the LDI registers having lost some state. This state loss is caused by ade_crtc_atomic_begin() not calling ade_ldi_set_mode(). With this patch applied resuming from Weston idle-timeout works well. 
Signed-off-by: Peter Griffin Tested-by: John Stultz Reviewed-by: Xinliang Liu Signed-off-by: Xinliang Liu Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 9823477b1855cd..2269be91f3e169 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -534,9 +534,12 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc, { struct ade_crtc *acrtc = to_ade_crtc(crtc); struct ade_hw_ctx *ctx = acrtc->ctx; + struct drm_display_mode *mode = &crtc->state->mode; + struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode; if (!ctx->power_on) (void)ade_power_up(ctx); + ade_ldi_set_mode(acrtc, mode, adj_mode); } static void ade_crtc_atomic_flush(struct drm_crtc *crtc, From 78f4bb6e965ddd9f60f607dfaf6e20109da84f6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 30 Oct 2017 14:57:43 +0100 Subject: [PATCH 0326/3326] drm/ttm: once more fix ttm_buffer_object_transfer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 4d98e5ee6084f6d7bc578c5d5f86de7156aaa4cb upstream. When the mutex is locked just in the moment we copy it we end up with a warning that we release a locked mutex. Fix this by properly reinitializing the mutex. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/ttm/ttm_bo_util.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index c934ad5b39036c..7c2fbdbbd04887 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -474,6 +474,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); INIT_LIST_HEAD(&fbo->io_reserve_lru); + mutex_init(&fbo->wu_mutex); fbo->moving = NULL; drm_vma_node_reset(&fbo->vma_node); atomic_set(&fbo->cpu_writers, 0); From e4c84b504e02f6f7103cdc67b1adbe304910c104 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 17 Nov 2017 16:41:16 +0800 Subject: [PATCH 0327/3326] drm/amd/pp: fix typecast error in powerplay. commit 8d8258bdab735d9f3c4b78e091ecfbb2b2b1f2ca upstream. 
resulted in unexpected data truncation Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 84f01fd33aff70..b50aa292d0266f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -850,9 +850,9 @@ static int init_over_drive_limits( const ATOM_Tonga_POWERPLAYTABLE *powerplay_table) { hwmgr->platform_descriptor.overdriveLimit.engineClock = - le16_to_cpu(powerplay_table->ulMaxODEngineClock); + le32_to_cpu(powerplay_table->ulMaxODEngineClock); hwmgr->platform_descriptor.overdriveLimit.memoryClock = - le16_to_cpu(powerplay_table->ulMaxODMemoryClock); + le32_to_cpu(powerplay_table->ulMaxODMemoryClock); hwmgr->platform_descriptor.minOverdriveVDDC = 0; hwmgr->platform_descriptor.maxOverdriveVDDC = 0; From dde499d3cf6edea946ff59cb5d72e4f9ef21f3fd Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Tue, 28 Nov 2017 12:16:03 +0100 Subject: [PATCH 0328/3326] drm/fb_helper: Disable all crtc's when initial setup fails. commit 52dd06506e9bbc2a37b352df7dfc5468f8da21fd upstream. Some drivers like i915 start with crtc's enabled, but with deferred fbcon setup they were no longer disabled as part of fbdev setup. Headless units could no longer enter pc3 state because the crtc was still enabled. Fix this by calling restore_fbdev_mode when we would have called it otherwise once during initial fbdev setup. Signed-off-by: Maarten Lankhorst Fixes: ca91a2758fce ("drm/fb-helper: Support deferred setup") Reported-by: Thomas Voegtle Tested-by: Thomas Voegtle Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20171128111603.62757-1-maarten.lankhorst@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_fb_helper.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1b8f013ffa6503..5e93589c335cb6 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1809,6 +1809,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { DRM_INFO("Cannot find any crtc or sizes\n"); + + /* First time: disable all crtc's.. */ + if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master)) + restore_fbdev_mode(fb_helper); return -EAGAIN; } From e685410c5f30a19c54d652e033942ed270d31b61 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 10 Nov 2017 17:38:34 +0100 Subject: [PATCH 0329/3326] drm/fsl-dcu: Don't set connector DPMS property commit daee54263c1202cbdab85c5e15ae30b417602efb upstream. Since commit 4a97a3da420b ("drm: Don't update property values for atomic drivers") atomic drivers must not update property values as properties are read from the state instead. To catch remaining users, the drm_object_property_set_value() function now throws a warning when called by atomic drivers on non-immutable properties, and we hit that warning when creating connectors. The easy fix is to just remove the drm_object_property_set_value() as it is used here to set the initial value of the connector's DPMS property to OFF. 
The DPMS property applies on top of the connector's state crtc pointer (initialized to NULL) that is the main connector on/off control, and should thus default to ON. Fixes: 4a97a3da420b ("drm: Don't update property values for atomic drivers") Signed-off-by: Laurent Pinchart Signed-off-by: Stefan Agner Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index edd7d8127d194d..c54806d08dd78d 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, { struct drm_encoder *encoder = &fsl_dev->encoder; struct drm_connector *connector = &fsl_dev->connector.base; - struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config; int ret; fsl_dev->connector.encoder = encoder; @@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, if (ret < 0) goto err_sysfs; - drm_object_property_set_value(&connector->base, - mode_config->dpms_property, - DRM_MODE_DPMS_OFF); - ret = drm_panel_attach(panel, connector); if (ret) { dev_err(fsl_dev->dev, "failed to attach panel\n"); From d8f74d70634aa0dfe063d0e445f683b0545fa5b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 8 Nov 2017 17:25:04 +0200 Subject: [PATCH 0330/3326] drm/edid: Don't send non-zero YQ in AVI infoframe for HDMI 1.x sinks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 9271c0ca573e02a360b636ecd8cb408852f4e9f6 upstream. Apparently some sinks look at the YQ bits even when receiving RGB, and they get somehow confused when they see a non-zero YQ value. So we can't just blindly follow CEA-861-F and set YQ to match the RGB range. Unfortunately there is no good way to tell whether the sink designer claims to have read CEA-861-F. The CEA extension block revision number has generally been stuck at 3 since forever, and even a very recently manufactured sink might be based on an old design so the manufacturing date doesn't seem like something we can use. In lieu of better information let's follow CEA-861-F only for HDMI 2.0 sinks, since HDMI 2.0 is based on CEA-861-F. For HDMI 1.x sinks we'll always set YQ=0. The alternative would of course be to always set YQ=0. And if we ever encounter a HDMI 2.0+ sink with this bug that's what we'll probably have to do. 
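The policy above boils down to a small decision; a sketch of it follows (pick_yq() is a made-up helper name, the enums come from include/linux/hdmi.h, and the real change is in the drm_edid.c hunk below):

static enum hdmi_ycc_quantization_range
pick_yq(bool is_hdmi2_sink, enum hdmi_quantization_range rgb_quant_range)
{
	/* HDMI 1.x sink, or limited range RGB: always signal YQ=0 (limited) */
	if (!is_hdmi2_sink ||
	    rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
		return HDMI_YCC_QUANTIZATION_RANGE_LIMITED;

	/* HDMI 2.0 sink with full range RGB: follow CEA-861-F and signal YQ=1 */
	return HDMI_YCC_QUANTIZATION_RANGE_FULL;
}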
Cc: Jani Nikula Cc: Eric Anholt Cc: Neil Kownacki Reported-by: Neil Kownacki Tested-by: Neil Kownacki Fixes: fcc8a22cc905 ("drm/edid: Set YQ bits in the AVI infoframe according to CEA-861-F") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101639 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171108152504.12596-1-ville.syrjala@linux.intel.com Acked-by: Eric Anholt Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_edid.c | 12 ++++++++++-- drivers/gpu/drm/i915/intel_hdmi.c | 3 ++- drivers/gpu/drm/vc4/vc4_hdmi.c | 3 ++- include/drm/drm_edid.h | 3 ++- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 6bb6337be920c6..fc7946eb6665c4 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4809,7 +4809,8 @@ void drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, const struct drm_display_mode *mode, enum hdmi_quantization_range rgb_quant_range, - bool rgb_quant_range_selectable) + bool rgb_quant_range_selectable, + bool is_hdmi2_sink) { /* * CEA-861: @@ -4833,8 +4834,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, * YQ-field to match the RGB Quantization Range being transmitted * (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB, * set YQ=1) and the Sink shall ignore the YQ-field." + * + * Unfortunate certain sinks (eg. VIZ Model 67/E261VA) get confused + * by non-zero YQ when receiving RGB. There doesn't seem to be any + * good way to tell which version of CEA-861 the sink supports, so + * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based + * on on CEA-861-F. */ - if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED) + if (!is_hdmi2_sink || + rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED) frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; else diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index e8abea7594ec4a..3fed1d3ecdedfd 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -481,7 +481,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, crtc_state->limited_color_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL, - intel_hdmi->rgb_quant_range_selectable); + intel_hdmi->rgb_quant_range_selectable, + is_hdmi2_sink); /* TODO: handle pixel repetition for YCBCR420 outputs */ intel_write_infoframe(encoder, crtc_state, &frame); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 937da8dd65b8b0..8f71157a2b063b 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -433,7 +433,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) vc4_encoder->limited_rgb_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL, - vc4_encoder->rgb_range_selectable); + vc4_encoder->rgb_range_selectable, + false); vc4_hdmi_write_infoframe(encoder, &frame); } diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 1e1908a6b1d66e..a992434ded999d 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -360,7 +360,8 @@ void drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, const struct drm_display_mode *mode, enum hdmi_quantization_range rgb_quant_range, - bool rgb_quant_range_selectable); + bool rgb_quant_range_selectable, + bool is_hdmi2_sink); /** * drm_eld_mnl - Get ELD monitor name length in bytes. 
From 802328da073a0ba36a1cde899c48a5f07732d4aa Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Tue, 21 Nov 2017 09:08:07 -0500 Subject: [PATCH 0331/3326] drm/amdgpu: move UVD/VCE and VCN structure out from union MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit b43aaee69d4327d05e7624d9471c17d015b4d67d upstream. With VCN decode and encode enabled from user space, user space queries the kernel for IP information. If the hardware has UVD/VCE, that information comes from those IP blocks, but while the structures share a union it can be misinterpreted as VCN information; the same misinterpretation can happen the other way around on hardware with a VCN block. Signed-off-by: Leo Liu Acked-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher Fixes: 95d0906f8506 ("drm/amdgpu: add initial vcn support and decode tests") Reviewed-and-Tested-by: Michel Dänzer Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 103635ab784c98..87801faaf2648a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1536,18 +1536,14 @@ struct amdgpu_device { /* sdma */ struct amdgpu_sdma sdma; - union { - struct { - /* uvd */ - struct amdgpu_uvd uvd; - - /* vce */ - struct amdgpu_vce vce; - }; - - /* vcn */ - struct amdgpu_vcn vcn; - }; + /* uvd */ + struct amdgpu_uvd uvd; + + /* vce */ + struct amdgpu_vce vce; + + /* vcn */ + struct amdgpu_vcn vcn; /* firmwares */ struct amdgpu_firmware firmware; From 248b0ec5ad2501a35f2603384f1c7c7cee79eb02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Wed, 22 Nov 2017 15:55:21 +0100 Subject: [PATCH 0332/3326] drm/amdgpu: Set adev->vcn.irq.num_types for VCN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 89ce6e0afee8eafa679093207dabd717af9d09c5 upstream. We were setting adev->uvd.irq.num_types instead. Fixes: 9b257116e784 ("drm/amdgpu: add vcn enc irq support") Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 21e7b88401e1e3..a098712bdd2fff 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = { static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs; } From 75e63076249aedd3a7a3c38677d135cc146f1f4b Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Fri, 17 Nov 2017 15:27:28 -0800 Subject: [PATCH 0333/3326] include/linux/compiler-clang.h: handle randomizable anonymous structs commit 4ca59b14e588f873795a11cdc77a25c686a29d23 upstream. The GCC randomize layout plugin can randomize the member offsets of sensitive kernel data structures. To use this feature, certain annotations and members are added to the structures which affect the member offsets even if this plugin is not used. All of these structures are completely randomized, except for task_struct which leaves out some of its members.
All the other members are wrapped within an anonymous struct with the __randomize_layout attribute. This is done using the randomized_struct_fields_start and randomized_struct_fields_end defines. When the plugin is disabled, the behaviour of this attribute can vary based on the GCC version. For GCC 5.1+, this attribute maps to __designated_init otherwise it is just an empty define but the anonymous structure is still present. For other compilers, both randomized_struct_fields_start and randomized_struct_fields_end default to empty defines meaning the anonymous structure is not introduced at all. So, if a module compiled with Clang, such as a BPF program, needs to access task_struct fields such as pid and comm, the offsets of these members as recognized by Clang are different from those recognized by modules compiled with GCC. If GCC 4.6+ is used to build the kernel, this can be solved by introducing appropriate defines for Clang so that the anonymous structure is seen when determining the offsets for the members. Link: http://lkml.kernel.org/r/20171109064645.25581-1-sandipan@linux.vnet.ibm.com Signed-off-by: Sandipan Das Cc: David Rientjes Cc: Greg Kroah-Hartman Cc: Kate Stewart Cc: Thomas Gleixner Cc: Naveen N. Rao Cc: Alexei Starovoitov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/compiler-clang.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 54dfef70a07274..780b1242bf24de 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -16,3 +16,6 @@ * with any version that can compile the kernel */ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +#define randomized_struct_fields_start struct { +#define randomized_struct_fields_end }; From 2b24fdca153f0fd6c2d0f1f60c7606a53b6db575 Mon Sep 17 00:00:00 2001 From: Don Hiatt Date: Mon, 2 Oct 2017 11:04:48 -0700 Subject: [PATCH 0334/3326] IB/core: Do not warn on lid conversions for OPA commit 6588e412fe872ed81f3fb8d9b4561a66ecb763d0 upstream. On OPA devices the user_mad recv_handler can receive 32Bit LIDs (e.g. OPA_PERMISSIVE_LID) and it is okay to lose the upper 16 bits of the LID as this information is obtained elsewhere. Do not issue a warning when calling ib_lid_be16() in this case by masking out the upper 16Bits. 
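In short (a sketch, not the exact driver code; port_is_opa is a hypothetical stand-in for the RDMA_CORE_PORT_INTEL_OPA capability check): ib_lid_be16() warns when its argument does not fit in 16 bits, and on an OPA port wc->slid can carry a 32-bit LID, so only the low 16 bits are passed through. The log that follows shows the WARN this avoids.

u32 slid = mad_recv_wc->wc->slid;	/* may be a 32-bit OPA LID */

if (port_is_opa)	/* hypothetical flag for the OPA capability check */
	packet->mad.hdr.lid = ib_lid_be16(0xFFFF & slid);
else
	packet->mad.hdr.lid = ib_lid_be16(slid);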
[75667.310846] ------------[ cut here ]------------ [75667.316447] WARNING: CPU: 0 PID: 1718 at ./include/rdma/ib_verbs.h:3799 recv_handler+0x15a/0x170 [ib_umad] [75667.327640] Modules linked in: ib_ipoib hfi1(E) rdmavt(E) rdma_ucm(E) ib_ucm(E) rdma_cm(E) ib_cm(E) iw_cm(E) ib_umad(E) ib_uverbs(E) ib_core(E) libiscsi scsi_transport_iscsi dm_mirror dm_region_hash dm_log dm_mod dax x86_pkg_temp_thermal intel_powerclamp coretemp kvm irqbypass crct10dif_pclmul crc32_pclmul ghash_clmulni_intel pcbc aesni_intel mei_me ipmi_si iTCO_wdt iTCO_vendor_support crypto_simd ipmi_devintf pcspkr mei sg i2c_i801 glue_helper lpc_ich shpchp ioatdma mfd_core wmi ipmi_msghandler cryptd acpi_power_meter acpi_pad nfsd auth_rpcgss nfs_acl lockd grace sunrpc ip_tables xfs libcrc32c sd_mod mgag200 drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm drm igb ptp ahci libahci pps_core crc32c_intel libata dca i2c_algo_bit i2c_core [last unloaded: ib_core] [75667.407704] CPU: 0 PID: 1718 Comm: kworker/0:1H Tainted: G W I E 4.13.0-rc7+ #1 [75667.417310] Hardware name: Intel Corporation S2600WT2/S2600WT2, BIOS SE5C610.86B.01.01.0008.021120151325 02/11/2015 [75667.429555] Workqueue: ib-comp-wq ib_cq_poll_work [ib_core] [75667.436360] task: ffff88084a718000 task.stack: ffffc9000a424000 [75667.443549] RIP: 0010:recv_handler+0x15a/0x170 [ib_umad] [75667.450090] RSP: 0018:ffffc9000a427ce8 EFLAGS: 00010286 [75667.456508] RAX: 00000000ffffffff RBX: ffff88085159ce80 RCX: 0000000000000000 [75667.465094] RDX: ffff88085a47b068 RSI: 0000000000000000 RDI: ffff88085159cf00 [75667.473668] RBP: ffffc9000a427d38 R08: 000000000001efc0 R09: ffff88085159ce80 [75667.482228] R10: ffff88085f007480 R11: ffff88084acf20e8 R12: ffff88085a47b020 [75667.490824] R13: ffff881056842e10 R14: ffff881056840200 R15: ffff88104c8d0800 [75667.499390] FS: 0000000000000000(0000) GS:ffff88085f400000(0000) knlGS:0000000000000000 [75667.509028] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [75667.516080] CR2: 00007f9e4b3d9000 CR3: 0000000001c09000 CR4: 00000000001406f0 [75667.524664] Call Trace: [75667.528044] ? find_mad_agent+0x7c/0x1b0 [ib_core] [75667.534031] ? ib_mark_mad_done+0x73/0xa0 [ib_core] [75667.540142] ib_mad_recv_done+0x423/0x9b0 [ib_core] [75667.546215] __ib_process_cq+0x5d/0xb0 [ib_core] [75667.552007] ib_cq_poll_work+0x20/0x60 [ib_core] [75667.557766] process_one_work+0x149/0x360 [75667.562844] worker_thread+0x4d/0x3c0 [75667.567529] kthread+0x109/0x140 [75667.571713] ? rescuer_thread+0x380/0x380 [75667.576775] ? 
kthread_park+0x60/0x60 [75667.581447] ret_from_fork+0x25/0x30 [75667.586014] Code: 43 4a 0f b6 45 c6 88 43 4b 48 8b 45 b0 48 89 43 4c 48 8b 45 b8 48 89 43 54 8b 45 c0 0f c8 89 43 5c e9 79 ff ff ff e8 16 4e fa e0 <0f> ff e9 42 ff ff ff 66 66 66 66 66 66 2e 0f 1f 84 00 00 00 00 [75667.608323] ---[ end trace cf26df27c9597264 ]--- Fixes: 62ede7779904 ("Add OPA extended LID support") Reviewed-by: Mike Marciniszyn Signed-off-by: Don Hiatt Signed-off-by: Dennis Dalessandro Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/user_mad.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index c1696e6084b2b1..603acaf91828ae 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -229,7 +229,16 @@ static void recv_handler(struct ib_mad_agent *agent, packet->mad.hdr.status = 0; packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len; packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); - packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid); + /* + * On OPA devices it is okay to lose the upper 16 bits of LID as this + * information is obtained elsewhere. Mask off the upper 16 bits. + */ + if (agent->device->port_immutable[agent->port_num].core_cap_flags & + RDMA_CORE_PORT_INTEL_OPA) + packet->mad.hdr.lid = ib_lid_be16(0xFFFF & + mad_recv_wc->wc->slid); + else + packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid); packet->mad.hdr.sl = mad_recv_wc->wc->sl; packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; From c48f3c04984557e34cf2b7194eb4d7947edccc5e Mon Sep 17 00:00:00 2001 From: Don Hiatt Date: Mon, 2 Oct 2017 11:04:55 -0700 Subject: [PATCH 0335/3326] IB/hfi1: Do not warn on lid conversions for OPA commit 4988be5813ff2afdc0d8bfa315ef34a577d3efbf upstream. On OPA devices opa_local_smp_check will receive 32Bit LIDs when the LID is Extended. In such cases, it is okay to lose the upper 16 bits of the LID as this information is obtained elsewhere. Do not issue a warning when calling ib_lid_cpu16() in this case by masking out the upper 16Bits. 
[75920.148985] ------------[ cut here ]------------ [75920.154651] WARNING: CPU: 0 PID: 1718 at ./include/rdma/ib_verbs.h:3788 hfi1_process_mad+0x1c1f/0x1c80 [hfi1] [75920.166192] Modules linked in: ib_ipoib hfi1(E) rdmavt(E) rdma_ucm(E) ib_ucm(E) rdma_cm(E) ib_cm(E) iw_cm(E) ib_umad(E) ib_uverbs(E) ib_core(E) libiscsi scsi_transport_iscsi dm_mirror dm_region_hash dm_log dm_mod dax x86_pkg_temp_thermal intel_powerclamp coretemp kvm irqbypass crct10dif_pclmul crc32_pclmul ghash_clmulni_intel pcbc aesni_intel mei_me ipmi_si iTCO_wdt iTCO_vendor_support crypto_simd ipmi_devintf pcspkr mei sg i2c_i801 glue_helper lpc_ich shpchp ioatdma mfd_core wmi ipmi_msghandler cryptd acpi_power_meter acpi_pad nfsd auth_rpcgss nfs_acl lockd grace sunrpc ip_tables xfs libcrc32c sd_mod mgag200 drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm drm igb ptp ahci libahci pps_core crc32c_intel libata dca i2c_algo_bit i2c_core [last unloaded: ib_core] [75920.246331] CPU: 0 PID: 1718 Comm: kworker/0:1H Tainted: G W I E 4.13.0-rc7+ #1 [75920.255907] Hardware name: Intel Corporation S2600WT2/S2600WT2, BIOS SE5C610.86B.01.01.0008.021120151325 02/11/2015 [75920.268158] Workqueue: ib-comp-wq ib_cq_poll_work [ib_core] [75920.274934] task: ffff88084a718000 task.stack: ffffc9000a424000 [75920.282123] RIP: 0010:hfi1_process_mad+0x1c1f/0x1c80 [hfi1] [75920.288881] RSP: 0018:ffffc9000a427c38 EFLAGS: 00010206 [75920.295265] RAX: 0000000000010001 RBX: ffff8808361420e8 RCX: ffff880837811d80 [75920.303784] RDX: 0000000000000002 RSI: 0000000000007fff RDI: ffff880837811d80 [75920.312302] RBP: ffffc9000a427d38 R08: 0000000000000000 R09: ffff8808361420e8 [75920.320819] R10: ffff88083841f0e8 R11: ffffc9000a427da8 R12: 0000000000000001 [75920.329335] R13: ffff880837810000 R14: 0000000000000000 R15: ffff88084f1a4800 [75920.337849] FS: 0000000000000000(0000) GS:ffff88085f400000(0000) knlGS:0000000000000000 [75920.347450] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [75920.354405] CR2: 00007f9e4b3d9000 CR3: 0000000001c09000 CR4: 00000000001406f0 [75920.362947] Call Trace: [75920.366257] ? ib_mad_recv_done+0x258/0x9b0 [ib_core] [75920.372457] ? ib_mad_recv_done+0x258/0x9b0 [ib_core] [75920.378652] ? __kmalloc+0x1df/0x210 [75920.383229] ib_mad_recv_done+0x305/0x9b0 [ib_core] [75920.389270] __ib_process_cq+0x5d/0xb0 [ib_core] [75920.395032] ib_cq_poll_work+0x20/0x60 [ib_core] [75920.400777] process_one_work+0x149/0x360 [75920.405836] worker_thread+0x4d/0x3c0 [75920.410505] kthread+0x109/0x140 [75920.414681] ? rescuer_thread+0x380/0x380 [75920.419731] ? 
kthread_park+0x60/0x60 [75920.424406] ret_from_fork+0x25/0x30 [75920.428972] Code: 4c 89 9d 58 ff ff ff 49 89 45 00 66 b8 00 02 49 89 45 08 e8 44 27 89 e0 4c 8b 9d 58 ff ff ff e9 d8 f6 ff ff 0f ff e9 55 e7 ff ff <0f> ff e9 3b e5 ff ff 0f ff 0f 1f 84 00 00 00 00 00 e9 4b e9 ff [75921.451269] ---[ end trace cf26df27c9597265 ]--- Fixes: 62ede7779904 ("Add OPA extended LID support") Reviewed-by: Mike Marciniszyn Signed-off-by: Don Hiatt Signed-off-by: Dennis Dalessandro Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/hfi1/mad.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index f4c0ffc040cc59..07b80faf167528 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -4293,7 +4293,6 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp, const struct ib_wc *in_wc) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - u16 slid = ib_lid_cpu16(in_wc->slid); u16 pkey; if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys)) @@ -4320,7 +4319,11 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp, */ if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) return 0; - ingress_pkey_table_fail(ppd, pkey, slid); + /* + * On OPA devices it is okay to lose the upper 16 bits of LID as this + * information is obtained elsewhere. Mask off the upper 16 bits. + */ + ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid)); return 1; } From 635526b896ca34fff5487d3ac00d04afbee7e1e4 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 6 Nov 2017 08:31:59 +0200 Subject: [PATCH 0336/3326] e1000e: fix the use of magic numbers for buffer overrun issue commit c0f4b163a03e73055dd734eaca64b9580e72e7fb upstream. This is a follow on to commit b10effb92e27 ("fix buffer overrun while the I219 is processing DMA transactions") to address David Laights concerns about the use of "magic" numbers. So define masks as well as add additional code comments to give a better understanding of what needs to be done to avoid a buffer overrun. 
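The named masks spell out what the old magic numbers did; a short sketch of how the update reads with the new defines (values taken from the hunk below, er32()/ew32() being the driver's usual register accessors inside e1000_configure_tx()):

u32 reg_val = er32(TARC(0));

reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;	/* 0x30000000 == BIT(29) | BIT(28) */
reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;		/* 0x20000000 == BIT(29) */
ew32(TARC(0), reg_val);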
Signed-off-by: Sasha Neftin Reviewed-by: Alexander H Duyck Reviewed-by: Dima Ruinskiy Reviewed-by: Raanan Avargil Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/e1000e/ich8lan.h | 3 ++- drivers/net/ethernet/intel/e1000e/netdev.c | 9 ++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 67163ca898ba2a..00a36df02a3fd9 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -113,7 +113,8 @@ #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ -#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) +#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000 +#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index c38b00c90f4801..991c2a0dd67e07 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3030,9 +3030,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(IOSFPC, reg_val); reg_val = er32(TARC(0)); - /* SPT and KBL Si errata workaround to avoid Tx hang */ - reg_val &= ~BIT(28); - reg_val |= BIT(29); + /* SPT and KBL Si errata workaround to avoid Tx hang. + * Dropping the number of outstanding requests from + * 3 to 2 in order to avoid a buffer overrun. + */ + reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ; + reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ; ew32(TARC(0), reg_val); } } From a67936842ba66eb229e6322856fefd2310e0b3fb Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 17 Oct 2017 14:24:09 +1100 Subject: [PATCH 0337/3326] md: forbid a RAID5 from having both a bitmap and a journal. commit 230b55fa8d64007339319539f8f8e68114d08529 upstream. Having both a bitmap and a journal is pointless. Attempting to do so can corrupt the bitmap if the journal replay happens before the bitmap is initialized. Rather than try to avoid this corruption, simply refuse to allow arrays with both a bitmap and a journal. So: - if raid5_run sees both are present, fail. - if adding a bitmap finds a journal is present, fail - if adding a journal finds a bitmap is present, fail. 
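All three refusal points listed above reduce to the same condition; a rough sketch (bitmap_and_journal_conflict() is a made-up helper, the real checks live in bitmap_create(), add_new_disk() and raid5_run() as shown in the diff below):

static bool bitmap_and_journal_conflict(struct mddev *mddev)
{
	bool has_journal = test_bit(MD_HAS_JOURNAL, &mddev->flags);
	bool has_bitmap = mddev->bitmap ||
			  mddev->bitmap_info.offset ||
			  mddev->bitmap_info.file;

	/* the callers return -EBUSY or -EINVAL when both are requested */
	return has_journal && has_bitmap;
}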
Signed-off-by: NeilBrown Tested-by: Joshua Kinard Acked-by: Joshua Kinard Signed-off-by: Shaohua Li Signed-off-by: Greg Kroah-Hartman --- drivers/md/bitmap.c | 6 ++++++ drivers/md/md.c | 2 +- drivers/md/raid5.c | 7 +++++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index cae57b5be817a2..f425905c97fa93 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1816,6 +1816,12 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot) BUG_ON(file && mddev->bitmap_info.offset); + if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { + pr_notice("md/raid:%s: array with journal cannot have bitmap\n", + mdname(mddev)); + return ERR_PTR(-EBUSY); + } + bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) return ERR_PTR(-ENOMEM); diff --git a/drivers/md/md.c b/drivers/md/md.c index e019cf8c0d131a..98ea86309ceb44 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -6362,7 +6362,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) break; } } - if (has_journal) { + if (has_journal || mddev->bitmap) { export_rdev(rdev); return -EBUSY; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 928e24a071338a..7aed69a4f65529 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7156,6 +7156,13 @@ static int raid5_run(struct mddev *mddev) min_offset_diff = diff; } + if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) && + (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { + pr_notice("md/raid:%s: array cannot have both journal and bitmap\n", + mdname(mddev)); + return -EINVAL; + } + if (mddev->reshape_position != MaxSector) { /* Check that we can continue the reshape. * Difficulties arise if the stripe we would write to From 6613dc721fc232698aafd0f6e214137cfda61e35 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 10 Nov 2017 16:03:01 +0100 Subject: [PATCH 0338/3326] drm/i915: Fix false-positive assert_rpm_wakelock_held in i915_pmic_bus_access_notifier v2 commit f4359cedfb43b934f38c50d1604db21333abe57b upstream. assert_rpm_wakelock_held is triggered from i915_pmic_bus_access_notifier even though it gets unregistered on (runtime) suspend; this is caused by a race happening under the following circumstances: intel_runtime_pm_put does: atomic_dec(&dev_priv->pm.wakeref_count); pm_runtime_mark_last_busy(kdev); pm_runtime_put_autosuspend(kdev); And pm_runtime_put_autosuspend calls intel_runtime_suspend from a workqueue, so there is ample time between the atomic_dec() and intel_runtime_suspend() unregistering the notifier. If the notifier gets called in this window, assert_rpm_wakelock_held falsely triggers (at this point we're not runtime-suspended yet). This commit adds disable_rpm_wakeref_asserts and enable_rpm_wakeref_asserts calls around the intel_uncore_forcewake_get(FORCEWAKE_ALL) call in i915_pmic_bus_access_notifier fixing the false-positive WARN_ON.
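The window in which the false positive fires is easiest to see as a timeline, sketched here from the sequence described above (comments only, plus the shape of the fix; the actual hunk follows):

/*
 * intel_runtime_pm_put()
 *   atomic_dec(&dev_priv->pm.wakeref_count);   <- wakeref count may now be 0
 *   pm_runtime_mark_last_busy(kdev);
 *   pm_runtime_put_autosuspend(kdev);          <- suspend only queued to a worker
 *
 *   ... i915_pmic_bus_access_notifier() can still run here and
 *   assert_rpm_wakelock_held() sees a zero wakeref count: false WARN ...
 *
 * intel_runtime_suspend()                      <- runs later from the workqueue
 *   intel_uncore_suspend()                     <- only now unregisters the notifier
 */
disable_rpm_wakeref_asserts(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
enable_rpm_wakeref_asserts(dev_priv);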
Changes in v2: -Reword comment explaining why disabling the wakeref asserts is ok and necessary Reported-by: FKr Reviewed-by: Imre Deak Signed-off-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20171110150301.9601-2-hdegoede@redhat.com (cherry picked from commit ce30560c80dead91e98a03d90fb8791e57a9b69d) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_uncore.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 1d7b879cc68c60..2d3aad319229b1 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1171,8 +1171,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, * bus, which will be busy after this notification, leading to: * "render: timed out waiting for forcewake ack request." * errors. + * + * The notifier is unregistered during intel_runtime_suspend(), + * so it's ok to access the HW here without holding a RPM + * wake reference -> disable wakeref asserts for the time of + * the access. */ + disable_rpm_wakeref_asserts(dev_priv); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + enable_rpm_wakeref_asserts(dev_priv); break; case MBI_PMIC_BUS_ACCESS_END: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); From 569e3d1de49f0896733de8e709fc1fd28034d7dc Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 14 Nov 2017 14:55:17 +0100 Subject: [PATCH 0339/3326] drm/i915: Re-register PMIC bus access notifier on runtime resume commit 294cf1af8cf2eb0d1eced377fdfb9a2d3f0e8b42 upstream. intel_uncore_suspend() unregisters the uncore code's PMIC bus access notifier and gets called on both normal and runtime suspend. intel_uncore_resume_early() re-registers the notifier, but only on normal resume. Add a new intel_uncore_runtime_resume() function which only re-registers the notifier and call that on runtime resume. Reported-by: Imre Deak Reviewed-by: Imre Deak Signed-off-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20171114135518.15981-2-hdegoede@redhat.com (cherry picked from commit bedf4d79c3654921839b62246b0965ddb308b201) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/i915_drv.c | 2 ++ drivers/gpu/drm/i915/intel_uncore.c | 6 ++++++ drivers/gpu/drm/i915/intel_uncore.h | 1 + 3 files changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9f45cfeae77554..82498f8232eb04 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2591,6 +2591,8 @@ static int intel_runtime_resume(struct device *kdev) ret = vlv_resume_prepare(dev_priv, true); } + intel_uncore_runtime_resume(dev_priv); + /* * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 2d3aad319229b1..e9ed0251840686 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -434,6 +434,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv) i915_check_and_clear_faults(dev_priv); } +void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv) +{ + iosf_mbi_register_pmic_bus_access_notifier( + &dev_priv->uncore.pmic_bus_access_nb); +} + void intel_uncore_sanitize(struct drm_i915_private *dev_priv) { i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 5f90278da46121..0bdc3fcc0e64d4 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -121,6 +121,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv void intel_uncore_fini(struct drm_i915_private *dev_priv); void intel_uncore_suspend(struct drm_i915_private *dev_priv); void intel_uncore_resume_early(struct drm_i915_private *dev_priv); +void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv); u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); From 7042c2b9a19e74972f5783ab3b7ef98ebdee293f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 25 Nov 2017 19:41:55 +0000 Subject: [PATCH 0340/3326] drm/i915/fbdev: Serialise early hotplug events with async fbdev config commit a45b30a6c5db631e2ba680304bd5edd0cd1f9643 upstream. As both the hotplug event and fbdev configuration run asynchronously, it is possible for them to run concurrently. If configuration fails, we were freeing the fbdev causing a use-after-free in the hotplug event. <7>[ 3069.935211] [drm:intel_fb_initial_config [i915]] Not using firmware configuration <7>[ 3069.935225] [drm:drm_setup_crtcs] looking for cmdline mode on connector 77 <7>[ 3069.935229] [drm:drm_setup_crtcs] looking for preferred mode on connector 77 0 <7>[ 3069.935233] [drm:drm_setup_crtcs] found mode 3200x1800 <7>[ 3069.935236] [drm:drm_setup_crtcs] picking CRTCs for 8192x8192 config <7>[ 3069.935253] [drm:drm_setup_crtcs] desired mode 3200x1800 set on crtc 43 (0,0) <7>[ 3069.935323] [drm:intelfb_create [i915]] no BIOS fb, allocating a new one <4>[ 3069.967737] general protection fault: 0000 [#1] PREEMPT SMP <0>[ 3069.977453] --------------------------------- <4>[ 3069.977457] Modules linked in: i915(+) vgem snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_codec_generic x86_pkg_temp_thermal intel_powerclamp coretemp crct10dif_pclmul crc32_pclmul ghash_clmulni_intel snd_hda_codec snd_hwdep snd_hda_core snd_pcm r8169 mei_me mii prime_numbers mei i2c_hid pinctrl_geminilake pinctrl_intel [last unloaded: i915] <4>[ 3069.977492] CPU: 1 PID: 15414 Comm: kworker/1:0 Tainted: G U 4.14.0-CI-CI_DRM_3388+ #1 <4>[ 3069.977497] Hardware name: Intel Corp. 
Geminilake/GLK RVP1 DDR4 (05), BIOS GELKRVPA.X64.0062.B30.1708222146 08/22/2017 <4>[ 3069.977508] Workqueue: events output_poll_execute <4>[ 3069.977512] task: ffff880177734e40 task.stack: ffffc90001fe4000 <4>[ 3069.977519] RIP: 0010:__lock_acquire+0x109/0x1b60 <4>[ 3069.977523] RSP: 0018:ffffc90001fe7bb0 EFLAGS: 00010002 <4>[ 3069.977526] RAX: 6b6b6b6b6b6b6b6b RBX: 0000000000000282 RCX: 0000000000000000 <4>[ 3069.977530] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff880170d4efd0 <4>[ 3069.977534] RBP: ffffc90001fe7c70 R08: 0000000000000001 R09: 0000000000000000 <4>[ 3069.977538] R10: 0000000000000000 R11: ffffffff81899609 R12: ffff880170d4efd0 <4>[ 3069.977542] R13: ffff880177734e40 R14: 0000000000000001 R15: 0000000000000000 <4>[ 3069.977547] FS: 0000000000000000(0000) GS:ffff88017fc80000(0000) knlGS:0000000000000000 <4>[ 3069.977551] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4>[ 3069.977555] CR2: 00007f7e8b7bcf04 CR3: 0000000003e0f000 CR4: 00000000003406e0 <4>[ 3069.977559] Call Trace: <4>[ 3069.977565] ? mark_held_locks+0x64/0x90 <4>[ 3069.977571] ? _raw_spin_unlock_irq+0x24/0x50 <4>[ 3069.977575] ? _raw_spin_unlock_irq+0x24/0x50 <4>[ 3069.977579] ? trace_hardirqs_on_caller+0xde/0x1c0 <4>[ 3069.977583] ? _raw_spin_unlock_irq+0x2f/0x50 <4>[ 3069.977588] ? finish_task_switch+0xa5/0x210 <4>[ 3069.977592] ? lock_acquire+0xaf/0x200 <4>[ 3069.977596] lock_acquire+0xaf/0x200 <4>[ 3069.977600] ? __mutex_lock+0x5e9/0x9b0 <4>[ 3069.977604] _raw_spin_lock+0x2a/0x40 <4>[ 3069.977608] ? __mutex_lock+0x5e9/0x9b0 <4>[ 3069.977612] __mutex_lock+0x5e9/0x9b0 <4>[ 3069.977616] ? drm_fb_helper_hotplug_event.part.19+0x16/0xa0 <4>[ 3069.977621] ? drm_fb_helper_hotplug_event.part.19+0x16/0xa0 <4>[ 3069.977625] drm_fb_helper_hotplug_event.part.19+0x16/0xa0 <4>[ 3069.977630] output_poll_execute+0x8d/0x180 <4>[ 3069.977635] process_one_work+0x22e/0x660 <4>[ 3069.977640] worker_thread+0x48/0x3a0 <4>[ 3069.977644] ? _raw_spin_unlock_irqrestore+0x4c/0x60 <4>[ 3069.977649] kthread+0x102/0x140 <4>[ 3069.977653] ? process_one_work+0x660/0x660 <4>[ 3069.977657] ? kthread_create_on_node+0x40/0x40 <4>[ 3069.977662] ret_from_fork+0x27/0x40 <4>[ 3069.977666] Code: 8d 62 f8 c3 49 81 3c 24 e0 fa 3c 82 41 be 00 00 00 00 45 0f 45 f0 83 fe 01 77 86 89 f0 49 8b 44 c4 08 48 85 c0 0f 84 76 ff ff ff ff 80 38 01 00 00 8b 1d 62 f9 e8 01 45 8b 85 b8 08 00 00 85 <1>[ 3069.977707] RIP: __lock_acquire+0x109/0x1b60 RSP: ffffc90001fe7bb0 <4>[ 3069.977712] ---[ end trace 4ad012eb3af62df7 ]--- In order to keep the dev_priv->ifbdev alive after failure, we have to avoid the free and leave it empty until we unload the module (which is less than ideal, but a necessary evil for simplicity). Then we can use intel_fbdev_sync() to serialise the hotplug event with the configuration. 
The serialisation between the two was removed in commit 934458c2c95d ("Revert "drm/i915: Fix races on fbdev""), but the use after free is much older, commit 366e39b4d2c5 ("drm/i915: Tear down fbdev if initialization fails"). Fixes: 366e39b4d2c5 ("drm/i915: Tear down fbdev if initialization fails") Fixes: 934458c2c95d ("Revert "drm/i915: Fix races on fbdev"") Signed-off-by: Chris Wilson Cc: Lukas Wunner Cc: Joonas Lahtinen Cc: Daniel Vetter Reviewed-by: Lukas Wunner Link: https://patchwork.freedesktop.org/patch/msgid/20171125194155.355-1-chris@chris-wilson.co.uk (cherry picked from commit ad88d7fc6c032ddfb32b8d496a070ab71de3a64f) Signed-off-by: Joonas Lahtinen Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_fbdev.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 262e75c00dd2f6..da2d309574ba94 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -694,10 +694,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) /* Due to peculiar init order wrt to hpd handling this is separate. */ if (drm_fb_helper_initial_config(&ifbdev->helper, - ifbdev->preferred_bpp)) { + ifbdev->preferred_bpp)) intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); - intel_fbdev_fini(to_i915(ifbdev->helper.dev)); - } } void intel_fbdev_initial_config_async(struct drm_device *dev) @@ -797,7 +795,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; - if (ifbdev) + if (!ifbdev) + return; + + intel_fbdev_sync(ifbdev); + if (ifbdev->vma) drm_fb_helper_hotplug_event(&ifbdev->helper); } From 6c0d3d1d5908f5cd5c838c99663238cac201953b Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Tue, 28 Nov 2017 07:29:54 +0800 Subject: [PATCH 0341/3326] drm/i915/gvt: Correct ADDR_4K/2M/1G_MASK definition commit b721b65af4eb46df6a1d9e34b14003225e403565 upstream. For ADDR_4K_MASK, bit[45..12] should be 1, all other bits should be 0. The current definition wrongly sets bit[46] to 1 as well. This patch fixes this.
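Working through the arithmetic with GTT_HAW = 46 makes the off-by-one visible (values only, the actual change is in the gtt.c hunk below):

/*
 * old: (((1UL << (46 - 12 + 1)) - 1) << 12)  sets bits [46..12], one bit too many
 * new: (((1UL << (46 - 12)) - 1) << 12)      sets bits [45..12], as intended
 *
 * The same reasoning gives bits [45..21] for the 2M mask and bits [45..30]
 * for the 1G mask.
 */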
v2: Add commit message, fixes and cc stable.(Zhenyu) Fixes: 2707e4446688("drm/i915/gvt: vGPU graphics memory virtualization") Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/gvt/gtt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index e6dfc3331f4bb4..a385838e2919e3 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt, #define GTT_HAW 46 -#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30) -#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21) -#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12) +#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30) +#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21) +#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12) static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) { From e522b9247ce931a7319f1d7e9d0bdfa5817b9a4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 23 Nov 2017 21:41:56 +0200 Subject: [PATCH 0342/3326] drm/i915: Don't try indexed reads to alternate slave addresses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit ae5c631e605a452a5a0e73205a92810c01ed954b upstream. We can only specify the one slave address to indexed reads/writes. Make sure the messages we check are destined to the same slave address before deciding to do an indexed transfer. Cc: Daniel Kurtz Cc: Chris Wilson Cc: Daniel Vetter Cc: Sean Paul Fixes: 56f9eac05489 ("drm/i915/intel_i2c: use INDEX cycles for i2c read transactions") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171123194157.25367-2-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson (cherry picked from commit c4deb62d7821672265b87952bcd1c808f3bf3e8f) Signed-off-by: Joonas Lahtinen Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_i2c.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index eb5827110d8ffc..8affd47b98b814 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -438,6 +438,7 @@ static bool gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) { return (i + 1 < num && + msgs[i].addr == msgs[i + 1].addr && !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 && (msgs[i + 1].flags & I2C_M_RD)); } From 88e1bb2a24125747d48b5e2d60bccdb247b86eb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 23 Nov 2017 21:41:57 +0200 Subject: [PATCH 0343/3326] drm/i915: Prevent zero length "index" write MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 56350fb8978bbf4aafe08f21234e161dd128b417 upstream. The hardware always writes one or two bytes in the index portion of an indexed transfer. Make sure the message we send as the index doesn't have a zero length. 
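For illustration, a message pair that still qualifies as an indexed GMBUS read after the two checks in this pair of patches (hypothetical slave address and register offset; gmbus_is_index_read() is the helper touched by the hunks above and below):

u8 index = 0x54;	/* hypothetical register offset */
u8 val;
struct i2c_msg msgs[] = {
	{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &index },
	{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val   },
};
/* Same slave address, a one or two byte write, then a read: indexed transfer.
 * A zero-length first message or differing .addr values now fall back to
 * ordinary separate GMBUS cycles. */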
Cc: Daniel Kurtz Cc: Chris Wilson Cc: Daniel Vetter Cc: Sean Paul Fixes: 56f9eac05489 ("drm/i915/intel_i2c: use INDEX cycles for i2c read transactions") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171123194157.25367-3-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson (cherry picked from commit bb9e0d4bca50f429152e74a459160b41f3d60fb2) Signed-off-by: Joonas Lahtinen Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_i2c.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 8affd47b98b814..49fdf09f9919c8 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -439,7 +439,8 @@ gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) { return (i + 1 < num && msgs[i].addr == msgs[i + 1].addr && - !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 && + !(msgs[i].flags & I2C_M_RD) && + (msgs[i].len == 1 || msgs[i].len == 2) && (msgs[i + 1].flags & I2C_M_RD)); } From e49d722f26b904015077655bff47cdbde049226a Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 4 Dec 2017 12:59:57 +0100 Subject: [PATCH 0344/3326] Revert "x86/entry/64: Add missing irqflags tracing to native_load_gs_index()" This reverts commit f9a64e23a9da528e7d8aa1bd2c7bb92be4ebb724 which is commit 0d794d0d018f23fb09c50f6ae26868bd6ae343d6 upstream. Andy writes: I think the thing to do is to revert the patch from -stable. The bug it fixes is very minor, and the regression is that it made a pre-existing bug in some nearly-undebuggable core resume code much easier to hit. I don't feel comfortable with a backport of the latter fix until it has a good long soak in Linus' tree. Reported-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 518d9286b3d116..2e956afe272c09 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -51,19 +51,15 @@ ENTRY(native_usergs_sysret64) END(native_usergs_sysret64) #endif /* CONFIG_PARAVIRT */ -.macro TRACE_IRQS_FLAGS flags:req +.macro TRACE_IRQS_IRETQ #ifdef CONFIG_TRACE_IRQFLAGS - bt $9, \flags /* interrupts off? */ + bt $9, EFLAGS(%rsp) /* interrupts off? 
*/ jnc 1f TRACE_IRQS_ON 1: #endif .endm -.macro TRACE_IRQS_IRETQ - TRACE_IRQS_FLAGS EFLAGS(%rsp) -.endm - /* * When dynamic function tracer is enabled it will add a breakpoint * to all locations that it is about to modify, sync CPUs, update @@ -927,13 +923,11 @@ ENTRY(native_load_gs_index) FRAME_BEGIN pushfq DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) - TRACE_IRQS_OFF SWAPGS .Lgs_change: movl %edi, %gs 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE SWAPGS - TRACE_IRQS_FLAGS (%rsp) popfq FRAME_END ret From 51a2a68fde2035887c0d74aee1c9569c691dfd61 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 5 Dec 2017 11:26:38 +0100 Subject: [PATCH 0345/3326] Linux 4.14.4 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ede4de0d86344a..ba1648c093fea2 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 3 +SUBLEVEL = 4 EXTRAVERSION = NAME = Petit Gorille From c8111b1885d3a802e57f895e77aaa1e88de57282 Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Thu, 9 Nov 2017 15:39:56 +0100 Subject: [PATCH 0346/3326] drm/fsl-dcu: avoid disabling pixel clock twice on suspend commit 9306e996574f7f57136a62e49cd0075f85713623 upstream. With commit 0a70c998d0c5 ("drm/fsl-dcu: enable pixel clock when enabling CRTC") the pixel clock is controlled by the CRTC code. Disabling the pixel clock in suspend leads to a warning due to the second clk_disable_unprepare call: WARNING: CPU: 0 PID: 359 at drivers/clk/clk.c:594 clk_core_disable+0x8c/0x90 Remove clk_disable_unprepare call for pixel clock to avoid unbalanced clock disable on suspend. Fixes: 0a70c998d0c5 ("drm/fsl-dcu: enable pixel clock when enabling CRTC") Signed-off-by: Stefan Agner Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 58e9e0601a616b..90ad509c7c453c 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev) return PTR_ERR(fsl_dev->state); } - clk_disable_unprepare(fsl_dev->pix_clk); clk_disable_unprepare(fsl_dev->clk); return 0; From bacbe44889828852678bc74ad0e942a7684f206f Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Fri, 10 Nov 2017 10:15:28 +0100 Subject: [PATCH 0347/3326] drm/fsl-dcu: enable IRQ before drm_atomic_helper_resume() commit 9fd99f4f3f5e13ce959900ae57d64b1bdb51d823 upstream. The resume helpers wait for a vblank to occur, hence IRQs need to be enabled.
This avoids a warning as follows during resume: WARNING: CPU: 0 PID: 314 at drivers/gpu/drm/drm_atomic_helper.c:1249 drm_atomic_helper_wait_for_vblanks.part.1+0x284/0x288 [CRTC:28:crtc-0] vblank wait timed out Signed-off-by: Stefan Agner Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 90ad509c7c453c..faf17b83b910df 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -232,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) if (fsl_dev->tcon) fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_dcu_drm_init_planes(fsl_dev->drm); + enable_irq(fsl_dev->irq); drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); console_lock(); @@ -239,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) console_unlock(); drm_kms_helper_poll_enable(fsl_dev->drm); - enable_irq(fsl_dev->irq); return 0; } From 955c907b978589fbd34e9eaaf53527d8cbf589b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Wed, 22 Nov 2017 15:55:21 +0100 Subject: [PATCH 0348/3326] drm/amdgpu: Use unsigned ring indices in amdgpu_queue_mgr_map MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit fa7c7939b4bf112cd06ba166b739244077898990 upstream. This matches the corresponding UAPI fields. Treating the ring index as signed could result in accessing random unrelated memory if the MSB was set. Fixes: effd924d2f3b ("drm/amdgpu: untie user ring ids from kernel ring ids v6") Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 87801faaf2648a..712ad8c2bdc5d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -697,7 +697,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, struct amdgpu_queue_mgr *mgr); int amdgpu_queue_mgr_map(struct amdgpu_device *adev, struct amdgpu_queue_mgr *mgr, - int hw_ip, int instance, int ring, + u32 hw_ip, u32 instance, u32 ring, struct amdgpu_ring **out_ring); /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c index befc09b68543cf..b293380bd46c2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c @@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper, static int amdgpu_identity_map(struct amdgpu_device *adev, struct amdgpu_queue_mapper *mapper, - int ring, + u32 ring, struct amdgpu_ring **out_ring) { switch (mapper->hw_ip) { @@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip) static int amdgpu_lru_map(struct amdgpu_device *adev, struct amdgpu_queue_mapper *mapper, - int user_ring, + u32 user_ring, struct amdgpu_ring **out_ring) { int r, i, j; @@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, */ int amdgpu_queue_mgr_map(struct amdgpu_device *adev, struct amdgpu_queue_mgr *mgr, - int hw_ip, int instance, int ring, + u32 hw_ip, u32 instance, u32 ring, struct amdgpu_ring **out_ring) { int r, ip_num_rings; From 
b8e212c599082896a180a18a0c9bd529526590be Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 11 Sep 2017 11:24:22 +0200 Subject: [PATCH 0349/3326] s390/runtime instrumentation: simplify task exit handling commit 8d9047f8b967ce6181fd824ae922978e1b055cc0 upstream. Free data structures required for runtime instrumentation from arch_release_task_struct(). This allows to simplify the code a bit, and also makes the semantics a bit easier: arch_release_task_struct() is never called from the task that is being removed. In addition this allows to get rid of exit_thread() in a later patch. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- arch/s390/include/asm/runtime_instr.h | 4 +++- arch/s390/kernel/process.c | 5 ++--- arch/s390/kernel/runtime_instr.c | 30 +++++++++++++-------------- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h index ea8896ba5afc77..2502d05403ef4e 100644 --- a/arch/s390/include/asm/runtime_instr.h +++ b/arch/s390/include/asm/runtime_instr.h @@ -86,6 +86,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, load_runtime_instr_cb(&runtime_instr_empty_cb); } -void exit_thread_runtime_instr(void); +struct task_struct; + +void runtime_instr_release(struct task_struct *tsk); #endif /* _RUNTIME_INSTR_H */ diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 203b7cd7c34896..7d4c5500c6c214 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -49,10 +49,8 @@ extern void kernel_thread_starter(void); */ void exit_thread(struct task_struct *tsk) { - if (tsk == current) { - exit_thread_runtime_instr(); + if (tsk == current) exit_thread_gs(); - } } void flush_thread(void) @@ -65,6 +63,7 @@ void release_thread(struct task_struct *dead_task) void arch_release_task_struct(struct task_struct *tsk) { + runtime_instr_release(tsk); } int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index d85c64821a6b1b..94c9ba72cf8354 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -21,11 +21,24 @@ /* empty control block to disable RI by loading it */ struct runtime_instr_cb runtime_instr_empty_cb; +void runtime_instr_release(struct task_struct *tsk) +{ + kfree(tsk->thread.ri_cb); +} + static void disable_runtime_instr(void) { - struct pt_regs *regs = task_pt_regs(current); + struct task_struct *task = current; + struct pt_regs *regs; + if (!task->thread.ri_cb) + return; + regs = task_pt_regs(task); + preempt_disable(); load_runtime_instr_cb(&runtime_instr_empty_cb); + kfree(task->thread.ri_cb); + task->thread.ri_cb = NULL; + preempt_enable(); /* * Make sure the RI bit is deleted from the PSW. 
If the user did not @@ -46,19 +59,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) cb->valid = 1; } -void exit_thread_runtime_instr(void) -{ - struct task_struct *task = current; - - preempt_disable(); - if (!task->thread.ri_cb) - return; - disable_runtime_instr(); - kfree(task->thread.ri_cb); - task->thread.ri_cb = NULL; - preempt_enable(); -} - SYSCALL_DEFINE1(s390_runtime_instr, int, command) { struct runtime_instr_cb *cb; @@ -67,7 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command) return -EOPNOTSUPP; if (command == S390_RUNTIME_INSTR_STOP) { - exit_thread_runtime_instr(); + disable_runtime_instr(); return 0; } From e0264c662ca8ba118715072927af08d96fd90a36 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Wed, 29 Nov 2017 15:24:22 -0700 Subject: [PATCH 0350/3326] usbip: fix usbip attach to find a port that matches the requested speed commit 1ac7c8a78be85f84b019d3d2742d1a9f07255cc5 upstream. usbip attach fails to find a free port when the device on the first port is a USB_SPEED_SUPER device and non-super speed device is being attached. It keeps checking the first port and returns without a match getting stuck in a loop. Fix it check to find the first port with matching speed. Reported-by: Juan Zea Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- tools/usb/usbip/libsrc/vhci_driver.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c index 5727dfb15a83ef..8a1cd1616de419 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.c +++ b/tools/usb/usbip/libsrc/vhci_driver.c @@ -329,9 +329,17 @@ int usbip_vhci_refresh_device_list(void) int usbip_vhci_get_free_port(uint32_t speed) { for (int i = 0; i < vhci_driver->nports; i++) { - if (speed == USB_SPEED_SUPER && - vhci_driver->idev[i].hub != HUB_SPEED_SUPER) - continue; + + switch (speed) { + case USB_SPEED_SUPER: + if (vhci_driver->idev[i].hub != HUB_SPEED_SUPER) + continue; + break; + default: + if (vhci_driver->idev[i].hub != HUB_SPEED_HIGH) + continue; + break; + } if (vhci_driver->idev[i].status == VDEV_ST_NULL) return vhci_driver->idev[i].port; From b76f8338123f06c565574a5f36964f482db829a3 Mon Sep 17 00:00:00 2001 From: Yuyang Du Date: Thu, 30 Nov 2017 10:22:40 +0800 Subject: [PATCH 0351/3326] usbip: Fix USB device hang due to wrong enabling of scatter-gather commit 770b2edece42fa55bbe7d4cbe53347a07b8968d4 upstream. The previous USB3 SuperSpeed enabling patches mistakenly enabled URB scatter-gather chaining, which is actually not supported by the VHCI HCD. This patch fixes that. 
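As background for the one-line removal in the hunk that follows: hcd->self.sg_tablesize is how a host controller driver advertises scatter-gather support to the USB core, and vhci-hcd's URB handling never walks urb->sg. A minimal sketch of the convention, with an invented function name and config symbol, might look like this:

	#include <linux/usb/hcd.h>

	/* Illustrative only, not the vhci code. */
	static int example_hcd_setup(struct usb_hcd *hcd)
	{
		/*
		 * Advertise SG only if urb_enqueue() really iterates
		 * urb->sg / urb->num_sgs; otherwise leave sg_tablesize
		 * at 0 (its value after hcd allocation) so callers do
		 * not hand over chained buffers.
		 */
		if (IS_ENABLED(CONFIG_EXAMPLE_HCD_HANDLES_SG))
			hcd->self.sg_tablesize = ~0;	/* no entry limit */
		return 0;
	}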
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=197867 Fixes: 03cd00d538a6feb ("usbip: vhci-hcd: Set the vhci structure up to work") Reported-by: Juan Zea Signed-off-by: Yuyang Du Acked-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/vhci_hcd.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 11b9a22799ccb3..1f0cf81cc14576 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -1112,7 +1112,6 @@ static int hcd_name_to_id(const char *name) static int vhci_setup(struct usb_hcd *hcd) { struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller)); - hcd->self.sg_tablesize = ~0; if (usb_hcd_is_primary_hcd(hcd)) { vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd); vhci->vhci_hcd_hs->vhci = vhci; From b6e6e58814d5583bef5c75734324b27f77f70f1b Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 14 Nov 2017 19:27:22 +0100 Subject: [PATCH 0352/3326] uas: Always apply US_FL_NO_ATA_1X quirk to Seagate devices commit 7fee72d5e8f1e7b8d8212e28291b1a0243ecf2f1 upstream. We've been adding this as a quirk on a per device basis hoping that newer disk enclosures would do better, but that has not happened, so simply apply this quirk to all Seagate devices. Signed-off-by: Hans de Goede Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/uas-detect.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index 1fcd758a961f5e..3734a25e09e539 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -112,6 +112,10 @@ static int uas_use_uas_driver(struct usb_interface *intf, } } + /* All Seagate disk enclosures have broken ATA pass-through support */ + if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2) + flags |= US_FL_NO_ATA_1X; + usb_stor_adjust_quirks(udev, &flags); if (flags & US_FL_IGNORE_UAS) { From 502ae7582aacc85ffd38fa820e4c734a230ccc25 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Tue, 14 Nov 2017 01:31:15 -0500 Subject: [PATCH 0353/3326] usb: quirks: Add no-lpm quirk for KY-688 USB 3.1 Type-C Hub commit e43a12f1793ae1fe006e26fe9327a8840a92233c upstream. KY-688 USB 3.1 Type-C Hub internally uses a Genesys Logic hub to connect to Realtek r8153. Similar to commit ("7496cfe5431f2 usb: quirks: Add no-lpm quirk for Moshi USB to Ethernet Adapter"), no-lpm can make r8153 ethernet work. Signed-off-by: Kai-Heng Feng Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 37c418e581fbe3..50010282c010f6 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */ + { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM }, + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, From 45c17d9b6ea66925c8aadbb2973c48d668f673fc Mon Sep 17 00:00:00 2001 From: Matt Wilson Date: Mon, 13 Nov 2017 11:31:31 -0800 Subject: [PATCH 0354/3326] serial: 8250_pci: Add Amazon PCI serial device ID commit 3bfd1300abfe3adb18e84a89d97a0e82a22124bb upstream. 
This device will be used in future Amazon EC2 instances as the primary serial port (i.e., data sent to this port will be available via the GetConsoleOuput [1] EC2 API). [1] http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetConsoleOutput.html Signed-off-by: Matt Wilson Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_pci.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 0c101a7470b0ee..d4e7be88e0da31 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5137,6 +5137,9 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 }, { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 }, + /* Amazon PCI serial device */ + { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 }, + /* * These entries match devices with class COMMUNICATION_SERIAL, * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL From 22ad53793edbc73a5a12f6960749bc967551fde4 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Mon, 13 Nov 2017 10:06:08 +0100 Subject: [PATCH 0355/3326] ANDROID: binder: fix transaction leak. commit fb2c445277e7b0b4ffe10de8114bad4eccaca948 upstream. If a call to put_user() fails, we failed to properly free a transaction and send a failed reply (if necessary). Signed-off-by: Martijn Coenen Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 40 +++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index fddf76ef5bd6d8..88b4bbe58100d6 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -1947,6 +1947,26 @@ static void binder_send_failed_reply(struct binder_transaction *t, } } +/** + * binder_cleanup_transaction() - cleans up undelivered transaction + * @t: transaction that needs to be cleaned up + * @reason: reason the transaction wasn't delivered + * @error_code: error to return to caller (if synchronous call) + */ +static void binder_cleanup_transaction(struct binder_transaction *t, + const char *reason, + uint32_t error_code) +{ + if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { + binder_send_failed_reply(t, error_code); + } else { + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "undelivered transaction %d, %s\n", + t->debug_id, reason); + binder_free_transaction(t); + } +} + /** * binder_validate_object() - checks for a valid metadata object in a buffer. * @buffer: binder_buffer that we're parsing. 
@@ -4015,12 +4035,20 @@ static int binder_thread_read(struct binder_proc *proc, if (put_user(cmd, (uint32_t __user *)ptr)) { if (t_from) binder_thread_dec_tmpref(t_from); + + binder_cleanup_transaction(t, "put_user failed", + BR_FAILED_REPLY); + return -EFAULT; } ptr += sizeof(uint32_t); if (copy_to_user(ptr, &tr, sizeof(tr))) { if (t_from) binder_thread_dec_tmpref(t_from); + + binder_cleanup_transaction(t, "copy_to_user failed", + BR_FAILED_REPLY); + return -EFAULT; } ptr += sizeof(tr); @@ -4090,15 +4118,9 @@ static void binder_release_work(struct binder_proc *proc, struct binder_transaction *t; t = container_of(w, struct binder_transaction, work); - if (t->buffer->target_node && - !(t->flags & TF_ONE_WAY)) { - binder_send_failed_reply(t, BR_DEAD_REPLY); - } else { - binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, - "undelivered transaction %d\n", - t->debug_id); - binder_free_transaction(t); - } + + binder_cleanup_transaction(t, "process died.", + BR_DEAD_REPLY); } break; case BINDER_WORK_RETURN_ERROR: { struct binder_error *e = container_of( From b200d9899afe672d66c0d8d618c63455d21913ea Mon Sep 17 00:00:00 2001 From: Sebastian Sjoholm Date: Mon, 20 Nov 2017 19:29:32 +0100 Subject: [PATCH 0356/3326] USB: serial: option: add Quectel BG96 id commit c654b21ede93845863597de9ad774fd30db5f2ab upstream. Quectel BG96 is an Qualcomm MDM9206 based IoT modem, supporting both CAT-M and NB-IoT. Tested hardware is BG96 mounted on Quectel development board (EVB). The USB id is added to option.c to allow DIAG,GPS,AT and modem communication with the BG96. Signed-off-by: Sebastian Sjoholm Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ba672cf4e888cf..54e316b1892d73 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC25 0x0125 +#define QUECTEL_PRODUCT_BG96 0x0296 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -1185,6 +1186,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), From 2a53dae2e338bcd32d8fac11ee87c01ab873fec3 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 28 Nov 2017 12:40:59 +0800 Subject: [PATCH 0357/3326] USB: serial: usb_debug: add new USB device id commit 762ff4678e89a5e3f8b2237533e04d3ef2737e78 upstream. USB vendor id and product id for Linux USB Debug Target is added. 
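For context, the new 0x1d6b:0x0010 pair follows the stock usb_device_id pattern; the sketch below is hypothetical (the table name is invented) but uses only the standard USB_DEVICE() and MODULE_DEVICE_TABLE() macros, which is what lets the USB core match the device and modprobe autoload the driver:

	#include <linux/module.h>
	#include <linux/usb.h>

	static const struct usb_device_id example_dbc_ids[] = {
		{ USB_DEVICE(0x1d6b, 0x0010) },	/* Linux USB Debug Target (DbC) */
		{ USB_DEVICE(0x1d6b, 0x0011) },
		{ }	/* terminating all-zero entry */
	};
	MODULE_DEVICE_TABLE(usb, example_dbc_ids);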
Signed-off-by: Lu Baolu Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/usb_debug.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c index 48f285a1ad00bd..c593ca8800e591 100644 --- a/drivers/usb/serial/usb_debug.c +++ b/drivers/usb/serial/usb_debug.c @@ -34,12 +34,14 @@ static const struct usb_device_id id_table[] = { }; static const struct usb_device_id dbc_id_table[] = { + { USB_DEVICE(0x1d6b, 0x0010) }, { USB_DEVICE(0x1d6b, 0x0011) }, { }, }; static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(0x0525, 0x127a) }, + { USB_DEVICE(0x1d6b, 0x0010) }, { USB_DEVICE(0x1d6b, 0x0011) }, { }, }; From 64dbff3359fc3865a000d051143ade71aea1242b Mon Sep 17 00:00:00 2001 From: Matt Redfearn Date: Wed, 22 Nov 2017 09:57:28 +0000 Subject: [PATCH 0358/3326] serial: 8250_early: Only set divisor if valid clk & baud commit 0ff3ab701963a845d52337ded7eebf2d1a14fe00 upstream. If either uartclk or baud are 0, avoid calculating and setting a divisor based on them since the output will almost certainly be garbage. This also allows platforms such as the MIPS generic kernel, which has no way to know a valid BASE_BASE for the board it is actually booted on at compile time, to set BASE_BAUD to 0 and avoid early_8250 setting a bad divisor. This fixes a regression caused by commit 31cb9a8575ca ("earlycon: initialise baud field of earlycon device structure"), which changed the behavior of of_setup_earlycon such that it sets a baud rate in the earlycon structure where previously it was left as 0. All boards supported by the MIPS generic kernel started outputting garbage from the boot console due to an incorrect divisor being set. Fixes: 31cb9a8575ca ("earlycon: initialise baud field of earlycon device structure") Signed-off-by: Matt Redfearn Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_early.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c index af72ec32e40473..f135c1846477e4 100644 --- a/drivers/tty/serial/8250/8250_early.c +++ b/drivers/tty/serial/8250/8250_early.c @@ -125,12 +125,14 @@ static void __init init_port(struct earlycon_device *device) serial8250_early_out(port, UART_FCR, 0); /* no fifo */ serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */ - divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud); - c = serial8250_early_in(port, UART_LCR); - serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB); - serial8250_early_out(port, UART_DLL, divisor & 0xff); - serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff); - serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB); + if (port->uartclk && device->baud) { + divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud); + c = serial8250_early_in(port, UART_LCR); + serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB); + serial8250_early_out(port, UART_DLL, divisor & 0xff); + serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff); + serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB); + } } int __init early_serial8250_setup(struct earlycon_device *device, From 07c925116820b3def5610a917cad3901d69d18d9 Mon Sep 17 00:00:00 2001 From: Matt Redfearn Date: Tue, 28 Nov 2017 15:22:20 +0000 Subject: [PATCH 0359/3326] MIPS: Add custom serial.h with BASE_BAUD override for generic kernel commit c8ec2041f549e7f2dee0c34d25381be6f7805f99 upstream. 
Add a custom serial.h header for MIPS, allowing platforms to override the asm-generic version if required. The generic platform uses this header to set BASE_BAUD to 0. The generic platform supports multiple boards, which may have different UART clocks. Also one of the boards supported is the Boston FPGA board, where the UART clock depends on the loaded FPGA bitfile. As such there is no way that the generic kernel can set a compile time default BASE_BAUD. Commit 31cb9a8575ca ("earlycon: initialise baud field of earlycon device structure") changed the behavior of of_setup_earlycon such that any baud rate set in the device tree is now set in the earlycon structure. The UART driver will then calculate a divisor based on BASE_BAUD and set it. With MIPS generic kernels this resulted in garbage output due to the incorrect uart clock rate being used to calculate a divisor. This commit, combined with "serial: 8250_early: Only set divisor if valid clk & baud" prevents the earlycon code setting a bad divisor and restores earlycon output. Fixes: 31cb9a8575ca ("earlycon: initialise baud field of earlycon device structure") Signed-off-by: Matt Redfearn Signed-off-by: Greg Kroah-Hartman --- arch/mips/include/asm/Kbuild | 1 - arch/mips/include/asm/serial.h | 22 ++++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 arch/mips/include/asm/serial.h diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 7c8aab23bce8da..b1f66699677dba 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += qrwlock.h generic-y += qspinlock.h generic-y += sections.h generic-y += segment.h -generic-y += serial.h generic-y += trace_clock.h generic-y += unaligned.h generic-y += user.h diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h new file mode 100644 index 00000000000000..1d830c6666c273 --- /dev/null +++ b/arch/mips/include/asm/serial.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2017 MIPS Tech, LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __ASM__SERIAL_H +#define __ASM__SERIAL_H + +#ifdef CONFIG_MIPS_GENERIC +/* + * Generic kernels cannot know a correct value for all platforms at + * compile time. Set it to 0 to prevent 8250_early using it + */ +#define BASE_BAUD 0 +#else +#include +#endif + +#endif /* __ASM__SERIAL_H */ From 7c503475ae2aa08c10e50e56c9e12efe6415eefa Mon Sep 17 00:00:00 2001 From: Boshi Wang Date: Fri, 20 Oct 2017 16:01:03 +0800 Subject: [PATCH 0360/3326] ima: fix hash algorithm initialization [ Upstream commit ebe7c0a7be92bbd34c6ff5b55810546a0ee05bee ] The hash_setup function always sets the hash_setup_done flag, even when the hash algorithm is invalid. This prevents the default hash algorithm defined as CONFIG_IMA_DEFAULT_HASH from being used. This patch sets hash_setup_done flag only for valid hash algorithms. 
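The pattern being corrected generalizes to any __setup() parser: validate the string before latching the "configured" flag, so bad input falls back to the compile-time default. A minimal sketch under invented example_* names:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/string.h>

	static const char * const example_algos[] = { "sha1", "sha256", "md5" };
	static int example_algo;	/* index of the compile-time default */
	static int example_setup_done;

	static int __init example_hash_setup(char *str)
	{
		int i;

		if (example_setup_done)
			return 1;

		i = match_string(example_algos, ARRAY_SIZE(example_algos), str);
		if (i < 0)
			return 1;	/* unknown name: keep the default */

		example_algo = i;
		example_setup_done = 1;	/* latch only on valid input */
		return 1;
	}
	__setup("example_hash=", example_hash_setup);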
Fixes: e7a2ad7eb6f4 "ima: enable support for larger default filedata hash algorithms" Signed-off-by: Boshi Wang Signed-off-by: Mimi Zohar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- security/integrity/ima/ima_main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 2aebb7984437f1..ab70a395f4903e 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -51,6 +51,8 @@ static int __init hash_setup(char *str) ima_hash_algo = HASH_ALGO_SHA1; else if (strncmp(str, "md5", 3) == 0) ima_hash_algo = HASH_ALGO_MD5; + else + return 1; goto out; } @@ -60,6 +62,8 @@ static int __init hash_setup(char *str) break; } } + if (i == HASH_ALGO__LAST) + return 1; out: hash_setup_done = 1; return 1; From f0bf7b7396a474cf3d61ebd7669bb20f914289fc Mon Sep 17 00:00:00 2001 From: "Jason J. Herne" Date: Tue, 7 Nov 2017 10:22:32 -0500 Subject: [PATCH 0361/3326] s390: vfio-ccw: Do not attempt to free no-op, test and tic cda. [ Upstream commit 408358b50deaf59b07c82a7bff8c7e7cce031fae ] Because we do not make use of the cda (channel data address) for test, no-op ccws no address translation takes place. This means cda could contain a guest address which we do not want to attempt to free. Let's check the command type and skip cda free when it is not needed. For a TIC ccw, ccw->cda points to either a ccw in an existing chain or it points to a whole new allocated chain. In either case the data will be freed when the owning chain is freed. Signed-off-by: Jason J. Herne Reviewed-by: Dong Jia Shi Reviewed-by: Pierre Morel Message-Id: <1510068152-21988-1-git-send-email-jjherne@linux.vnet.ibm.com> Reviewed-by: Halil Pasic Acked-by: Christian Borntraeger Signed-off-by: Cornelia Huck Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/s390/cio/vfio_ccw_cp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index f20b4d66c75f7f..4a39b54732d031 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -330,6 +330,8 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx) { struct ccw1 *ccw = chain->ch_ccw + idx; + if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw)) + return; if (!ccw->count) return; From ca04b90f9d8e053e81f630610076d00239129c1f Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 8 Nov 2017 10:11:02 +0100 Subject: [PATCH 0362/3326] PM / Domains: Fix genpd to deal with drivers returning 1 from ->prepare() [ Upstream commit 5241ab40f6e742f8a1631f8826faf6dc6412b3b5 ] During system-wide PM, genpd relies on its PM callbacks to be invoked for all its attached devices, as to deal with powering off/on the PM domain. In other words, genpd is not compatible with the direct_complete path, if executed by the PM core for any of its attached devices. However, when genpd's ->prepare() callback invokes pm_generic_prepare(), it does not take into account that it may return 1. Instead it treats that as an error internally and expects the PM core to abort the prepare phase and roll back. This leads to genpd not properly powering on/off the PM domain, because its internal counters gets wrongly balanced. To fix the behaviour, allow drivers to return 1 from their ->prepare() callbacks, but let's return 0 from genpd's ->prepare() callback in such case, as that prevents the PM core from running the direct_complete path for the device. 
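To make the convention concrete: a driver ->prepare() callback may return 1 to ask for the direct_complete optimization, and a middle layer that cannot handle that path should translate the positive value into plain success rather than an error. A hedged sketch of both halves (the function names are hypothetical; the clamp mirrors the hunk that follows):

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	/* Driver side: request direct_complete when already runtime suspended. */
	static int example_drv_prepare(struct device *dev)
	{
		return pm_runtime_suspended(dev) ? 1 : 0;
	}

	/* Domain side: 1 means "fine, but skip direct_complete", never an error. */
	static int example_domain_prepare(struct device *dev)
	{
		int ret = pm_generic_prepare(dev);

		return ret >= 0 ? 0 : ret;
	}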
Signed-off-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/base/power/domain.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e8ca5e2cf1e51e..70f8904f46a31d 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -921,7 +921,7 @@ static int pm_genpd_prepare(struct device *dev) genpd_unlock(genpd); ret = pm_generic_prepare(dev); - if (ret) { + if (ret < 0) { genpd_lock(genpd); genpd->prepared_count--; @@ -929,7 +929,8 @@ static int pm_genpd_prepare(struct device *dev) genpd_unlock(genpd); } - return ret; + /* Never return 1, as genpd don't cope with the direct_complete path. */ + return ret >= 0 ? 0 : ret; } /** From 3dde14f7f78b337d43fc1a6877cb283023121f2f Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 30 Oct 2017 14:38:58 +0100 Subject: [PATCH 0363/3326] s390/pci: do not require AIS facility [ Upstream commit 48070c73058be6de9c0d754d441ed7092dfc8f12 ] As of today QEMU does not provide the AIS facility to its guest. This prevents Linux guests from using PCI devices as the ais facility is checked during init. As this is just a performance optimization, we can move the ais check into the code where we need it (calling the SIC instruction). This is used at initialization and on interrupt. Both places do not require any serialization, so we can simply skip the instruction. Since we will now get all interrupts, we can also avoid the 2nd scan. As we can have multiple interrupts in parallel we might trigger spurious irqs more often for the non-AIS case but the core code can handle that. Signed-off-by: Christian Borntraeger Reviewed-by: Pierre Morel Reviewed-by: Halil Pasic Acked-by: Sebastian Ott Signed-off-by: Heiko Carstens Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/s390/include/asm/pci_insn.h | 2 +- arch/s390/pci/pci.c | 5 +++-- arch/s390/pci/pci_insn.c | 6 +++++- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index 419e83fa47217f..ba22a6ea51a144 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -82,6 +82,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range); int zpci_load(u64 *data, u64 req, u64 offset); int zpci_store(u64 data, u64 req, u64 offset); int zpci_store_block(const u64 *data, u64 req, u64 offset); -void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); +int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); #endif diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index a25d95a6612dd9..0fe649c0d5423a 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -368,7 +368,8 @@ static void zpci_irq_handler(struct airq_struct *airq) /* End of second scan with interrupts on. */ break; /* First scan complete, reenable interrupts. 
*/ - zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); + if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC)) + break; si = 0; continue; } @@ -956,7 +957,7 @@ static int __init pci_base_init(void) if (!s390_pci_probe) return 0; - if (!test_facility(69) || !test_facility(71) || !test_facility(72)) + if (!test_facility(69) || !test_facility(71)) return 0; rc = zpci_debug_init(); diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index ea34086c86744e..81b840bc6e4e73 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range) } /* Set Interruption Controls */ -void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) +int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) { + if (!test_facility(72)) + return -EIO; asm volatile ( " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); + return 0; } /* PCI Load */ From 9aaa793b6b05b57cf37814ccf0d992035ea41407 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sat, 4 Nov 2017 04:19:52 -0700 Subject: [PATCH 0364/3326] selftests/x86/ldt_get: Add a few additional tests for limits [ Upstream commit fec8f5ae1715a01c72ad52cb2ecd8aacaf142302 ] We weren't testing the .limit and .limit_in_pages fields very well. Add more tests. This addition seems to trigger the "bits 16:19 are undefined" issue that was fixed in an earlier patch. I think that, at least on my CPU, the high nibble of the limit ends in LAR bits 16:19. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/5601c15ea9b3113d288953fd2838b18bedf6bc67.1509794321.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/ldt_gdt.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index 961e3ee26c2722..b1779da724281c 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -367,9 +367,24 @@ static void do_simple_tests(void) install_invalid(&desc, false); desc.seg_not_present = 0; - desc.read_exec_only = 0; desc.seg_32bit = 1; + desc.read_exec_only = 0; + desc.limit = 0xfffff; + install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB); + + desc.limit_in_pages = 1; + + install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G); + desc.read_exec_only = 1; + install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G); + desc.contents = 1; + desc.read_exec_only = 0; + install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G); + desc.read_exec_only = 1; + install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G); + + desc.limit = 0; install_invalid(&desc, true); } From 898fe968f78c45e12fed31aee35a118a4e26e1b4 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sat, 4 Nov 2017 04:19:49 -0700 Subject: [PATCH 0365/3326] selftests/x86/ldt_gdt: Robustify against set_thread_area() and LAR oddities [ Upstream commit d60ad744c9741586010d4bea286f09a063a90fbd ] Bits 19:16 of LAR's result are undefined, and some upcoming improvements to the test case seem to trigger this. Mask off those bits to avoid spurious failures. 
commit 5b781c7e317f ("x86/tls: Forcibly set the accessed bit in TLS segments") adds a valid case in which LAR's output doesn't quite agree with set_thread_area()'s input. This isn't triggered in the test as is, but it will be if we start calling set_thread_area() with the accessed bit clear. Work around this discrepency. I've added a Fixes tag so that -stable can pick this up if neccesary. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 5b781c7e317f ("x86/tls: Forcibly set the accessed bit in TLS segments") Link: http://lkml.kernel.org/r/b82f3f89c034b53580970ac865139fd8863f44e2.1509794321.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/ldt_gdt.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index b1779da724281c..2afc41a3730fb4 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -115,7 +115,15 @@ static void check_valid_segment(uint16_t index, int ldt, return; } - if (ar != expected_ar) { + /* The SDM says "bits 19:16 are undefined". Thanks. */ + ar &= ~0xF0000; + + /* + * NB: Different Linux versions do different things with the + * accessed bit in set_thread_area(). + */ + if (ar != expected_ar && + (ldt || ar != (expected_ar | AR_ACCESSED))) { printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n", (ldt ? "LDT" : "GDT"), index, ar, expected_ar); nerrs++; From ff8cc6f882c735df53ae2e2fdde8362cafe14100 Mon Sep 17 00:00:00 2001 From: Bryan O'Donoghue Date: Mon, 6 Nov 2017 01:32:20 +0000 Subject: [PATCH 0366/3326] staging: greybus: loopback: Fix iteration count on async path [ Upstream commit 44b02da39210e6dd67e39ff1f48d30c56d384240 ] Commit 12927835d211 ("greybus: loopback: Add asynchronous bi-directional support") does what it says on the tin - namely, adds support for asynchronous bi-directional loopback operations. What it neglects to do though is increment the per-connection gb->iteration_count on an asynchronous operation error. This patch fixes that omission. 
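The invariant behind the fix, expressed as a tiny illustrative helper rather than actual driver code: every finished attempt, failed or not, must advance iteration_count, otherwise per-connection averages of the form total / iteration_count drift whenever the asynchronous path reports errors.

	/* Illustrative only; gb_loopback and its counters belong to the driver. */
	static void example_account_async_completion(struct gb_loopback *gb, int error)
	{
		if (error)
			gb->error++;
		gb->iteration_count++;	/* counted even on failure */
	}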
Fixes: 12927835d211 ("greybus: loopback: Add asynchronous bi-directional support") Signed-off-by: Bryan O'Donoghue Reported-by: Mitch Tasman Reviewed-by: Johan Hovold Cc: Alex Elder Cc: Mitch Tasman Cc: greybus-dev@lists.linaro.org Cc: devel@driverdev.osuosl.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/greybus/loopback.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c index 08e2558842061f..93e86798ec1c26 100644 --- a/drivers/staging/greybus/loopback.c +++ b/drivers/staging/greybus/loopback.c @@ -1042,8 +1042,10 @@ static int gb_loopback_fn(void *data) else if (type == GB_LOOPBACK_TYPE_SINK) error = gb_loopback_async_sink(gb, size); - if (error) + if (error) { gb->error++; + gb->iteration_count++; + } } else { /* We are effectively single threaded here */ if (type == GB_LOOPBACK_TYPE_PING) From 162cefc5547b95f24c1a2aad2af1817381afa6a6 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Tue, 5 Sep 2017 22:57:06 +1000 Subject: [PATCH 0367/3326] m68k: fix ColdFire node shift size calculation [ Upstream commit f55ab8f27548ff3431a6567d400c6757c49fd520 ] The m68k pg_data_table is a fix size array defined in arch/m68k/mm/init.c. Index numbers within it are defined based on memory size. But for Coldfire these don't take into account a non-zero physical RAM base address, and this causes us to access past the end of this array at system start time. Change the node shift calculation so that we keep the index inside its range. Reported-by: Angelo Dureghello Tested-by: Angelo Dureghello Signed-off-by: Greg Ungerer Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/m68k/mm/mcfmmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 8d1408583cf42c..b523a604cb87b1 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -170,7 +170,7 @@ void __init cf_bootmem_alloc(void) max_pfn = max_low_pfn = PFN_DOWN(_ramend); high_memory = (void *)_ramend; - m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; + m68k_virt_to_node_shift = fls(_ramend - 1) - 6; module_fixup(NULL, __start_fixup, __stop_fixup); /* setup bootmem data */ From 4bee9d1d1c54c8bf9ff8149ccacbaf398f524a37 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 28 Oct 2017 11:35:49 +0200 Subject: [PATCH 0368/3326] serial: 8250_fintek: Fix rs485 disablement on invalid ioctl() [ Upstream commit 3236a965486ba0c6043cf2c7b51943d8b382ae29 ] This driver's ->rs485_config callback checks if SER_RS485_RTS_ON_SEND and SER_RS485_RTS_AFTER_SEND have the same value. If they do, it means the user has passed in invalid data with the TIOCSRS485 ioctl() since RTS must have a different polarity when sending and when not sending. In this case, rs485 mode is not enabled (the RS485_URA bit is not set in the RS485 Enable Register) and this is supposed to be signaled back to the user by clearing the SER_RS485_ENABLED bit in struct serial_rs485 ... except a missing tilde character is preventing that from happening. 
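A worked micro-example of the one-character difference, assuming the usual flag values SER_RS485_ENABLED == (1 << 0) and SER_RS485_RTS_ON_SEND == (1 << 1) from the serial UAPI header:

	#include <linux/serial.h>
	#include <linux/types.h>

	static bool example_buggy_mask_keeps_enabled(void)
	{
		__u32 flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;

		flags &= SER_RS485_ENABLED;	/* buggy: keeps only the ENABLED bit */
		/*
		 * With the fix, flags &= ~SER_RS485_ENABLED clears ENABLED and
		 * preserves the other bits, which is what "config rejected"
		 * must report back to userspace.
		 */
		return flags & SER_RS485_ENABLED;	/* true with the bug */
	}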
Fixes: 28e3fb6c4dce ("serial: Add support for Fintek F81216A LPC to 4 UART") Cc: Ricardo Ribalda Delgado Cc: "Ji-Ze Hong (Peter Hong)" Signed-off-by: Lukas Wunner Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_fintek.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index 4bd376c08b59f5..ba4af5434b9177 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -211,7 +211,7 @@ static int fintek_8250_rs485_config(struct uart_port *port, if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) == (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND))) - rs485->flags &= SER_RS485_ENABLED; + rs485->flags &= ~SER_RS485_ENABLED; else config |= RS485_URA; From a99611910dd745e940fc1a0cdab15aadf07c3e23 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 23 Oct 2017 11:35:59 +0200 Subject: [PATCH 0369/3326] staging: rtl8822be: fix wrong dma unmap len [ Upstream commit c40a45a465e9eab72cfdd3ab69d15cf8ef8b89c8 ] Patch fixes splat: r8822be 0000:04:00.0: DMA-API: device driver frees DMA memory with different size [device address=0x0000000078477000] [map size=4096 bytes] [unmap size=424 bytes] Call Trace: debug_dma_unmap_page+0xa5/0xb0 ? unmap_single+0x2f/0x40 _rtl8822be_send_bcn_or_cmd_packet+0x2c5/0x300 [r8822be] ? _rtl8822be_send_bcn_or_cmd_packet+0x2c5/0x300 [r8822be] rtl8822b_halmac_cb_write_data_rsvd_page+0x51/0xc0 [r8822be] _halmac_write_data_rsvd_page+0x22/0x30 [r8822be] halmac_download_rsvd_page_88xx+0xee/0x1f0 [r8822be] halmac_dlfw_to_mem_88xx+0x80/0x120 [r8822be] halmac_download_firmware_88xx.part.47+0x477/0x600 [r8822be] halmac_download_firmware_88xx+0x32/0x40 [r8822be] rtl_halmac_dlfw+0x70/0x120 [r8822be] rtl_halmac_init_hal+0x5f/0x1b0 [r8822be] rtl8822be_hw_init+0x8a2/0x1040 [r8822be] Signed-off-by: Stanislaw Gruszka Acked-by: Larry Finger Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtlwifi/rtl8822be/fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c index 8e24da16752c23..a2cc54866e79cf 100644 --- a/drivers/staging/rtlwifi/rtl8822be/fw.c +++ b/drivers/staging/rtlwifi/rtl8822be/fw.c @@ -419,7 +419,7 @@ static bool _rtl8822be_send_bcn_or_cmd_packet(struct ieee80211_hw *hw, dma_addr = rtlpriv->cfg->ops->get_desc( hw, (u8 *)pbd_desc, true, HW_DESC_TXBUFF_ADDR); - pci_unmap_single(rtlpci->pdev, dma_addr, skb->len, + pci_unmap_single(rtlpci->pdev, dma_addr, pskb->len, PCI_DMA_TODEVICE); kfree_skb(pskb); From e3912c2aef4ea046568ace227aa5d54bad44cac9 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 20 Oct 2017 20:40:24 +0200 Subject: [PATCH 0370/3326] staging: rtl8188eu: avoid a null dereference on pmlmepriv [ Upstream commit 123c0aab0050cd0e07ce18e453389fbbb0a5a425 ] There is a check on pmlmepriv before dereferencing it when vfree'ing pmlmepriv->free_bss_buf however the previous call to rtw_free_mlme_priv_ie_data deferences pmlmepriv causing a null pointer deference if it is null. Avoid this by also calling rtw_free_mlme_priv_ie_data if the pointer is non-null. 
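The general shape of the fix, sketched with hypothetical names: any helper that dereferences the pointer has to sit inside the same NULL check that guards the other accesses, otherwise the check is useless.

	#include <linux/vmalloc.h>

	struct example_priv {
		void *free_bss_buf;
	};

	static void example_free_ie_data(struct example_priv *priv)
	{
		/* frees IE buffers hanging off priv, so it dereferences priv */
	}

	static void example_free_priv(struct example_priv *priv)
	{
		if (priv) {
			example_free_ie_data(priv);
			vfree(priv->free_bss_buf);
		}
	}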
Detected by CoverityScan, CID#1230262 ("Dereference before null check") Fixes: 7b464c9fa5cc ("staging: r8188eu: Add files for new driver - part 4") Signed-off-by: Colin Ian King Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8188eu/core/rtw_mlme.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c index f663e6c41f8aee..f6d71587b803bb 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c @@ -106,10 +106,10 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv) void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) { - rtw_free_mlme_priv_ie_data(pmlmepriv); - - if (pmlmepriv) + if (pmlmepriv) { + rtw_free_mlme_priv_ie_data(pmlmepriv); vfree(pmlmepriv->free_bss_buf); + } } struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv) From d9bf23504e6521af8cbce108761eb22fe9b63136 Mon Sep 17 00:00:00 2001 From: Hiromitsu Yamasaki Date: Thu, 2 Nov 2017 10:32:36 +0100 Subject: [PATCH 0371/3326] spi: sh-msiof: Fix DMA transfer size check [ Upstream commit 36735783fdb599c94b9c86824583df367c65900b ] DMA supports 32-bit words only, even if BITLEN1 of SITMDR2 register is 16bit. Fixes: b0d0ce8b6b91 ("spi: sh-msiof: Add DMA support") Signed-off-by: Hiromitsu Yamasaki Signed-off-by: Simon Horman Acked-by: Geert Uytterhoeven Acked-by: Dirk Behme Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi-sh-msiof.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 0eb1e95834854f..837bb95eea623f 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -900,7 +900,7 @@ static int sh_msiof_transfer_one(struct spi_master *master, break; copy32 = copy_bswap32; } else if (bits <= 16) { - if (l & 1) + if (l & 3) break; copy32 = copy_wswap32; } else { From 4507d57777ff432efa66a3d212f35a6036015298 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 30 Oct 2017 11:35:27 +0100 Subject: [PATCH 0372/3326] spi: spi-axi: fix potential use-after-free after deregistration [ Upstream commit 4d5e0689dc9d5640ad46cdfbe1896b74d8df1661 ] Take an extra reference to the controller before deregistering it to prevent use-after-free in the interrupt handler in case an interrupt fires before the line is disabled. 
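A reduced sketch of the remove-path pattern being applied (driver-specific teardown elided; only core spi/platform calls are used): take an extra reference before deregistering so the controller, and the drvdata a late interrupt would read, stays valid until after the IRQ line is freed.

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>
	#include <linux/spi/spi.h>

	static int example_spi_remove(struct platform_device *pdev)
	{
		struct spi_master *master =
			spi_master_get(platform_get_drvdata(pdev));
		int irq = platform_get_irq(pdev, 0);	/* validated in probe */

		spi_unregister_master(master);	/* drops the registration reference */
		free_irq(irq, master);		/* handler may still run up to here */
		spi_master_put(master);		/* now the memory may really go away */
		return 0;
	}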
Fixes: b1353d1c1d45 ("spi: Add Analog Devices AXI SPI Engine controller support") Acked-by: Lars-Peter Clausen Signed-off-by: Johan Hovold Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi-axi-spi-engine.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index 6ab4c770022882..68cfc351b47f6e 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -553,7 +553,7 @@ static int spi_engine_probe(struct platform_device *pdev) static int spi_engine_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct spi_engine *spi_engine = spi_master_get_devdata(master); int irq = platform_get_irq(pdev, 0); @@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev) free_irq(irq, master); + spi_master_put(master); + writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET); From 2ef27d564261b2ae9d7c39f9993da8dace2cb99b Mon Sep 17 00:00:00 2001 From: Fabrizio Castro Date: Fri, 22 Sep 2017 12:22:17 +0100 Subject: [PATCH 0373/3326] mmc: tmio: check mmc_regulator_get_supply return value [ Upstream commit a3d95d1d4007b1fefd6d8b12db26fda05de05cfb ] mmc_regulator_get_supply returns -EPROBE_DEFER if either vmmc or vqmmc regulators had their probing deferred. vqmmc regulator is needed by UHS to work properly, therefore this patch checks the value returned by mmc_regulator_get_supply to make sure we have a reference to both vmmc and vqmmc (if found in the DT). Signed-off-by: Fabrizio Castro Reviewed-by: Wolfram Sang Tested-by: Wolfram Sang Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/tmio_mmc_core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 9c4e6199b854d3..3a6d49f07e22d7 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1113,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) { struct tmio_mmc_data *pdata = host->pdata; struct mmc_host *mmc = host->mmc; + int err; - mmc_regulator_get_supply(mmc); + err = mmc_regulator_get_supply(mmc); + if (err) + return err; /* use ocr_mask if no regulator */ if (!mmc->ocr_avail) From 69d02547cbdf3d571f0aba394ce4b90b5daf6b71 Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Wed, 27 Sep 2017 11:04:40 +0530 Subject: [PATCH 0374/3326] mmc: sdhci-msm: fix issue with power irq [ Upstream commit c7ccee224d2d551f712752c4a16947f6529d6506 ] SDCC controller reset (SW_RST) during probe may trigger power irq if previous status of PWRCTL was either BUS_ON or IO_HIGH_V. So before we enable the power irq interrupt in GIC (by registering the interrupt handler), we need to ensure that any pending power irq interrupt status is acknowledged otherwise power irq interrupt handler would be fired prematurely. 
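The ordering the fix establishes, shown as a hedged sketch with invented register names and offsets (the real ones are sdhci-msm's, in the hunk that follows): acknowledge whatever the controller reset latched, make that write visible with mb(), and only then unmask the source and register the handler.

	#include <linux/io.h>

	/* Placeholder offsets/masks, not the driver's. */
	#define EXAMPLE_PWRCTL_CLEAR	0x0
	#define EXAMPLE_PWRCTL_MASK	0x4
	#define EXAMPLE_PWR_ACK_ALL	0x1f
	#define EXAMPLE_PWR_INT_MASK	0x1f

	static void example_quiesce_pwr_irq(void __iomem *core_mem)
	{
		/* 1. ack any status latched across the controller reset */
		writel_relaxed(EXAMPLE_PWR_ACK_ALL, core_mem + EXAMPLE_PWRCTL_CLEAR);
		/* 2. ensure the ack has reached the device before the line can assert */
		mb();
		/* 3. only now unmask; devm_request_threaded_irq() follows in probe */
		writel_relaxed(EXAMPLE_PWR_INT_MASK, core_mem + EXAMPLE_PWRCTL_MASK);
	}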
Signed-off-by: Subhash Jadavani Signed-off-by: Vijay Viswanath Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci-msm.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index fc73e56eb1e2ef..92c483ec6cb2e3 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1251,6 +1251,21 @@ static int sdhci_msm_probe(struct platform_device *pdev) CORE_VENDOR_SPEC_CAPABILITIES0); } + /* + * Power on reset state may trigger power irq if previous status of + * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq + * interrupt in GIC, any pending power irq interrupt should be + * acknowledged. Otherwise power irq interrupt handler would be + * fired prematurely. + */ + sdhci_msm_voltage_switch(host); + + /* + * Ensure that above writes are propogated before interrupt enablement + * in GIC. + */ + mb(); + /* Setup IRQ for handling power/voltage tasks with PMIC */ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); if (msm_host->pwr_irq < 0) { @@ -1260,6 +1275,9 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto clk_disable; } + /* Enable pwr irq interrupts */ + writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK); + ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, sdhci_msm_pwr_irq, IRQF_ONESHOT, dev_name(&pdev->dev), host); From 9851961159b6d33b7523e73cdedf156204a731bd Mon Sep 17 00:00:00 2001 From: "Edward A. James" Date: Fri, 27 Oct 2017 11:55:05 -0500 Subject: [PATCH 0375/3326] hwmon: (pmbus/core) Prevent unintentional setting of page to 0xFF [ Upstream commit 6dcf2fb5e8db3704f50af1f198256cb4e2453f8b ] The pmbus core may call read/write word data functions with a page value of -1, intending to perform the operation without setting the page. However, the read/write word data functions accept only unsigned 8-bit page numbers, and therefore cannot check for negative page number to avoid setting the page. This results in setting the page number to 0xFF. This may result in errors or undefined behavior of some devices (specifically the ir35221, which allows the page to be set to 0xFF, but some subsequent operations to read registers may fail). Switch the pmbus_set_page page parameter to an integer and perform the check for negative page there. Make read/write functions consistent in accepting an integer page number parameter. Signed-off-by: Edward A. 
James Fixes: cbcdec6202c9 ("hwmon: (pmbus): Access word data for STATUS_WORD") Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/hwmon/pmbus/pmbus.h | 6 +++--- drivers/hwmon/pmbus/pmbus_core.c | 25 +++++++++++-------------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 4efa2bd4f6d8aa..fa613bd209e344 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -404,9 +404,9 @@ extern const struct regulator_ops pmbus_regulator_ops; /* Function declarations */ void pmbus_clear_cache(struct i2c_client *client); -int pmbus_set_page(struct i2c_client *client, u8 page); -int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg); -int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word); +int pmbus_set_page(struct i2c_client *client, int page); +int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg); +int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word); int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg); int pmbus_write_byte(struct i2c_client *client, int page, u8 value); int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 302f0aef59dede..52a58b8b6e1bd0 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -136,13 +136,13 @@ void pmbus_clear_cache(struct i2c_client *client) } EXPORT_SYMBOL_GPL(pmbus_clear_cache); -int pmbus_set_page(struct i2c_client *client, u8 page) +int pmbus_set_page(struct i2c_client *client, int page) { struct pmbus_data *data = i2c_get_clientdata(client); int rv = 0; int newpage; - if (page != data->currpage) { + if (page >= 0 && page != data->currpage) { rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE); if (newpage != page) @@ -158,11 +158,9 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value) { int rv; - if (page >= 0) { - rv = pmbus_set_page(client, page); - if (rv < 0) - return rv; - } + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; return i2c_smbus_write_byte(client, value); } @@ -186,7 +184,8 @@ static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value) return pmbus_write_byte(client, page, value); } -int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) +int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, + u16 word) { int rv; @@ -219,7 +218,7 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg, return pmbus_write_word_data(client, page, reg, word); } -int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg) +int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg) { int rv; @@ -255,11 +254,9 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg) { int rv; - if (page >= 0) { - rv = pmbus_set_page(client, page); - if (rv < 0) - return rv; - } + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; return i2c_smbus_read_byte_data(client, reg); } From 4ee9572b109de66357a73e27effed5266a449183 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 Sep 2017 13:38:24 +0200 Subject: [PATCH 0376/3326] perf/core: Fix __perf_read_group_add() locking [ Upstream commit a9cd8194e1e6bd09619954721dfaf0f94fe2003e ] Event timestamps are serialized using ctx->lock, 
make sure to hold it over reading all values. Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/events/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index 10cdb9c26b5d12..4f1d4bfc607a32 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4433,6 +4433,8 @@ static int __perf_read_group_add(struct perf_event *leader, if (ret) return ret; + raw_spin_lock_irqsave(&ctx->lock, flags); + /* * Since we co-schedule groups, {enabled,running} times of siblings * will be identical to those of the leader, so we only publish one @@ -4455,8 +4457,6 @@ static int __perf_read_group_add(struct perf_event *leader, if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(leader); - raw_spin_lock_irqsave(&ctx->lock, flags); - list_for_each_entry(sub, &leader->sibling_list, group_entry) { values[n++] += perf_event_count(sub); if (read_format & PERF_FORMAT_ID) From 6cac6a26e3c76c310e33172133e4621b38143244 Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sat, 21 Oct 2017 01:02:07 +0300 Subject: [PATCH 0377/3326] usb: phy: tahvo: fix error handling in tahvo_usb_probe() [ Upstream commit ce035409bfa892a2fabb89720b542e1b335c3426 ] If devm_extcon_dev_allocate() fails, we should disable clk before return. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Fixes: 860d2686fda7 ("usb: phy: tahvo: Use devm_extcon_dev_[allocate|register]() and replace deprecated API") Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/usb/phy/phy-tahvo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c index 8babd318c0ed32..1ec00eae339a28 100644 --- a/drivers/usb/phy/phy-tahvo.c +++ b/drivers/usb/phy/phy-tahvo.c @@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev) tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable); if (IS_ERR(tu->extcon)) { dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); - return -ENOMEM; + ret = PTR_ERR(tu->extcon); + goto err_disable_clk; } ret = devm_extcon_dev_register(&pdev->dev, tu->extcon); From dd89f47e21aa3166d557f348deae1affde54f8b8 Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 9 Oct 2017 14:33:37 +0530 Subject: [PATCH 0378/3326] PCI: dra7xx: Create functional dependency between PCIe and PHY [ Upstream commit 7a4db656a6350f8dd46f711bdef3b0e9c6e3f4cb ] PCI core access configuration space registers in resume_noirq callbacks. In the case of dra7xx, PIPE3 PHY connected to PCIe controller has to be enabled before accessing configuration space registers. Since PIPE3 PHY is enabled by only configuring control module registers, no aborts has been observed so far (though during noirq stage, interface clock of PIPE3 PHY is not enabled). With new TRM updates, PIPE3 PHY has to be initialized (PIPE3 PHY registers has to be accessed) as well which requires the interface clock of PIPE3 PHY to be enabled. The interface clock of PIPE3 PHY is derived from OCP2SCP and hence PCIe PHY is modeled as a child of OCP2SCP. Since pm_runtime is not enabled during noirq stage, pm_runtime_get_sync done in phy_init doesn't enable OCP2SCP clocks resulting in abort when PIPE3 PHY registers are accessed. 
Create a function dependency between PCIe and PHY here to make sure PCIe is suspended before PCIe PHY/OCP2SCP and resumed after PCIe PHY/OCP2SCP. Suggested-by: Grygorii Strashko Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Sekhar Nori Acked-by: Bjorn Helgaas Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/dwc/pci-dra7xx.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c index 34427a6a15afab..362607f727ee6a 100644 --- a/drivers/pci/dwc/pci-dra7xx.c +++ b/drivers/pci/dwc/pci-dra7xx.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -594,6 +595,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) int i; int phy_count; struct phy **phy; + struct device_link **link; void __iomem *base; struct resource *res; struct dw_pcie *pci; @@ -649,11 +651,21 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) if (!phy) return -ENOMEM; + link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); + if (!link) + return -ENOMEM; + for (i = 0; i < phy_count; i++) { snprintf(name, sizeof(name), "pcie-phy%d", i); phy[i] = devm_phy_get(dev, name); if (IS_ERR(phy[i])) return PTR_ERR(phy[i]); + + link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); + if (!link[i]) { + ret = -EINVAL; + goto err_link; + } } dra7xx->base = base; @@ -732,6 +744,10 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) pm_runtime_disable(dev); dra7xx_pcie_disable_phy(dra7xx); +err_link: + while (--i >= 0) + device_link_del(link[i]); + return ret; } From 24567dc3cc8a9fb6f7ac7e9a5040e839efe3716c Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Fri, 20 Oct 2017 02:16:57 -0700 Subject: [PATCH 0379/3326] x86/intel_rdt: Initialize bitmask of shareable resource if CDP enabled [ Upstream commit 95953034fb24c16ad0047a98b16427e5935830c4 ] The platform informs via CPUID.(EAX=0x10, ECX=res#):EBX[31:0] (valid res# are only 1 for L3 and 2 for L2) which unit of the allocation may be used by other entities in the platform. This information is valid whether CDP (Code and Data Prioritization) is enabled or not. Ensure that the bitmask of shareable resource is initialized when CDP is enabled. 
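As an aside, the shareable-resource bitmask itself comes straight from CPUID; a hedged sketch of how such a value is obtained (illustrative only, not the driver's exact code path):

    unsigned int eax, ebx, ecx, edx;

    /* CPUID.(EAX=0x10, ECX=1):EBX[31:0]; res# 1 is L3, res# 2 is L2 */
    cpuid_count(0x10, 1, &eax, &ebx, &ecx, &edx);
    /* ebx is the bitmask of L3 capacity shareable with other agents */

The fix simply copies this already-discovered value into the CDP resource structure as well, as the one-line diff below shows.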
Fixes: 0dd2d7494cd8 ("x86/intel_rdt: Show bitmask of shareable resource with other executing units" Signed-off-by: Reinette Chatre Signed-off-by: Thomas Gleixner Acked-by: Fenghua Yu Acked-by: Vikas Shivappa Acked-by: Tony Luck Link: https://lkml.kernel.org/r/815747bddc820ca221a8924edaf4d1a7324547e4.1508490116.git.reinette.chatre@intel.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/intel_rdt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index cd5fc61ba45020..88dcf847901356 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -267,6 +267,7 @@ static void rdt_get_cdp_l3_config(int type) r->num_closid = r_l3->num_closid / 2; r->cache.cbm_len = r_l3->cache.cbm_len; r->default_ctrl = r_l3->default_ctrl; + r->cache.shareable_bits = r_l3->cache.shareable_bits; r->data_width = (r->cache.cbm_len + 3) / 4; r->alloc_capable = true; /* From 6c5faa44569688d8b984059def5846e9f15b2cff Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Fri, 20 Oct 2017 02:16:59 -0700 Subject: [PATCH 0380/3326] x86/intel_rdt: Fix potential deadlock during resctrl mount [ Upstream commit 87943db7dfb0c5ee5aa74a9ac06346fadd9695c8 ] Sai reported a warning during some MBA tests: [ 236.755559] ====================================================== [ 236.762443] WARNING: possible circular locking dependency detected [ 236.769328] 4.14.0-rc4-yocto-standard #8 Not tainted [ 236.774857] ------------------------------------------------------ [ 236.781738] mount/10091 is trying to acquire lock: [ 236.787071] (cpu_hotplug_lock.rw_sem){++++}, at: [] static_key_enable+0x12/0x30 [ 236.797058] but task is already holding lock: [ 236.803552] (&type->s_umount_key#37/1){+.+.}, at: [] sget_userns+0x32f/0x520 [ 236.813247] which lock already depends on the new lock. 
[ 236.822353] the existing dependency chain (in reverse order) is: [ 236.830686] -> #4 (&type->s_umount_key#37/1){+.+.}: [ 236.837756] __lock_acquire+0x1100/0x11a0 [ 236.842799] lock_acquire+0xdf/0x1d0 [ 236.847363] down_write_nested+0x46/0x80 [ 236.852310] sget_userns+0x32f/0x520 [ 236.856873] kernfs_mount_ns+0x7e/0x1f0 [ 236.861728] rdt_mount+0x30c/0x440 [ 236.866096] mount_fs+0x38/0x150 [ 236.870262] vfs_kern_mount+0x67/0x150 [ 236.875015] do_mount+0x1df/0xd50 [ 236.879286] SyS_mount+0x95/0xe0 [ 236.883464] entry_SYSCALL_64_fastpath+0x18/0xad [ 236.889183] -> #3 (rdtgroup_mutex){+.+.}: [ 236.895292] __lock_acquire+0x1100/0x11a0 [ 236.900337] lock_acquire+0xdf/0x1d0 [ 236.904899] __mutex_lock+0x80/0x8f0 [ 236.909459] mutex_lock_nested+0x1b/0x20 [ 236.914407] intel_rdt_online_cpu+0x3b/0x4a0 [ 236.919745] cpuhp_invoke_callback+0xce/0xb80 [ 236.925177] cpuhp_thread_fun+0x1c5/0x230 [ 236.930222] smpboot_thread_fn+0x11a/0x1e0 [ 236.935362] kthread+0x152/0x190 [ 236.939536] ret_from_fork+0x27/0x40 [ 236.944097] -> #2 (cpuhp_state-up){+.+.}: [ 236.950199] __lock_acquire+0x1100/0x11a0 [ 236.955241] lock_acquire+0xdf/0x1d0 [ 236.959800] cpuhp_issue_call+0x12e/0x1c0 [ 236.964845] __cpuhp_setup_state_cpuslocked+0x13b/0x2f0 [ 236.971242] __cpuhp_setup_state+0xa7/0x120 [ 236.976483] page_writeback_init+0x43/0x67 [ 236.981623] pagecache_init+0x38/0x3b [ 236.986281] start_kernel+0x3c6/0x41a [ 236.990931] x86_64_start_reservations+0x2a/0x2c [ 236.996650] x86_64_start_kernel+0x72/0x75 [ 237.001793] verify_cpu+0x0/0xfb [ 237.005966] -> #1 (cpuhp_state_mutex){+.+.}: [ 237.012364] __lock_acquire+0x1100/0x11a0 [ 237.017408] lock_acquire+0xdf/0x1d0 [ 237.021969] __mutex_lock+0x80/0x8f0 [ 237.026527] mutex_lock_nested+0x1b/0x20 [ 237.031475] __cpuhp_setup_state_cpuslocked+0x54/0x2f0 [ 237.037777] __cpuhp_setup_state+0xa7/0x120 [ 237.043013] page_alloc_init+0x28/0x30 [ 237.047769] start_kernel+0x148/0x41a [ 237.052425] x86_64_start_reservations+0x2a/0x2c [ 237.058145] x86_64_start_kernel+0x72/0x75 [ 237.063284] verify_cpu+0x0/0xfb [ 237.067456] -> #0 (cpu_hotplug_lock.rw_sem){++++}: [ 237.074436] check_prev_add+0x401/0x800 [ 237.079286] __lock_acquire+0x1100/0x11a0 [ 237.084330] lock_acquire+0xdf/0x1d0 [ 237.088890] cpus_read_lock+0x42/0x90 [ 237.093548] static_key_enable+0x12/0x30 [ 237.098496] rdt_mount+0x406/0x440 [ 237.102862] mount_fs+0x38/0x150 [ 237.107035] vfs_kern_mount+0x67/0x150 [ 237.111787] do_mount+0x1df/0xd50 [ 237.116058] SyS_mount+0x95/0xe0 [ 237.120233] entry_SYSCALL_64_fastpath+0x18/0xad [ 237.125952] other info that might help us debug this: [ 237.134867] Chain exists of: cpu_hotplug_lock.rw_sem --> rdtgroup_mutex --> &type->s_umount_key#37/1 [ 237.148425] Possible unsafe locking scenario: [ 237.155015] CPU0 CPU1 [ 237.160057] ---- ---- [ 237.165100] lock(&type->s_umount_key#37/1); [ 237.169952] lock(rdtgroup_mutex); [ 237.176641] lock(&type->s_umount_key#37/1); [ 237.184287] lock(cpu_hotplug_lock.rw_sem); [ 237.189041] *** DEADLOCK *** When the resctrl filesystem is mounted the locks must be acquired in the same order as was done when the cpus came online: cpu_hotplug_lock before rdtgroup_mutex. This also requires to switch the static_branch_enable() calls to the _cpulocked variant because now cpu hotplug lock is held already. 
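Condensed sketch of the ordering the patch enforces in rdt_mount() (not the literal diff, which follows below): the hotplug lock is taken first, exactly as the CPU online path does, and the static keys are flipped with the *_cpuslocked helpers while it is held:

    cpus_read_lock();
    mutex_lock(&rdtgroup_mutex);

    static_branch_enable_cpuslocked(&rdt_enable_key);

    mutex_unlock(&rdtgroup_mutex);
    cpus_read_unlock();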
[ tglx: Switched to cpus_read_[un]lock ] Reported-by: Sai Praneeth Prakhya Signed-off-by: Reinette Chatre Tested-by: Sai Praneeth Prakhya Acked-by: Vikas Shivappa Cc: fenghua.yu@intel.com Cc: tony.luck@intel.com Link: https://lkml.kernel.org/r/9c41b91bc2f47d9e95b62b213ecdb45623c47a9f.1508490116.git.reinette.chatre@intel.com Signed-off-by: Thomas Gleixner Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index a869d4a073c5ce..3d433af856a53f 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -1081,6 +1081,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type, struct dentry *dentry; int ret; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /* * resctrl file system can only be mounted once. @@ -1130,12 +1131,12 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type, goto out_mondata; if (rdt_alloc_capable) - static_branch_enable(&rdt_alloc_enable_key); + static_branch_enable_cpuslocked(&rdt_alloc_enable_key); if (rdt_mon_capable) - static_branch_enable(&rdt_mon_enable_key); + static_branch_enable_cpuslocked(&rdt_mon_enable_key); if (rdt_alloc_capable || rdt_mon_capable) - static_branch_enable(&rdt_enable_key); + static_branch_enable_cpuslocked(&rdt_enable_key); if (is_mbm_enabled()) { r = &rdt_resources_all[RDT_RESOURCE_L3]; @@ -1157,6 +1158,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type, cdp_disable(); out: mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return dentry; } From c36100e295ffc4d957b0884a82bd32ac5a135c51 Mon Sep 17 00:00:00 2001 From: Aaron Sierra Date: Wed, 4 Oct 2017 10:01:28 -0500 Subject: [PATCH 0381/3326] serial: 8250: Preserve DLD[7:4] for PORT_XR17V35X [ Upstream commit 0ab84da2e076948c49d36197ee7d254125c53eab ] The upper four bits of the XR17V35x fractional divisor register (DLD) control general chip function (RS-485 direction pin polarity, multidrop mode, XON/XOFF parity check, and fast IR mode). Don't allow these bits to be clobbered when setting the baudrate. Signed-off-by: Aaron Sierra Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_port.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index f0cc04f62b6768..8dcfd4978a036c 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2586,8 +2586,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud, serial_dl_write(up, quot); /* XR17V35x UARTs have an extra fractional divisor register (DLD) */ - if (up->port.type == PORT_XR17V35X) + if (up->port.type == PORT_XR17V35X) { + /* Preserve bits not related to baudrate; DLD[7:4]. */ + quot_frac |= serial_port_in(port, 0x2) & 0xf0; serial_port_out(port, 0x2, quot_frac); + } } static unsigned int serial8250_get_baud_rate(struct uart_port *port, From 5715de464ac8ef97fdc0daacd41ac35dcd412e2f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 20 Oct 2017 08:43:39 +0900 Subject: [PATCH 0382/3326] kprobes: Use synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y [ Upstream commit a30b85df7d599f626973e9cd3056fe755bd778e0 ] We want to wait for all potentially preempted kprobes trampoline execution to have completed. 
This guarantees that any freed trampoline memory is not in use by any task in the system anymore. synchronize_rcu_tasks() gives such a guarantee, so use it. Also, this guarantees to wait for all potentially preempted tasks on the instructions which will be replaced with a jump. Since this becomes a problem only when CONFIG_PREEMPT=y, enable CONFIG_TASKS_RCU=y for synchronize_rcu_tasks() in that case. Signed-off-by: Masami Hiramatsu Acked-by: Paul E. McKenney Cc: Ananth N Mavinakayanahalli Cc: Linus Torvalds Cc: Naveen N . Rao Cc: Paul E . McKenney Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/150845661962.5443.17724352636247312231.stgit@devbox Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/Kconfig | 2 +- kernel/kprobes.c | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index 057370a0ac4ec7..400b9e1b2f275d 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -91,7 +91,7 @@ config STATIC_KEYS_SELFTEST config OPTPROBES def_bool y depends on KPROBES && HAVE_OPTPROBES - depends on !PREEMPT + select TASKS_RCU if PREEMPT config KPROBES_ON_FTRACE def_bool y diff --git a/kernel/kprobes.c b/kernel/kprobes.c index a1606a4224e14e..a66e838640eaed 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -573,13 +573,15 @@ static void kprobe_optimizer(struct work_struct *work) do_unoptimize_kprobes(); /* - * Step 2: Wait for quiesence period to ensure all running interrupts - * are done. Because optprobe may modify multiple instructions - * there is a chance that Nth instruction is interrupted. In that - * case, running interrupt can return to 2nd-Nth byte of jump - * instruction. This wait is for avoiding it. + * Step 2: Wait for quiesence period to ensure all potentially + * preempted tasks to have normally scheduled. Because optprobe + * may modify multiple instructions, there is a chance that Nth + * instruction is preempted. In that case, such tasks can return + * to 2nd-Nth byte of jump instruction. This wait is for avoiding it. + * Note that on non-preemptive kernel, this is transparently converted + * to synchronoze_sched() to wait for all interrupts to have completed. */ - synchronize_sched(); + synchronize_rcu_tasks(); /* Step 3: Optimize kprobes after quiesence period */ do_optimize_kprobes(); From ec2d8c832343de707fd777f29d5916b4eb6fea28 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Wed, 18 Oct 2017 10:21:07 -0700 Subject: [PATCH 0383/3326] x86/entry: Use SYSCALL_DEFINE() macros for sys_modify_ldt() [ Upstream commit da20ab35180780e4a6eadc804544f1fa967f3567 ] We do not have tracepoints for sys_modify_ldt() because we define it directly instead of using the normal SYSCALL_DEFINEx() macros. However, there is a reason sys_modify_ldt() does not use the macros: it has an 'int' return type instead of 'unsigned long'. This is a bug, but it's a bug cemented in the ABI. What does this mean? If we return -EINVAL from a function that returns 'int', we have 0x00000000ffffffea in %rax. But, if we return -EINVAL from a function returning 'unsigned long', we end up with 0xffffffffffffffea in %rax, which is wrong. To work around this and maintain the 'int' behavior while using the SYSCALL_DEFINEx() macros, so we add a cast to 'unsigned int' in both implementations of sys_modify_ldt(). 
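A standalone C illustration of the ABI difference (not kernel code; the comments assume the x86-64 convention that return values travel in %rax):

    long f(void)
    {
        return -22;               /* -EINVAL as 'long':  %rax = 0xffffffffffffffea */
    }

    long g(void)
    {
        int ret = -22;
        return (unsigned int)ret; /* 'int' ABI preserved: %rax = 0x00000000ffffffea */
    }

The cast keeps the upper half of the register clear, which is what the legacy 'int'-returning sys_modify_ldt() produced.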
Signed-off-by: Dave Hansen Reviewed-by: Andy Lutomirski Reviewed-by: Brian Gerst Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20171018172107.1A79C532@viggo.jf.intel.com Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/syscalls.h | 2 +- arch/x86/kernel/ldt.c | 16 +++++++++++++--- arch/x86/um/ldt.c | 7 +++++-- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 91dfcafe27a662..bad25bb80679fe 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int); asmlinkage long sys_iopl(unsigned int); /* kernel/ldt.c */ -asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); +asmlinkage long sys_modify_ldt(int, void __user *, unsigned long); /* kernel/signal.c */ asmlinkage long sys_rt_sigreturn(void); diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 4d17bacf40308c..ae5615b03def7b 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -295,8 +296,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) return error; } -asmlinkage int sys_modify_ldt(int func, void __user *ptr, - unsigned long bytecount) +SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , + unsigned long , bytecount) { int ret = -ENOSYS; @@ -314,5 +315,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr, ret = write_ldt(ptr, bytecount, 0); break; } - return ret; + /* + * The SYSCALL_DEFINE() macros give us an 'unsigned long' + * return type, but tht ABI for sys_modify_ldt() expects + * 'int'. This cast gives us an int-sized value in %rax + * for the return code. The 'unsigned' is necessary so + * the compiler does not try to sign-extend the negative + * return codes into the high half of the register when + * taking the value from int->long. + */ + return (unsigned int)ret; } diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c index 836a1eb5df436b..3ee234b6234dd6 100644 --- a/arch/x86/um/ldt.c +++ b/arch/x86/um/ldt.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm) mm->arch.ldt.entry_count = 0; } -int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount) +SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , + unsigned long , bytecount) { - return do_modify_ldt_skas(func, ptr, bytecount); + /* See non-um modify_ldt() for why we do this cast */ + return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount); } From ebbd9c27dcf71d2947e30d743cfc2c15e7d8f723 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 16 Oct 2017 16:28:38 +0100 Subject: [PATCH 0384/3326] clocksource/drivers/arm_arch_timer: Validate CNTFRQ after enabling frame [ Upstream commit 21492e1333a0d07af6968667f128e19088cf5ead ] The ACPI GTDT code validates the CNTFRQ field of each MMIO timer frame against the CNTFRQ system register of the current CPU, to ensure that they are equal, which is mandated by the architecture. 
However, reading the CNTFRQ field of a frame is not possible until the RFRQ bit in the frame's CNTACRn register is set, and doing so before that willl produce the following error: arch_timer: [Firmware Bug]: CNTFRQ mismatch: frame @ 0x00000000e0be0000: (0x00000000), CPU: (0x0ee6b280) arch_timer: Disabling MMIO timers due to CNTFRQ mismatch arch_timer: Failed to initialize memory-mapped timer. The reason is that the CNTFRQ field is RES0 if access is not enabled. So move the validation of CNTFRQ into the loop that iterates over the timers to find the best frame, but defer it until after we have selected the best frame, which should also have enabled the RFRQ bit. Signed-off-by: Ard Biesheuvel Signed-off-by: Mark Rutland Signed-off-by: Daniel Lezcano Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clocksource/arm_arch_timer.c | 38 +++++++++++++++------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index fd4b7f684bd030..14e2419063e93a 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -1268,10 +1268,6 @@ arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem) iounmap(cntctlbase); - if (!best_frame) - pr_err("Unable to find a suitable frame in timer @ %pa\n", - &timer_mem->cntctlbase); - return best_frame; } @@ -1372,6 +1368,8 @@ static int __init arch_timer_mem_of_init(struct device_node *np) frame = arch_timer_mem_find_best_frame(timer_mem); if (!frame) { + pr_err("Unable to find a suitable frame in timer @ %pa\n", + &timer_mem->cntctlbase); ret = -EINVAL; goto out; } @@ -1420,7 +1418,7 @@ arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem) static int __init arch_timer_mem_acpi_init(int platform_timer_count) { struct arch_timer_mem *timers, *timer; - struct arch_timer_mem_frame *frame; + struct arch_timer_mem_frame *frame, *best_frame = NULL; int timer_count, i, ret = 0; timers = kcalloc(platform_timer_count, sizeof(*timers), @@ -1432,14 +1430,6 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count) if (ret || !timer_count) goto out; - for (i = 0; i < timer_count; i++) { - ret = arch_timer_mem_verify_cntfrq(&timers[i]); - if (ret) { - pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); - goto out; - } - } - /* * While unlikely, it's theoretically possible that none of the frames * in a timer expose the combination of feature we want. @@ -1448,12 +1438,26 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count) timer = &timers[i]; frame = arch_timer_mem_find_best_frame(timer); - if (frame) - break; + if (!best_frame) + best_frame = frame; + + ret = arch_timer_mem_verify_cntfrq(timer); + if (ret) { + pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); + goto out; + } + + if (!best_frame) /* implies !frame */ + /* + * Only complain about missing suitable frames if we + * haven't already found one in a previous iteration. 
+ */ + pr_err("Unable to find a suitable frame in timer @ %pa\n", + &timer->cntctlbase); } - if (frame) - ret = arch_timer_mem_frame_register(frame); + if (best_frame) + ret = arch_timer_mem_frame_register(best_frame); out: kfree(timers); return ret; From a105cd032d9d42a61aad547b4af283ead1c0d37b Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 18 Sep 2017 15:46:41 +0200 Subject: [PATCH 0385/3326] dt-bindings: timer: renesas, cmt: Fix SoC-specific compatible values [ Upstream commit e20824e944c3bf4352fcd8d9f446c41b53901e7b ] While the new family-specific compatible values introduced by commit 6f54cc1adcc8957f ("devicetree: bindings: R-Car Gen2 CMT0 and CMT1 bindings") use the recommended order ",-", the new SoC-specific compatible values still use the old and deprecated order ",-". Switch the SoC-specific compatible values to the recommended order while there are no upstream users of these compatible values yet. Fixes: 7f03a0ecfdc786c1 ("devicetree: bindings: r8a73a4 and R-Car Gen2 CMT bindings") Fixes: 63d9e8ca0dd4bfa4 ("devicetree: bindings: Deprecate property, update example") Signed-off-by: Geert Uytterhoeven Acked-by: Rob Herring Reviewed-by: Laurent Pinchart Signed-off-by: Daniel Lezcano Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- .../devicetree/bindings/timer/renesas,cmt.txt | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt index 6ca6b9e582a0e8..d740989eb56981 100644 --- a/Documentation/devicetree/bindings/timer/renesas,cmt.txt +++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt @@ -20,16 +20,16 @@ Required Properties: (CMT1 on sh73a0 and r8a7740) This is a fallback for the above renesas,cmt-48-* entries. - - "renesas,cmt0-r8a73a4" for the 32-bit CMT0 device included in r8a73a4. - - "renesas,cmt1-r8a73a4" for the 48-bit CMT1 device included in r8a73a4. - - "renesas,cmt0-r8a7790" for the 32-bit CMT0 device included in r8a7790. - - "renesas,cmt1-r8a7790" for the 48-bit CMT1 device included in r8a7790. - - "renesas,cmt0-r8a7791" for the 32-bit CMT0 device included in r8a7791. - - "renesas,cmt1-r8a7791" for the 48-bit CMT1 device included in r8a7791. - - "renesas,cmt0-r8a7793" for the 32-bit CMT0 device included in r8a7793. - - "renesas,cmt1-r8a7793" for the 48-bit CMT1 device included in r8a7793. - - "renesas,cmt0-r8a7794" for the 32-bit CMT0 device included in r8a7794. - - "renesas,cmt1-r8a7794" for the 48-bit CMT1 device included in r8a7794. + - "renesas,r8a73a4-cmt0" for the 32-bit CMT0 device included in r8a73a4. + - "renesas,r8a73a4-cmt1" for the 48-bit CMT1 device included in r8a73a4. + - "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790. + - "renesas,r8a7790-cmt1" for the 48-bit CMT1 device included in r8a7790. + - "renesas,r8a7791-cmt0" for the 32-bit CMT0 device included in r8a7791. + - "renesas,r8a7791-cmt1" for the 48-bit CMT1 device included in r8a7791. + - "renesas,r8a7793-cmt0" for the 32-bit CMT0 device included in r8a7793. + - "renesas,r8a7793-cmt1" for the 48-bit CMT1 device included in r8a7793. + - "renesas,r8a7794-cmt0" for the 32-bit CMT0 device included in r8a7794. + - "renesas,r8a7794-cmt1" for the 48-bit CMT1 device included in r8a7794. - "renesas,rcar-gen2-cmt0" for 32-bit CMT0 devices included in R-Car Gen2. - "renesas,rcar-gen2-cmt1" for 48-bit CMT1 devices included in R-Car Gen2. 
@@ -46,7 +46,7 @@ Required Properties: Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes cmt0: timer@ffca0000 { - compatible = "renesas,cmt0-r8a7790", "renesas,rcar-gen2-cmt0"; + compatible = "renesas,r8a7790-cmt0", "renesas,rcar-gen2-cmt0"; reg = <0 0xffca0000 0 0x1004>; interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>, <0 142 IRQ_TYPE_LEVEL_HIGH>; @@ -55,7 +55,7 @@ Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes }; cmt1: timer@e6130000 { - compatible = "renesas,cmt1-r8a7790", "renesas,rcar-gen2-cmt1"; + compatible = "renesas,r8a7790-cmt1", "renesas,rcar-gen2-cmt1"; reg = <0 0xe6130000 0 0x1004>; interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>, <0 121 IRQ_TYPE_LEVEL_HIGH>, From a95d0d2ee717908be9b67d1f1b2fb44e1c1210a1 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 16 Oct 2017 12:40:29 -0500 Subject: [PATCH 0386/3326] EDAC, sb_edac: Fix missing break in switch [ Upstream commit a8e9b186f153a44690ad0363a56716e7077ad28c ] Add missing break statement in order to prevent the code from falling through. Signed-off-by: Gustavo A. R. Silva Cc: Qiuxu Zhuo Cc: linux-edac Link: http://lkml.kernel.org/r/20171016174029.GA19757@embeddedor.com Signed-off-by: Borislav Petkov Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/edac/sb_edac.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 6241fa787d6646..cd9d6ba035791f 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -2498,6 +2498,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA: pvt->pci_ta = pdev; + break; case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS: pvt->pci_ras = pdev; From 8891ee7e676c0adc59b5e6c298de0b109a1934d5 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 13 Oct 2017 17:10:37 +0800 Subject: [PATCH 0387/3326] usb: mtu3: fix error return code in ssusb_gadget_init() [ Upstream commit c162ff0aaaac456ef29aebd1e9d4d3e305cd3279 ] When failing to get IRQ number, platform_get_irq() may return -EPROBE_DEFER, but we ignore it and always return -ENODEV, so fix it. Signed-off-by: Chunfeng Yun Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/usb/mtu3/mtu3_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index 99c65b0788ff93..947579842ad788 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -774,9 +774,9 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb) return -ENOMEM; mtu->irq = platform_get_irq(pdev, 0); - if (mtu->irq <= 0) { + if (mtu->irq < 0) { dev_err(dev, "fail to get irq number\n"); - return -ENODEV; + return mtu->irq; } dev_info(dev, "irq %d\n", mtu->irq); From 11d9a37aaf271de4dc9b8016182330ba8f06bf38 Mon Sep 17 00:00:00 2001 From: Ioana Radulescu Date: Wed, 11 Oct 2017 08:29:44 -0500 Subject: [PATCH 0388/3326] staging: fsl-dpaa2/eth: Account for Rx FD buffers on error path [ Upstream commit cbb3ea40fc495bf04070200b35c1c4cd05d11bd3 ] On Rx path, if we fail to build an skb from the incoming FD, we still need to update the channel buffer count accordingly, otherwise we risk depleting the pool while the software counter still sees available buffers. 
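Schematic of the accounting rule (simplified; 'ch' and 'vaddr' stand in for the driver's locals): the hardware has consumed the buffer whether or not an skb can be built around it, so the software counter has to be adjusted on both paths:

    ch->buf_count--;                 /* buffer is gone either way */

    skb = build_skb(vaddr, DPAA2_ETH_RX_BUF_SIZE +
                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
    if (unlikely(!skb))
        return NULL;                 /* counter already reflects the loss */

The diff below applies the same idea to the scatter-gather path by walking the remaining SG entries before bailing out.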
Signed-off-by: Ioana Radulescu Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c index 26017fe9df9361..8e84b2e7f5bda1 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c @@ -131,6 +131,8 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, u16 fd_offset = dpaa2_fd_get_offset(fd); u32 fd_length = dpaa2_fd_get_len(fd); + ch->buf_count--; + skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); if (unlikely(!skb)) @@ -139,8 +141,6 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, skb_reserve(skb, fd_offset); skb_put(skb, fd_length); - ch->buf_count--; - return skb; } @@ -178,8 +178,15 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, /* We build the skb around the first data buffer */ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); - if (unlikely(!skb)) - return NULL; + if (unlikely(!skb)) { + /* We still need to subtract the buffers used + * by this FD from our software counter + */ + while (!dpaa2_sg_is_final(&sgt[i]) && + i < DPAA2_ETH_MAX_SG_ENTRIES) + i++; + break; + } sg_offset = dpaa2_sg_get_offset(sge); skb_reserve(skb, sg_offset); From 2001a980d1ab676f07a455c728fa72ae86d40058 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Sat, 23 Sep 2017 19:36:04 -0500 Subject: [PATCH 0389/3326] staging: rtl8822be: Keep array subscript no lower than zero [ Upstream commit 43d15c2013130a9fa230c2f5203aca818ae0bb86 ] The kbuild test robot reports the following: drivers/staging//rtlwifi/phydm/phydm_dig.c: In function 'odm_pause_dig': drivers/staging//rtlwifi/phydm/phydm_dig.c:494:45: warning: array subscript is below array bounds [-Warray-bounds] odm_write_dig(dm, dig_tab->pause_dig_value[max_level]); This condition is caused when a loop falls through. The fix is to pin max_level to be >= 0. Signed-off-by: Larry Finger c: kbuild test robot Fixes: 9ce99b04b5b82fdf11e4c76b60a5f82c1e541297 staging: r8822be: Add phydm mini driver Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtlwifi/phydm/phydm_dig.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/staging/rtlwifi/phydm/phydm_dig.c b/drivers/staging/rtlwifi/phydm/phydm_dig.c index 31a4f3fcad1935..c88b9788363a78 100644 --- a/drivers/staging/rtlwifi/phydm/phydm_dig.c +++ b/drivers/staging/rtlwifi/phydm/phydm_dig.c @@ -490,6 +490,8 @@ void odm_pause_dig(void *dm_void, enum phydm_pause_type pause_type, break; } + /* pin max_level to be >= 0 */ + max_level = max_t(s8, 0, max_level); /* write IGI of lower level */ odm_write_dig(dm, dig_tab->pause_dig_value[max_level]); ODM_RT_TRACE(dm, ODM_COMP_DIG, From 8876436aa90d03372a2e6480b9a26d19d225603c Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Tue, 10 Oct 2017 13:47:55 +0800 Subject: [PATCH 0390/3326] ARM: cpuidle: Correct driver unregistration if init fails [ Upstream commit 0f87855d969a87f02048ff5ced7503465d5ab2f1 ] If cpuidle init fails, the code misses to unregister the driver for current CPU. 
Furthermore, we also need to rollback to cancel all previous CPUs registration; but the code retrieves driver handler by using function cpuidle_get_driver(), this function returns back current CPU driver handler but not previous CPU's handler, which leads to the failure handling code cannot unregister previous CPUs driver. This commit fixes two mentioned issues, it adds error handling path 'goto out_unregister_drv' for current CPU driver unregistration; and it is to replace cpuidle_get_driver() with cpuidle_get_cpu_driver(), the later function can retrieve driver handler for previous CPUs according to the CPU device handler so can unregister the driver properly. This patch also adds extra error handling paths 'goto out_kfree_dev' and 'goto out_kfree_drv' and adjusts the freeing sentences for previous CPUs; so make the code more readable for freeing 'dev' and 'drv' structures. Suggested-by: Daniel Lezcano Signed-off-by: Leo Yan Fixes: d50a7d8acd78 (ARM: cpuidle: Support asymmetric idle definition) Acked-by: Daniel Lezcano Signed-off-by: Rafael J. Wysocki Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/cpuidle/cpuidle-arm.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 52a75053ee0312..f47c545467522e 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -104,13 +104,13 @@ static int __init arm_idle_init(void) ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); if (ret <= 0) { ret = ret ? : -ENODEV; - goto init_fail; + goto out_kfree_drv; } ret = cpuidle_register_driver(drv); if (ret) { pr_err("Failed to register cpuidle driver\n"); - goto init_fail; + goto out_kfree_drv; } /* @@ -128,14 +128,14 @@ static int __init arm_idle_init(void) if (ret) { pr_err("CPU %d failed to init idle CPU ops\n", cpu); - goto out_fail; + goto out_unregister_drv; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { pr_err("Failed to allocate cpuidle device\n"); ret = -ENOMEM; - goto out_fail; + goto out_unregister_drv; } dev->cpu = cpu; @@ -143,21 +143,25 @@ static int __init arm_idle_init(void) if (ret) { pr_err("Failed to register cpuidle device for CPU %d\n", cpu); - kfree(dev); - goto out_fail; + goto out_kfree_dev; } } return 0; -init_fail: + +out_kfree_dev: + kfree(dev); +out_unregister_drv: + cpuidle_unregister_driver(drv); +out_kfree_drv: kfree(drv); out_fail: while (--cpu >= 0) { dev = per_cpu(cpuidle_devices, cpu); + drv = cpuidle_get_cpu_driver(dev); cpuidle_unregister_device(dev); - kfree(dev); - drv = cpuidle_get_driver(); cpuidle_unregister_driver(drv); + kfree(dev); kfree(drv); } From 9e99614575b9f1e66249729878a415b9b6659ee7 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Thu, 5 Oct 2017 11:21:43 +0300 Subject: [PATCH 0391/3326] usb: xhci: Return error when host is dead in xhci_disable_slot() [ Upstream commit dcabc76fa9361186e6b88c30a68db8fa9d5b4a1c ] xhci_disable_slot() is a helper for disabling a slot when a device goes away or recovers from error situations. Currently, it returns success when it sees a dead host. This is not the right way to go. It should return error and let the invoker know that disable slot command was failed due to a dead host. 
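A hypothetical caller, for illustration only (the function signature matches the hunk below; the surrounding code and field names are invented):

    ret = xhci_disable_slot(xhci, command, udev->slot_id);
    if (ret)
        xhci_dbg(xhci, "Disable slot %d failed: %d\n",
                 udev->slot_id, ret);    /* -ENODEV now means the host is dead */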
Fixes: f9e609b82479 ("usb: xhci: Add helper function xhci_disable_slot().") Cc: Guoqing Zhang Signed-off-by: Lu Baolu Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 51535ba2bcd425..e5677700dea4d5 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3583,10 +3583,9 @@ int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command, state = readl(&xhci->op_regs->status); if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || (xhci->xhc_state & XHCI_STATE_HALTED)) { - xhci_free_virt_device(xhci, slot_id); spin_unlock_irqrestore(&xhci->lock, flags); kfree(command); - return ret; + return -ENODEV; } ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, From dddd2bd98cb2a547c9fd08d68273c69d5bca3e1f Mon Sep 17 00:00:00 2001 From: Jibin Xu Date: Sun, 10 Sep 2017 20:11:42 -0700 Subject: [PATCH 0392/3326] sysrq : fix Show Regs call trace on ARM [ Upstream commit b00bebbc301c8e1f74f230dc82282e56b7e7a6db ] When kernel configuration SMP,PREEMPT and DEBUG_PREEMPT are enabled, echo 1 >/proc/sys/kernel/sysrq echo p >/proc/sysrq-trigger kernel will print call trace as below: sysrq: SysRq : Show Regs BUG: using __this_cpu_read() in preemptible [00000000] code: sh/435 caller is __this_cpu_preempt_check+0x18/0x20 Call trace: [] dump_backtrace+0x0/0x1d0 [] show_stack+0x24/0x30 [] dump_stack+0x90/0xb0 [] check_preemption_disabled+0x100/0x108 [] __this_cpu_preempt_check+0x18/0x20 [] sysrq_handle_showregs+0x1c/0x40 [] __handle_sysrq+0x12c/0x1a0 [] write_sysrq_trigger+0x60/0x70 [] proc_reg_write+0x90/0xd0 [] __vfs_write+0x48/0x90 [] vfs_write+0xa4/0x190 [] SyS_write+0x54/0xb0 [] el0_svc_naked+0x24/0x28 This can be seen on a common board like an r-pi3. This happens because when echo p >/proc/sysrq-trigger, get_irq_regs() is called outside of IRQ context, if preemption is enabled in this situation,kernel will print the call trace. Since many prior discussions on the mailing lists have made it clear that get_irq_regs either just returns NULL or stale data when used outside of IRQ context,we simply avoid calling it outside of IRQ context. 
Signed-off-by: Jibin Xu Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/sysrq.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index d008f5a7519718..377b3592384e0d 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -246,8 +246,10 @@ static void sysrq_handle_showallcpus(int key) * architecture has no support for it: */ if (!trigger_all_cpu_backtrace()) { - struct pt_regs *regs = get_irq_regs(); + struct pt_regs *regs = NULL; + if (in_irq()) + regs = get_irq_regs(); if (regs) { pr_info("CPU%d:\n", smp_processor_id()); show_regs(regs); @@ -266,7 +268,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = { static void sysrq_handle_showregs(int key) { - struct pt_regs *regs = get_irq_regs(); + struct pt_regs *regs = NULL; + + if (in_irq()) + regs = get_irq_regs(); if (regs) show_regs(regs); perf_event_print_debug(); From 25aafb3061f64099e379dfb24e394b07f3f93b38 Mon Sep 17 00:00:00 2001 From: Andy Lowe Date: Fri, 22 Sep 2017 20:29:30 +0200 Subject: [PATCH 0393/3326] serial: sh-sci: suppress warning for ports without dma channels [ Upstream commit 7464779fa8551b90d5797d4020b0bdb7e6422eb9 ] If a port has no dma channel defined in the device tree, then don't attempt to allocate a dma channel for the port. Also suppress the warning message concerning the failure to allocate a dma channel. Continue to emit the warning message if a dma channel is defined but cannot be allocated. Signed-off-by: Andy Lowe Signed-off-by: Eugeniu Rosca Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/sh-sci.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 784dd42002eadb..761b9f5f14915b 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1491,6 +1491,14 @@ static void sci_request_dma(struct uart_port *port) return; s->cookie_tx = -EINVAL; + + /* + * Don't request a dma channel if no channel was specified + * in the device tree. + */ + if (!of_find_property(port->dev->of_node, "dmas", NULL)) + return; + chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV); dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); if (chan) { From 87837b102c0630fd559a412f6fa48c8e9710755c Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sun, 1 Oct 2017 02:18:37 +0100 Subject: [PATCH 0394/3326] usbip: tools: Install all headers needed for libusbip development [ Upstream commit c15562c0dcb2c7f26e891923b784cf1926b8c833 ] usbip_host_driver.h now depends on several additional headers, which need to be installed along with it. 
Fixes: 021aed845303 ("staging: usbip: userspace: migrate usbip_host_driver ...") Fixes: 3391ba0e2792 ("usbip: tools: Extract generic code to be shared with ...") Signed-off-by: Ben Hutchings Acked-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/usb/usbip/Makefile.am | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/usb/usbip/Makefile.am b/tools/usb/usbip/Makefile.am index da3a430849a873..5961e9c188121f 100644 --- a/tools/usb/usbip/Makefile.am +++ b/tools/usb/usbip/Makefile.am @@ -2,6 +2,7 @@ SUBDIRS := libsrc src includedir = @includedir@/usbip include_HEADERS := $(addprefix libsrc/, \ - usbip_common.h vhci_driver.h usbip_host_driver.h) + usbip_common.h vhci_driver.h usbip_host_driver.h \ + list.h sysfs_utils.h usbip_host_common.h) dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8) From c25bec2de9268122407a3f431dd7d031c06f293d Mon Sep 17 00:00:00 2001 From: Ian Jamison Date: Thu, 21 Sep 2017 10:13:12 +0200 Subject: [PATCH 0395/3326] serial: imx: Update cached mctrl value when changing RTS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a0983c742a5885f82afb282166f83f1d3d8addf4 ] UART core function uart_update_mctrl relies on a cached value of modem control lines. This was used but not updated by local RTS control functions within imx.c. These are used for RS485 line driver enable signalling. Having an out-of-date value in the cached mctrl can result in the transmitter being enabled when it shouldn't be. Fix this by updating the mctrl value before applying it. Signed-off-by: Ian Jamison Origin: id:8195c96e674517b82a6ff7fe914c7ba0f86e702b.1505375165.git.ian.dev@arkver.com Acked-by: Uwe Kleine-König Tested-by: Uwe Kleine-König Tested-by: Clemens Gruber Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/imx.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index dfeff3951f9346..3657d745e90f0c 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -334,7 +334,8 @@ static void imx_port_rts_active(struct imx_port *sport, unsigned long *ucr2) { *ucr2 &= ~(UCR2_CTSC | UCR2_CTS); - mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS); + sport->port.mctrl |= TIOCM_RTS; + mctrl_gpio_set(sport->gpios, sport->port.mctrl); } static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2) @@ -342,7 +343,8 @@ static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2) *ucr2 &= ~UCR2_CTSC; *ucr2 |= UCR2_CTS; - mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS); + sport->port.mctrl &= ~TIOCM_RTS; + mctrl_gpio_set(sport->gpios, sport->port.mctrl); } static void imx_port_rts_auto(struct imx_port *sport, unsigned long *ucr2) From 9093d8bc530402a7d7651be40dd53b0b97a813af Mon Sep 17 00:00:00 2001 From: Ioana Radulescu Date: Thu, 28 Sep 2017 09:10:33 -0500 Subject: [PATCH 0396/3326] staging: fsl-mc/dpio: Fix incorrect comparison [ Upstream commit 8dabf52ffb6445fa5bcc8b6d2ecb615f60d0dd12 ] For some dpio functions, a cpu id parameter value of -1 is valid and means "any". But when trying to validate this param value against an upper limit, in this case num_possible_cpus(), we risk obtaining the wrong result due to an implicit cast. Avoid an incorrect check result by explicitly comparing the cpu id with the "any" value before verifying the upper bound. 
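The pitfall in isolation (schematic fragment; num_possible_cpus() returns an unsigned value):

    int cpu = -1;                        /* DPAA2_IO_ANY_CPU, i.e. "any" */

    if (cpu >= num_possible_cpus())      /* -1 is promoted to UINT_MAX: always true */
        return NULL;                     /* the "any" case is wrongly rejected */

    /* fixed check */
    if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
        return NULL;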
Signed-off-by: Ioana Radulescu Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/fsl-mc/bus/dpio/dpio-service.c | 4 ++-- drivers/staging/fsl-mc/include/dpaa2-io.h | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c index f8096828f5b711..a609ec82daf34e 100644 --- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c +++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c @@ -76,7 +76,7 @@ static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d, if (d) return d; - if (unlikely(cpu >= num_possible_cpus())) + if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus()) return NULL; /* @@ -121,7 +121,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) return NULL; /* check if CPU is out of range (-1 means any cpu) */ - if (desc->cpu >= num_possible_cpus()) { + if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) { kfree(obj); return NULL; } diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h index c5646096c5d4c9..afc2d060d077cf 100644 --- a/drivers/staging/fsl-mc/include/dpaa2-io.h +++ b/drivers/staging/fsl-mc/include/dpaa2-io.h @@ -54,6 +54,8 @@ struct device; * for dequeue. */ +#define DPAA2_IO_ANY_CPU -1 + /** * struct dpaa2_io_desc - The DPIO descriptor * @receives_notifications: Use notificaton mode. Non-zero if the DPIO @@ -91,8 +93,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj); * @cb: The callback to be invoked when the notification arrives * @is_cdan: Zero for FQDAN, non-zero for CDAN * @id: FQID or channel ID, needed for rearm - * @desired_cpu: The cpu on which the notifications will show up. -1 means - * any CPU. + * @desired_cpu: The cpu on which the notifications will show up. Use + * DPAA2_IO_ANY_CPU if don't care * @dpio_id: The dpio index * @qman64: The 64-bit context value shows up in the FQDAN/CDAN. * @node: The list node From 6bb9d1772d4a07c1c71e4b82035a67c11f21ad20 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Wed, 13 Sep 2017 10:12:09 +0200 Subject: [PATCH 0397/3326] perf test attr: Fix ignored test case result [ Upstream commit 22905582f6dd4bbd0c370fe5732c607452010c04 ] Command perf test -v 16 (Setup struct perf_event_attr test) always reports success even if the test case fails. It works correctly if you also specify -F (for don't fork). root@s35lp76 perf]# ./perf test -v 16 15: Setup struct perf_event_attr : --- start --- running './tests/attr/test-record-no-delay' [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.002 MB /tmp/tmp4E1h7R/perf.data (1 samples) ] expected task=0, got 1 expected precise_ip=0, got 3 expected wakeup_events=1, got 0 FAILED './tests/attr/test-record-no-delay' - match failure test child finished with 0 ---- end ---- Setup struct perf_event_attr: Ok The reason for the wrong error reporting is the return value of the system() library call. It is called in run_dir() file tests/attr.c and returns the exit status, in above case 0xff00. This value is given as parameter to the exit() function which can only handle values 0-0xff. The child process terminates with exit value of 0 and the parent does not detect any error. This patch corrects the error reporting and prints the correct test result. 
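Why the failure got lost, in two lines (illustrative; 'cmd' is the command string built in run_dir()). system() returns a wait(2)-style status word, so a child that exits with 255 yields 0xff00, and exit() keeps only the low 8 bits of its argument:

    int status = system(cmd);   /* 0xff00 when the test script fails  */

    exit(status);               /* parent observes 0xff00 & 0xff == 0 */

Mapping the status to TEST_FAIL/TEST_OK before it can be truncated, as the diff below does, avoids this.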
Signed-off-by: Thomas-Mich Richter Acked-by: Jiri Olsa Cc: Heiko Carstens Cc: Hendrik Brueckner Cc: Martin Schwidefsky Cc: Thomas-Mich Richter LPU-Reference: 20170913081209.39570-2-tmricht@linux.vnet.ibm.com Link: http://lkml.kernel.org/n/tip-rdube6rfcjsr1nzue72c7lqn@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/perf/tests/attr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c index c180bbcdbef602..0e1367f90af535 100644 --- a/tools/perf/tests/attr.c +++ b/tools/perf/tests/attr.c @@ -167,7 +167,7 @@ static int run_dir(const char *d, const char *perf) snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s", d, d, perf, vcnt, v); - return system(cmd); + return system(cmd) ? TEST_FAIL : TEST_OK; } int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused) From c530d86ded9becc305aaf984cab5d547b6fd6480 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Wed, 13 Sep 2017 10:12:08 +0200 Subject: [PATCH 0398/3326] perf test attr: Fix python error on empty result [ Upstream commit 3440fe2790aa3d13530260af6033533b18959aee ] Commit d78ada4a767 ("perf tests attr: Do not store failed events") does not create an event file in the /tmp directory when the perf_open_event() system call failed. This can lead to a situation where not /tmp/event-xx-yy-zz result file exists at all (for example on a s390x virtual machine environment) where no CPUMF hardware is available. The following command then fails with a python call back chain instead of printing failure: [root@s8360046 perf]# /usr/bin/python2 ./tests/attr.py -d ./tests/attr/ \ -p ./perf -v -ttest-stat-basic running './tests/attr//test-stat-basic' Traceback (most recent call last): File "./tests/attr.py", line 379, in main() File "./tests/attr.py", line 370, in main run_tests(options) File "./tests/attr.py", line 311, in run_tests Test(f, options).run() File "./tests/attr.py", line 300, in run self.compare(self.expect, self.result) File "./tests/attr.py", line 248, in compare exp_event.diff(res_event) UnboundLocalError: local variable 'res_event' referenced before assignment [root@s8360046 perf]# This patch catches this pitfall and prints an error message instead: [root@s8360047 perf]# /usr/bin/python2 ./tests/attr.py -d ./tests/attr/ \ -p ./perf -vvv -ttest-stat-basic running './tests/attr//test-stat-basic' loading expected events Event event:base-stat fd = 1 group_fd = -1 flags = 0|8 [....] 
sample_regs_user = 0 sample_stack_user = 0 'PERF_TEST_ATTR=/tmp/tmpJbMQMP ./perf stat -o /tmp/tmpJbMQMP/perf.data -e cycles kill >/dev/null 2>&1' ret '1', expected '1' loading result events compare matching [event:base-stat] match: [event:base-stat] matches [] res_event is empty FAILED './tests/attr//test-stat-basic' - match failure [root@s8360047 perf]# Signed-off-by: Thomas-Mich Richter Acked-by: Jiri Olsa Cc: Heiko Carstens Cc: Hendrik Brueckner Cc: Martin Schwidefsky Cc: Thomas-Mich Richter LPU-Reference: 20170913081209.39570-1-tmricht@linux.vnet.ibm.com Link: http://lkml.kernel.org/n/tip-04d63nn7svfgxdhi60gq2mlm@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/perf/tests/attr.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py index 907b1b2f56ad45..ff9b60b99f52a9 100644 --- a/tools/perf/tests/attr.py +++ b/tools/perf/tests/attr.py @@ -238,6 +238,7 @@ def compare(self, expect, result): # events in result. Fail if there's not any. for exp_name, exp_event in expect.items(): exp_list = [] + res_event = {} log.debug(" matching [%s]" % exp_name) for res_name, res_event in result.items(): log.debug(" to [%s]" % res_name) @@ -254,7 +255,10 @@ def compare(self, expect, result): if exp_event.optional(): log.debug(" %s does not match, but is optional" % exp_name) else: - exp_event.diff(res_event) + if not res_event: + log.debug(" res_event is empty"); + else: + exp_event.diff(res_event) raise Fail(self, 'match failure'); match[exp_name] = exp_list From 4d0d3ac1ec32444ad985cb92272c94d07e43e9f7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 19 Sep 2017 19:01:40 +0900 Subject: [PATCH 0399/3326] kprobes/x86: Disable preemption in ftrace-based jprobes [ Upstream commit 5bb4fc2d8641219732eb2bb654206775a4219aca ] Disable preemption in ftrace-based jprobe handlers as described in Documentation/kprobes.txt: "Probe handlers are run with preemption disabled." This will fix jprobes behavior when CONFIG_PREEMPT=y. Signed-off-by: Masami Hiramatsu Cc: Alexei Starovoitov Cc: Alexei Starovoitov Cc: Ananth N Mavinakayanahalli Cc: Linus Torvalds Cc: Paul E . 
McKenney Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/150581530024.32348.9863783558598926771.stgit@devbox Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/kprobes/ftrace.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 041f7b6dfa0fe0..bcfee4f69b0e53 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -26,7 +26,7 @@ #include "common.h" static nokprobe_inline -int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, +void __skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, unsigned long orig_ip) { /* @@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, __this_cpu_write(current_kprobe, NULL); if (orig_ip) regs->ip = orig_ip; - return 1; } int skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { - if (kprobe_ftrace(p)) - return __skip_singlestep(p, regs, kcb, 0); - else - return 0; + if (kprobe_ftrace(p)) { + __skip_singlestep(p, regs, kcb, 0); + preempt_enable_no_resched(); + return 1; + } + return 0; } NOKPROBE_SYMBOL(skip_singlestep); -/* Ftrace callback handler for kprobes */ +/* Ftrace callback handler for kprobes -- called under preepmt disabed */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct pt_regs *regs) { @@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ regs->ip = ip + sizeof(kprobe_opcode_t); + /* To emulate trap based kprobes, preempt_disable here */ + preempt_disable(); __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (!p->pre_handler || !p->pre_handler(p, regs)) + if (!p->pre_handler || !p->pre_handler(p, regs)) { __skip_singlestep(p, regs, kcb, orig_ip); + preempt_enable_no_resched(); + } /* * If pre_handler returns !0, it sets regs->ip and - * resets current kprobe. + * resets current kprobe, and keep preempt count +1. */ } end: From bef9bcf53789a454f54adf01c654650189628ab0 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Sat, 2 Sep 2017 13:09:45 -0700 Subject: [PATCH 0400/3326] locking/refcounts, x86/asm: Use unique .text section for refcount exceptions [ Upstream commit 564c9cc84e2adf8a6671c1937f0a9fe3da2a4b0e ] Using .text.unlikely for refcount exceptions isn't safe because gcc may move entire functions into .text.unlikely (e.g. in6_dev_dev()), which would cause any uses of a protected refcount_t function to stay inline with the function, triggering the protection unconditionally: .section .text.unlikely,"ax",@progbits .type in6_dev_get, @function in6_dev_getx: .LFB4673: .loc 2 4128 0 .cfi_startproc ... lock; incl 480(%rbx) js 111f .pushsection .text.unlikely 111: lea 480(%rbx), %rcx 112: .byte 0x0f, 0xff .popsection 113: This creates a unique .text..refcount section and adds an additional test to the exception handler to WARN in the case of having none of OF, SF, nor ZF set so we can see things like this more easily in the future. The double dot for the section name keeps it out of the TEXT_MAIN macro namespace, to avoid collisions and so it can be put at the end with text.unlikely to keep the cold code together. 
See commit: cb87481ee89db ("kbuild: linker script do not match C names unless LD_DEAD_CODE_DATA_ELIMINATION is configured") ... which matches C names: [a-zA-Z0-9_] but not ".". Reported-by: Mike Galbraith Signed-off-by: Kees Cook Cc: Ard Biesheuvel Cc: Elena Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-arch Fixes: 7a46ec0e2f48 ("locking/refcounts, x86/asm: Implement fast refcount overflow protection") Link: http://lkml.kernel.org/r/1504382986-49301-2-git-send-email-keescook@chromium.org Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/refcount.h | 2 +- arch/x86/mm/extable.c | 7 ++++++- include/asm-generic/vmlinux.lds.h | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index ff871210b9f2f3..4e44250e7d0d75 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h @@ -15,7 +15,7 @@ * back to the regular execution flow in .text. */ #define _REFCOUNT_EXCEPTION \ - ".pushsection .text.unlikely\n" \ + ".pushsection .text..refcount\n" \ "111:\tlea %[counter], %%" _ASM_CX "\n" \ "112:\t" ASM_UD0 "\n" \ ASM_UNREACHABLE \ diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index c3521e2be39610..3321b446b66cdb 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -67,12 +67,17 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup, * wrapped around) will be set. Additionally, seeing the refcount * reach 0 will set ZF (Zero Flag: result was zero). In each of * these cases we want a report, since it's a boundary condition. - * + * The SF case is not reported since it indicates post-boundary + * manipulations below zero or above INT_MAX. And if none of the + * flags are set, something has gone very wrong, so report it. */ if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) { bool zero = regs->flags & X86_EFLAGS_ZF; refcount_error_report(regs, zero ? "hit zero" : "overflow"); + } else if ((regs->flags & X86_EFLAGS_SF) == 0) { + /* Report if none of OF, ZF, nor SF are set. */ + refcount_error_report(regs, "unexpected saturation"); } return true; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8acfc1e099e110..e549bff87c5b4e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -459,6 +459,7 @@ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ + *(.text..refcount) \ *(.ref.text) \ MEM_KEEP(init.text) \ MEM_KEEP(exit.text) \ From a9f359f24ce70a291997b3644aeead56d0c44cd6 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 11 Sep 2017 11:24:23 +0200 Subject: [PATCH 0401/3326] s390/ptrace: fix guarded storage regset handling [ Upstream commit 5ef2d5231d547c672c67bdf84c13a4adaf477964 ] If the guarded storage regset for current is supposed to be changed, the regset from user space is copied directly into the guarded storage control block. If then the process gets scheduled away while the control block is being copied and before the new control block has been loaded, the result is random: the process can be scheduled away due to a page fault or preemption. If that happens the already copied parts will be overwritten by save_gs_cb(), called from switch_to(). Avoid this by copying the data to a temporary buffer on the stack and do the actual update with preemption disabled. 
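Condensed sketch of the stage-then-commit pattern the fix uses (trimmed from the full diff that follows; allocation and the 'target != current' handling are omitted):

    struct gs_cb gs_cb = { };

    rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                            &gs_cb, 0, sizeof(gs_cb));
    if (rc)
        return -EFAULT;

    preempt_disable();                   /* no switch_to() from here on */
    *target->thread.gs_cb = gs_cb;       /* install the complete block  */
    if (target == current)
        restore_gs_cb(target->thread.gs_cb);
    preempt_enable();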
Fixes: f5bbd7219891 ("s390/ptrace: guarded storage regset for the current task") Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/ptrace.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 1427d60ce628ce..56e0190d6e6518 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -1172,26 +1172,37 @@ static int s390_gs_cb_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct gs_cb *data = target->thread.gs_cb; + struct gs_cb gs_cb = { }, *data = NULL; int rc; if (!MACHINE_HAS_GS) return -ENODEV; - if (!data) { + if (!target->thread.gs_cb) { data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - data->gsd = 25; - target->thread.gs_cb = data; - if (target == current) - __ctl_set_bit(2, 4); - } else if (target == current) { - save_gs_cb(data); } + if (!target->thread.gs_cb) + gs_cb.gsd = 25; + else if (target == current) + save_gs_cb(&gs_cb); + else + gs_cb = *target->thread.gs_cb; rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - data, 0, sizeof(struct gs_cb)); - if (target == current) - restore_gs_cb(data); + &gs_cb, 0, sizeof(gs_cb)); + if (rc) { + kfree(data); + return -EFAULT; + } + preempt_disable(); + if (!target->thread.gs_cb) + target->thread.gs_cb = data; + *target->thread.gs_cb = gs_cb; + if (target == current) { + __ctl_set_bit(2, 4); + restore_gs_cb(target->thread.gs_cb); + } + preempt_enable(); return rc; } From cf33c88d64557ab627eca91c79c8a7891a9ba9d4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 21 Sep 2017 12:12:17 -0300 Subject: [PATCH 0402/3326] tools include: Do not use poison with C++ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 6ae8eefc6c8fe050f057781b70a83262eb0a61ee ] LIST_POISON[12] are used to initialize list_head and hlist_node pointers, and do void pointer arithmetic, which C++ doesn't like, so, to avoid drifting from the kernel by introducing some HLIST_POISON to do away with void pointer math, just make those poisoned pointers be NULL when building it with a C++ compiler. 
Noticed with: $ make LLVM_CONFIG=/usr/bin/llvm-config-3.9 LIBCLANGLLVM=1 CXX util/c++/clang.o CXX util/c++/clang-test.o In file included from /home/lizj/linux/tools/include/linux/list.h:5:0, from /home/lizj/linux/tools/perf/util/namespaces.h:13, from /home/lizj/linux/tools/perf/util/util.h:15, from /home/lizj/linux/tools/perf/util/util-cxx.h:20, from util/c++/clang-c.h:5, from util/c++/clang-test.cpp:2: /home/lizj/linux/tools/include/linux/list.h: In function ‘void list_del(list_head*)’: /home/lizj/linux/tools/include/linux/poison.h:14:31: error: pointer of type ‘void *’ used in arithmetic [-Werror=pointer-arith] # define POISON_POINTER_DELTA 0 ^ /home/lizj/linux/tools/include/linux/poison.h:22:41: note: in expansion of macro ‘POISON_POINTER_DELTA’ #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) ^ /home/lizj/linux/tools/include/linux/list.h:107:16: note: in expansion of macro ‘LIST_POISON1’ entry->next = LIST_POISON1; ^ In file included from /home/lizj/linux/tools/perf/util/namespaces.h:13:0, from /home/lizj/linux/tools/perf/util/util.h:15, from /home/lizj/linux/tools/perf/util/util-cxx.h:20, from util/c++/clang-c.h:5, from util/c++/clang-test.cpp:2: /home/lizj/linux/tools/include/linux/list.h:107:14: error: invalid conversion from ‘void*’ to ‘list_head*’ [-fpermissive] Reported-by: Li Zhijian Cc: Adrian Hunter Cc: Alexander Shishkin Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Philip Li Cc: Wang Nan Link: http://lkml.kernel.org/n/tip-m5ei2o0mjshucbr28baf5lqz@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/include/linux/poison.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h index 4bf6777a8a035a..9fdcd3eaac3b3b 100644 --- a/tools/include/linux/poison.h +++ b/tools/include/linux/poison.h @@ -15,6 +15,10 @@ # define POISON_POINTER_DELTA 0 #endif +#ifdef __cplusplus +#define LIST_POISON1 NULL +#define LIST_POISON2 NULL +#else /* * These are non-NULL pointers that will result in page faults * under normal circumstances, used to verify that nobody uses @@ -22,6 +26,7 @@ */ #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) #define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) +#endif /********** include/linux/timer.h **********/ /* From 6d29ae49cd31597586835fd22957adba5368cdb1 Mon Sep 17 00:00:00 2001 From: Martin Kepplinger Date: Wed, 13 Sep 2017 21:14:19 +0200 Subject: [PATCH 0403/3326] perf tools: Fix leaking rec_argv in error cases [ Upstream commit c896f85a7c15ab9d040ffac8b8003e47996602a2 ] Let's free the allocated rec_argv in case we return early, in order to avoid leaking memory. This adds free() at a few very similar places across the tree where it was missing. 
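To make the leak pattern concrete, here is a hedged, simplified sketch; event_supported, pr_err() and cmd_record() stand in for the real checks and calls in the affected builtins:

	const char **rec_argv = calloc(argc + 2, sizeof(char *));	/* heap allocation */
	if (rec_argv == NULL)
		return -ENOMEM;

	if (!event_supported) {
		pr_err("failed: event not supported\n");
		free(rec_argv);		/* the cleanup these hunks add on early returns */
		return -1;
	}

	return cmd_record(i, rec_argv);	/* on success the array is handed to the record command */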
Signed-off-by: Martin Kepplinger Cc: Alexander Shishkin Cc: Martin kepplinger Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20170913191419.29806-1-martink@posteo.de Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/perf/builtin-c2c.c | 1 + tools/perf/builtin-mem.c | 1 + tools/perf/builtin-timechart.c | 4 +++- tools/perf/builtin-trace.c | 1 + 4 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index fd32ad08c6d47d..d00aac51130dae 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -2733,6 +2733,7 @@ static int perf_c2c__record(int argc, const char **argv) if (!perf_mem_events[j].supported) { pr_err("failed: event '%s' not supported\n", perf_mem_events[j].name); + free(rec_argv); return -1; } diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index 4db96008527306..e15efba605f6d5 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c @@ -113,6 +113,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) if (!perf_mem_events[j].supported) { pr_err("failed: event '%s' not supported\n", perf_mem_events__name(j)); + free(rec_argv); return -1; } diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 4e2e6169598635..01de01ca14f202 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -1732,8 +1732,10 @@ static int timechart__io_record(int argc, const char **argv) if (rec_argv == NULL) return -ENOMEM; - if (asprintf(&filter, "common_pid != %d", getpid()) < 0) + if (asprintf(&filter, "common_pid != %d", getpid()) < 0) { + free(rec_argv); return -ENOMEM; + } p = rec_argv; for (i = 0; i < common_args_nr; i++) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index d5d7fff1c211ca..8e3c4ec00017dd 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -2086,6 +2086,7 @@ static int trace__record(struct trace *trace, int argc, const char **argv) rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit"; else { pr_err("Neither raw_syscalls nor syscalls events exist.\n"); + free(rec_argv); return -1; } } From 342ee877580058554fe68480756c7d326b625bed Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Sat, 9 Sep 2017 00:56:03 +0300 Subject: [PATCH 0404/3326] mm, x86/mm: Fix performance regression in get_user_pages_fast() [ Upstream commit 5b65c4677a57a1d4414212f9995aa0e46a21ff80 ] The 0-day test bot found a performance regression that was tracked down to switching x86 to the generic get_user_pages_fast() implementation: http://lkml.kernel.org/r/20170710024020.GA26389@yexl-desktop The regression was caused by the fact that we now use local_irq_save() + local_irq_restore() in get_user_pages_fast() to disable interrupts. In x86 implementation local_irq_disable() + local_irq_enable() was used. The fix is to make get_user_pages_fast() use local_irq_disable(), leaving local_irq_save() for __get_user_pages_fast() that can be called with interrupts disabled. Numbers for pinning a gigabyte of memory, one page a time, 20 repeats: Before: Average: 14.91 ms, stddev: 0.45 ms After: Average: 10.76 ms, stddev: 0.18 ms Signed-off-by: Kirill A. 
Shutemov Cc: Andrew Morton Cc: Huang Ying Cc: Jonathan Corbet Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Thorsten Leemhuis Cc: linux-mm@kvack.org Fixes: e585513b76f7 ("x86/mm/gup: Switch GUP to the generic get_user_page_fast() implementation") Link: http://lkml.kernel.org/r/20170908215603.9189-3-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- mm/gup.c | 97 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 58 insertions(+), 39 deletions(-) diff --git a/mm/gup.c b/mm/gup.c index 165ba2174c7584..e0d82b6706d72d 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1707,6 +1707,47 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, return 1; } +static void gup_pgd_range(unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + unsigned long next; + pgd_t *pgdp; + + pgdp = pgd_offset(current->mm, addr); + do { + pgd_t pgd = READ_ONCE(*pgdp); + + next = pgd_addr_end(addr, end); + if (pgd_none(pgd)) + return; + if (unlikely(pgd_huge(pgd))) { + if (!gup_huge_pgd(pgd, pgdp, addr, next, write, + pages, nr)) + return; + } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { + if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, + PGDIR_SHIFT, next, write, pages, nr)) + return; + } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) + return; + } while (pgdp++, addr = next, addr != end); +} + +#ifndef gup_fast_permitted +/* + * Check if it's allowed to use __get_user_pages_fast() for the range, or + * we need to fall back to the slow version: + */ +bool gup_fast_permitted(unsigned long start, int nr_pages, int write) +{ + unsigned long len, end; + + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; + return end >= start; +} +#endif + /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. It will only return non-negative values. @@ -1714,10 +1755,8 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { - struct mm_struct *mm = current->mm; unsigned long addr, len, end; - unsigned long next, flags; - pgd_t *pgdp; + unsigned long flags; int nr = 0; start &= PAGE_MASK; @@ -1741,45 +1780,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, * block IPIs that come from THPs splitting. 
*/ - local_irq_save(flags); - pgdp = pgd_offset(mm, addr); - do { - pgd_t pgd = READ_ONCE(*pgdp); - - next = pgd_addr_end(addr, end); - if (pgd_none(pgd)) - break; - if (unlikely(pgd_huge(pgd))) { - if (!gup_huge_pgd(pgd, pgdp, addr, next, write, - pages, &nr)) - break; - } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { - if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, - PGDIR_SHIFT, next, write, pages, &nr)) - break; - } else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr)) - break; - } while (pgdp++, addr = next, addr != end); - local_irq_restore(flags); + if (gup_fast_permitted(start, nr_pages, write)) { + local_irq_save(flags); + gup_pgd_range(addr, end, write, pages, &nr); + local_irq_restore(flags); + } return nr; } -#ifndef gup_fast_permitted -/* - * Check if it's allowed to use __get_user_pages_fast() for the range, or - * we need to fall back to the slow version: - */ -bool gup_fast_permitted(unsigned long start, int nr_pages, int write) -{ - unsigned long len, end; - - len = (unsigned long) nr_pages << PAGE_SHIFT; - end = start + len; - return end >= start; -} -#endif - /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address @@ -1799,12 +1808,22 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write) int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { + unsigned long addr, len, end; int nr = 0, ret = 0; start &= PAGE_MASK; + addr = start; + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; + + if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, + (void __user *)start, len))) + return 0; if (gup_fast_permitted(start, nr_pages, write)) { - nr = __get_user_pages_fast(start, nr_pages, write, pages); + local_irq_disable(); + gup_pgd_range(addr, end, write, pages, &nr); + local_irq_enable(); ret = nr; } From 68744edd6edcfa663ab917f62827919a0ca95b05 Mon Sep 17 00:00:00 2001 From: Ladislav Michl Date: Fri, 25 Aug 2017 07:39:16 +0200 Subject: [PATCH 0405/3326] iio: adc: ti-ads1015: add 10% to conversion wait time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit fe895ac88b9fbdf2026f0bfd56c82747bb9d7c48 ] As user's guide "ADS1015EVM, ADS1115EVM, ADS1015EVM-PDK, ADS1115EVM-PDK User Guide (Rev. B)" (http://www.ti.com/lit/ug/sbau157b/sbau157b.pdf) states at page 16: "Note that both the ADS1115 and ADS1015 have internal clocks with a ±10% accuracy. If performing FFT tests, frequencies may appear to be incorrect as a result of this tolerance range.", add those 10% to converion wait time. Cc: Daniel Baluta Cc: Jonathan Cameron Signed-off-by: Ladislav Michl Reviewed-by: Akinobu Mita Signed-off-by: Jonathan Cameron Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ti-ads1015.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index e0dc2048833572..9ac2fb032df6b5 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -369,6 +369,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]); conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]); + conv_time += conv_time / 10; /* 10% internal clock inaccuracy */ usleep_range(conv_time, conv_time + 1); data->conv_invalid = false; } From 53ce4d4aad93e2b48999ab9a225a84bfa54760fe Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Thu, 6 Jul 2017 23:53:11 -0500 Subject: [PATCH 0406/3326] iio: multiplexer: add NULL check on devm_kzalloc() and devm_kmemdup() return values [ Upstream commit dd92d5ea20ef8a42be7aeda08c669c586c730451 ] Check return values from call to devm_kzalloc() and devm_kmemup() in order to prevent a NULL pointer dereference. This issue was detected using Coccinelle and the following semantic patch: @@ expression x; identifier fld; @@ * x = devm_kzalloc(...); ... when != x == NULL x->fld Fixes: 7ba9df54b091 ("iio: multiplexer: new iio category and iio-mux driver") Signed-off-by: Gustavo A. R. Silva Acked-by: Peter Rosin Signed-off-by: Jonathan Cameron Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/iio/multiplexer/iio-mux.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c index 37ba007f8dcaae..74831fcd03136e 100644 --- a/drivers/iio/multiplexer/iio-mux.c +++ b/drivers/iio/multiplexer/iio-mux.c @@ -285,6 +285,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux, child->ext_info_cache = devm_kzalloc(dev, sizeof(*child->ext_info_cache) * num_ext_info, GFP_KERNEL); + if (!child->ext_info_cache) + return -ENOMEM; + for (i = 0; i < num_ext_info; ++i) { child->ext_info_cache[i].size = -1; @@ -309,6 +312,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux, child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1, GFP_KERNEL); + if (!child->ext_info_cache[i].data) + return -ENOMEM; + child->ext_info_cache[i].data[ret] = 0; child->ext_info_cache[i].size = ret; } From 340d45d70850ea395a7e397a94ab59972581022c Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Sat, 2 Sep 2017 13:09:46 -0700 Subject: [PATCH 0407/3326] locking/refcounts, x86/asm: Enable CONFIG_ARCH_HAS_REFCOUNT [ Upstream commit 39208aa7ecb7d9c4e86df782b5693270313cbab1 ] With the section inlining bug fixed for the x86 refcount protection, we can turn the config back on. Signed-off-by: Kees Cook Cc: Ard Biesheuvel Cc: Elena Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-arch Link: http://lkml.kernel.org/r/1504382986-49301-3-git-send-email-keescook@chromium.org Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2fdb23313dd55f..9bceea6a5852ed 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -56,7 +56,7 @@ config X86 select ARCH_HAS_KCOV if X86_64 select ARCH_HAS_PMEM_API if X86_64 # Causing hangs/crashes, see the commit that added this change for details. - select ARCH_HAS_REFCOUNT if BROKEN + select ARCH_HAS_REFCOUNT select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN From f8d07852819a0c2da239fca155342b4f80f4aedf Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Fri, 22 Sep 2017 14:40:47 +0530 Subject: [PATCH 0408/3326] powerpc/jprobes: Disable preemption when triggered through ftrace commit 6baea433bc84cd148af1c524389a8d756f67412e upstream. 
KPROBES_SANITY_TEST throws the below splat when CONFIG_PREEMPT is enabled: Kprobe smoke test: started DEBUG_LOCKS_WARN_ON(val > preempt_count()) ------------[ cut here ]------------ WARNING: CPU: 19 PID: 1 at kernel/sched/core.c:3094 preempt_count_sub+0xcc/0x140 Modules linked in: CPU: 19 PID: 1 Comm: swapper/0 Not tainted 4.13.0-rc7-nnr+ #97 task: c0000000fea80000 task.stack: c0000000feb00000 NIP: c00000000011d3dc LR: c00000000011d3d8 CTR: c000000000a090d0 REGS: c0000000feb03400 TRAP: 0700 Not tainted (4.13.0-rc7-nnr+) MSR: 8000000000021033 CR: 28000282 XER: 00000000 CFAR: c00000000015aa18 SOFTE: 0 NIP preempt_count_sub+0xcc/0x140 LR preempt_count_sub+0xc8/0x140 Call Trace: preempt_count_sub+0xc8/0x140 (unreliable) kprobe_handler+0x228/0x4b0 program_check_exception+0x58/0x3b0 program_check_common+0x16c/0x170 --- interrupt: 0 at kprobe_target+0x8/0x20 LR = init_test_probes+0x248/0x7d0 kp+0x0/0x80 (unreliable) livepatch_handler+0x38/0x74 init_kprobes+0x1d8/0x208 do_one_initcall+0x68/0x1d0 kernel_init_freeable+0x298/0x374 kernel_init+0x24/0x160 ret_from_kernel_thread+0x5c/0x70 Instruction dump: 419effdc 3d22001b 39299240 81290000 2f890000 409effc8 3c82ffcb 3c62ffcb 3884bc68 3863bc18 4803d5fd 60000000 <0fe00000> 4bffffa8 60000000 60000000 ---[ end trace 432dd46b4ce3d29f ]--- Kprobe smoke test: passed successfully The issue is that we aren't disabling preemption in kprobe_ftrace_handler(). Disable it. Fixes: ead514d5fb30a0 ("powerpc/kprobes: Add support for KPROBES_ON_FTRACE") Acked-by: Masami Hiramatsu Signed-off-by: Naveen N. Rao [mpe: Trim oops a little for formatting] Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/kprobes-ftrace.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c index 6c089d9757c9c1..2d81404f818c7e 100644 --- a/arch/powerpc/kernel/kprobes-ftrace.c +++ b/arch/powerpc/kernel/kprobes-ftrace.c @@ -65,6 +65,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, /* Disable irq for emulating a breakpoint and avoiding preempt */ local_irq_save(flags); hard_irq_disable(); + preempt_disable(); p = get_kprobe((kprobe_opcode_t *)nip); if (unlikely(!p) || kprobe_disabled(p)) @@ -86,12 +87,18 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (!p->pre_handler || !p->pre_handler(p, regs)) __skip_singlestep(p, regs, kcb, orig_nip); - /* - * If pre_handler returns !0, it sets regs->nip and - * resets current kprobe. - */ + else { + /* + * If pre_handler returns !0, it sets regs->nip and + * resets current kprobe. In this case, we still need + * to restore irq, but not preemption. + */ + local_irq_restore(flags); + return; + } } end: + preempt_enable_no_resched(); local_irq_restore(flags); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); From 1ffabfc1d58ba586fa11cc3cde21d54afec03387 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Mon, 23 Oct 2017 22:07:38 +0530 Subject: [PATCH 0409/3326] powerpc/kprobes: Disable preemption before invoking probe handler for optprobes commit 8a2d71a3f2737e2448aa68de2b6052cb570d3d2a upstream. Per Documentation/kprobes.txt, probe handlers need to be invoked with preemption disabled. Update optimized_callback() to do so. Also move get_kprobe_ctlblk() invocation post preemption disable, since it accesses pre-cpu data. This was not an issue so far since optprobes wasn't selected if CONFIG_PREEMPT was enabled. 
Commit a30b85df7d599f ("kprobes: Use synchronize_rcu_tasks() for optprobe with CONFIG_PREEMPT=y") changes this. Signed-off-by: Naveen N. Rao Acked-by: Masami Hiramatsu Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/optprobes.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 91e037ab20a197..60ba7f1370a80c 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -115,7 +115,6 @@ static unsigned long can_optimize(struct kprobe *p) static void optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); unsigned long flags; /* This is possible if op is under delayed unoptimizing */ @@ -124,13 +123,14 @@ static void optimized_callback(struct optimized_kprobe *op, local_irq_save(flags); hard_irq_disable(); + preempt_disable(); if (kprobe_running()) { kprobes_inc_nmissed_count(&op->kp); } else { __this_cpu_write(current_kprobe, &op->kp); regs->nip = (unsigned long)op->kp.addr; - kcb->kprobe_status = KPROBE_HIT_ACTIVE; + get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; opt_pre_handler(&op->kp, regs); __this_cpu_write(current_kprobe, NULL); } @@ -140,6 +140,7 @@ static void optimized_callback(struct optimized_kprobe *op, * local_irq_restore() will re-enable interrupts, * if they were hard disabled. */ + preempt_enable_no_resched(); local_irq_restore(flags); } NOKPROBE_SYMBOL(optimized_callback); From 779bfa90bdf97a6d00459471f6d371f6036f3da5 Mon Sep 17 00:00:00 2001 From: Dominik Behr Date: Thu, 7 Sep 2017 16:02:46 -0300 Subject: [PATCH 0410/3326] dma-buf/sw_sync: force signal all unsignaled fences on dying timeline commit ea4d5a270b57fa8d4871f372ca9b97b7697fdfda upstream. To avoid hanging userspace components that might have been waiting on the active fences of the destroyed timeline we need to signal with error all remaining fences on such timeline. This restore the default behaviour of the Android sw_sync framework, which Android still relies on. It was broken on the dma fence conversion a few years ago and never fixed. v2: Do not bother with cleanup do the list (Chris Wilson) Reviewed-by: Chris Wilson Signed-off-by: Dominik Behr Signed-off-by: Gustavo Padovan Link: https://patchwork.freedesktop.org/patch/msgid/20170907190246.16425-2-gustavo@padovan.org Cc: Jisheng Zhang Signed-off-by: Greg Kroah-Hartman --- drivers/dma-buf/sw_sync.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 38cc7389a6c172..24f83f9eeaedce 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -321,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file) static int sw_sync_debugfs_release(struct inode *inode, struct file *file) { struct sync_timeline *obj = file->private_data; + struct sync_pt *pt, *next; + + spin_lock_irq(&obj->lock); + + list_for_each_entry_safe(pt, next, &obj->pt_list, link) { + dma_fence_set_error(&pt->base, -ENOENT); + dma_fence_signal_locked(&pt->base); + } - smp_wmb(); + spin_unlock_irq(&obj->lock); sync_timeline_put(obj); return 0; From a633c7ed4a4c6097475de05d950e35cf92b8dec7 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Thu, 9 Nov 2017 09:16:09 +0000 Subject: [PATCH 0411/3326] staging: ccree: fix leak of import() after init() commit c5f39d07860c35e5e4c63188139465af790f86ce upstream. 
crypto_ahash_import() may be called either after crypto_ahash_init() or without such call. Right now we always internally call init() as part of import(), thus leaking memory and mappings if the user has already called init() herself. Fix this by only calling init() internally if the state is not already initialized. Fixes: commit 454527d0d94f ("staging: ccree: fix hash import/export") Signed-off-by: Gilad Ben-Yossef Reviewed-by: Dan Carpenter Signed-off-by: Greg Kroah-Hartman --- drivers/staging/ccree/ssi_hash.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c index 13291aeaf350ba..f72ca485c86f68 100644 --- a/drivers/staging/ccree/ssi_hash.c +++ b/drivers/staging/ccree/ssi_hash.c @@ -1790,9 +1790,12 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in) } in += sizeof(u32); - rc = ssi_hash_init(state, ctx); - if (rc) - goto out; + /* call init() to allocate bufs if the user hasn't */ + if (!state->digest_buff) { + rc = ssi_hash_init(state, ctx); + if (rc) + goto out; + } dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); From 755532dd5f643f1a1da479bb133a946ce5c97905 Mon Sep 17 00:00:00 2001 From: Mike Looijmans Date: Thu, 9 Nov 2017 13:16:46 +0100 Subject: [PATCH 0412/3326] usb: hub: Cycle HUB power when initialization fails commit 973593a960ddac0f14f0d8877d2d0abe0afda795 upstream. Sometimes the USB device gets confused about the state of the initialization and the connection fails. In particular, the device thinks that it's already set up and running while the host thinks the device still needs to be configured. To work around this issue, power-cycle the hub's output to issue a sort of "reset" to the device. This makes the device restart its state machine and then the initialization succeeds. This fixes problems where the kernel reports a list of errors like this: usb 1-1.3: device not accepting address 19, error -71 The end result is a non-functioning device. 
After this patch, the sequence becomes like this: usb 1-1.3: new high-speed USB device number 18 using ci_hdrc usb 1-1.3: device not accepting address 18, error -71 usb 1-1.3: new high-speed USB device number 19 using ci_hdrc usb 1-1.3: device not accepting address 19, error -71 usb 1-1-port3: attempt power cycle usb 1-1.3: new high-speed USB device number 21 using ci_hdrc usb-storage 1-1.3:1.2: USB Mass Storage device detected Signed-off-by: Mike Looijmans Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hub.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index e9ce6bb0b22d15..8f7d94239ee3e3 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4935,6 +4935,15 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; + + /* When halfway through our retry count, power-cycle the port */ + if (i == (SET_CONFIG_TRIES / 2) - 1) { + dev_info(&port_dev->dev, "attempt power cycle\n"); + usb_hub_set_port_power(hdev, hub, port1, false); + msleep(2 * hub_power_on_good_delay(hub)); + usb_hub_set_port_power(hdev, hub, port1, true); + msleep(hub_power_on_good_delay(hub)); + } } if (hub->hdev->parent || !hcd->driver->port_handed_over || From 8bc546bbfec9be32108af2684a2766629acc052d Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sat, 11 Nov 2017 16:31:18 +0100 Subject: [PATCH 0413/3326] USB: ulpi: fix bus-node lookup commit 33c309ebc797b908029fd3a0851aefe697e9b598 upstream. Fix bus-node lookup during registration, which ended up searching the whole device tree depth-first starting at the parent (or grandparent) rather than just matching on its children. To make things worse, the parent (or grandparent) node could end up being prematurely freed as well. Fixes: ef6a7bcfb01c ("usb: ulpi: Support device discovery via DT") Reported-by: Peter Robinson Reported-by: Stephen Boyd Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/common/ulpi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 4aa5195db8ea53..e02acfb1ca95f5 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -183,9 +183,9 @@ static int ulpi_of_register(struct ulpi *ulpi) /* Find a ulpi bus underneath the parent or the grandparent */ parent = ulpi->dev.parent; if (parent->of_node) - np = of_find_node_by_name(parent->of_node, "ulpi"); + np = of_get_child_by_name(parent->of_node, "ulpi"); else if (parent->parent && parent->parent->of_node) - np = of_find_node_by_name(parent->parent->of_node, "ulpi"); + np = of_get_child_by_name(parent->parent->of_node, "ulpi"); if (!np) return 0; From 33288619482a876f2575ed308ac2c1c3199a6a1a Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Fri, 1 Dec 2017 13:41:19 +0200 Subject: [PATCH 0414/3326] xhci: Don't show incorrect WARN message about events for empty rings commit e4ec40ec4b260efcca15089de4285a0a3411259b upstream. The xHC can generate two events for a short transfer if the short TRB and the last TRB in the TD are not the same TRB. The driver will handle the TD after the first short event and remove it from its internal list. The driver then incorrectly prints a warning for the second event: "WARN Event TRB for slot x ep y with no TDs queued" Fix this by not printing a warning if we get an event on an empty list and the previous event was a short event.
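Restating the new check with comments (a sketch of the hunk below; trb_comp_code, ep_ring and event are as used in handle_tx_event(), and the rest of the handler is elided):

	if (list_empty(&ep_ring->td_list)) {
		/* Warn only if the empty list is unexpected, i.e. this is not
		 * a stopped/suspended endpoint completion and not the second
		 * event of a short TD that was already handled and removed
		 * (tracked by ep_ring->last_td_was_short).
		 */
		if (!(trb_comp_code == COMP_STOPPED ||
		      trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
		      ep_ring->last_td_was_short))
			xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
				  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index);
	}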
Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-ring.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 82c746e2d85c0f..353520005c1358 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2486,12 +2486,16 @@ static int handle_tx_event(struct xhci_hcd *xhci, */ if (list_empty(&ep_ring->td_list)) { /* - * A stopped endpoint may generate an extra completion - * event if the device was suspended. Don't print - * warnings. + * Don't print wanings if it's due to a stopped endpoint + * generating an extra completion event if the device + * was suspended. Or, a event for the last TRB of a + * short TD we already got a short event for. + * The short TD is already removed from the TD list. */ + if (!(trb_comp_code == COMP_STOPPED || - trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { + trb_comp_code == COMP_STOPPED_LENGTH_INVALID || + ep_ring->last_td_was_short)) { xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); From 4346c775cf707a703e2b305315e37b9beee30bd6 Mon Sep 17 00:00:00 2001 From: Yu Chen Date: Fri, 1 Dec 2017 13:41:20 +0200 Subject: [PATCH 0415/3326] usb: xhci: fix panic in xhci_free_virt_devices_depth_first commit 80e457699a8dbdd70f2d26911e46f538645c55fc upstream. Check vdev->real_port 0 to avoid panic [ 9.261347] [] xhci_free_virt_devices_depth_first+0x58/0x108 [ 9.261352] [] xhci_mem_cleanup+0x1bc/0x570 [ 9.261355] [] xhci_stop+0x140/0x1c8 [ 9.261365] [] usb_remove_hcd+0xfc/0x1d0 [ 9.261369] [] xhci_plat_remove+0x6c/0xa8 [ 9.261377] [] platform_drv_remove+0x2c/0x70 [ 9.261384] [] __device_release_driver+0x80/0x108 [ 9.261387] [] device_release_driver+0x2c/0x40 [ 9.261392] [] bus_remove_device+0xe0/0x120 [ 9.261396] [] device_del+0x114/0x210 [ 9.261399] [] platform_device_del+0x30/0xa0 [ 9.261403] [] dwc3_otg_work+0x204/0x488 [ 9.261407] [] event_work+0x304/0x5b8 [ 9.261414] [] process_one_work+0x148/0x490 [ 9.261417] [] worker_thread+0x50/0x4a0 [ 9.261421] [] kthread+0xe8/0x100 [ 9.261427] [] ret_from_fork+0x10/0x50 The problem can occur if xhci_plat_remove() is called shortly after xhci_plat_probe(). While xhci_free_virt_devices_depth_first been called before the device has been setup and get real_port initialized. The problem occurred on Hikey960 and was reproduced by Guenter Roeck on Kevin with chromeos-4.4. 
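The added guard, annotated (a sketch of the hunk below; vdev, xhci, tt_list_head and the out label belong to xhci_free_virt_devices_depth_first()):

	/* real_port is 1-based and only valid once the device has completed
	 * setup; 0 or an out-of-range value would index rh_bw[] out of bounds.
	 */
	if (vdev->real_port == 0 ||
	    vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad vdev->real_port.\n");
		goto out;	/* skip the TT walk but still free the leaf device */
	}
	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);	/* index is now known to be safe */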
Fixes: ee8665e28e8d ("xhci: free xhci virtual devices with leaf nodes first") Cc: Guenter Roeck Reviewed-by: Guenter Roeck Tested-by: Guenter Roeck Signed-off-by: Fan Ning Signed-off-by: Li Rui Signed-off-by: yangdi Signed-off-by: Yu Chen Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-mem.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 2a82c927ded21d..97f30eb7dac0d0 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -947,6 +947,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) if (!vdev) return; + if (vdev->real_port == 0 || + vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { + xhci_dbg(xhci, "Bad vdev->real_port.\n"); + goto out; + } + tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { /* is this a hub device that added a tt_info to the tts list */ @@ -960,6 +966,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) } } } +out: /* we are now at a leaf device */ xhci_free_virt_device(xhci, slot_id); } From 903b2d14d0e57e48fd80d5a9d35d9fbf3c1e63ea Mon Sep 17 00:00:00 2001 From: Masakazu Mokuno Date: Fri, 10 Nov 2017 01:25:50 +0900 Subject: [PATCH 0416/3326] USB: core: Add type-specific length check of BOS descriptors commit 81cf4a45360f70528f1f64ba018d61cb5767249a upstream. As most of BOS descriptors are longer in length than their header 'struct usb_dev_cap_header', comparing solely with it is not sufficient to avoid out-of-bounds access to BOS descriptors. This patch adds descriptor type specific length check in usb_get_bos_descriptor() to fix the issue. Signed-off-by: Masakazu Mokuno Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/config.c | 28 ++++++++++++++++++++++++---- include/uapi/linux/usb/ch9.h | 3 +++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 883549ee946cb5..c42a3e63eb07cc 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -905,14 +905,25 @@ void usb_release_bos_descriptor(struct usb_device *dev) } } +static const __u8 bos_desc_len[256] = { + [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE, + [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE, + [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE, + [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1), + [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE, + [USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE, +}; + /* Get BOS descriptor set */ int usb_get_bos_descriptor(struct usb_device *dev) { struct device *ddev = &dev->dev; struct usb_bos_descriptor *bos; struct usb_dev_cap_header *cap; + struct usb_ssp_cap_descriptor *ssp_cap; unsigned char *buffer; - int length, total_len, num, i; + int length, total_len, num, i, ssac; + __u8 cap_type; int ret; bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL); @@ -965,7 +976,13 @@ int usb_get_bos_descriptor(struct usb_device *dev) dev->bos->desc->bNumDeviceCaps = i; break; } + cap_type = cap->bDevCapabilityType; length = cap->bLength; + if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) { + dev->bos->desc->bNumDeviceCaps = i; + break; + } + total_len -= length; if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { @@ -973,7 +990,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) continue; } - switch (cap->bDevCapabilityType) { + switch (cap_type) { case USB_CAP_TYPE_WIRELESS_USB: /* Wireless USB 
cap descriptor is handled by wusb */ break; @@ -986,8 +1003,11 @@ int usb_get_bos_descriptor(struct usb_device *dev) (struct usb_ss_cap_descriptor *)buffer; break; case USB_SSP_CAP_TYPE: - dev->bos->ssp_cap = - (struct usb_ssp_cap_descriptor *)buffer; + ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; + ssac = (le32_to_cpu(ssp_cap->bmAttributes) & + USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1; + if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac)) + dev->bos->ssp_cap = ssp_cap; break; case CONTAINER_ID_TYPE: dev->bos->ss_id = diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index cec06625f4076c..8512777889b02c 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -876,6 +876,8 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */ __u8 bReserved; } __attribute__((packed)); +#define USB_DT_USB_WIRELESS_CAP_SIZE 11 + /* USB 2.0 Extension descriptor */ #define USB_CAP_TYPE_EXT 2 @@ -1068,6 +1070,7 @@ struct usb_ptm_cap_descriptor { __u8 bDevCapabilityType; } __attribute__((packed)); +#define USB_DT_USB_PTM_ID_SIZE 3 /* * The size of the descriptor for the Sublink Speed Attribute Count * (SSAC) specified in bmAttributes[4:0]. From 15f12be9860e380ccc809f0bb939b9ea385be9b1 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Thu, 23 Nov 2017 16:39:52 +0100 Subject: [PATCH 0417/3326] USB: usbfs: Filter flags passed in from user space commit 446f666da9f019ce2ffd03800995487e79a91462 upstream. USBDEVFS_URB_ISO_ASAP must be accepted only for ISO endpoints. Improve sanity checking. Reported-by: Andrey Konovalov Signed-off-by: Oliver Neukum Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devio.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 98c666ef9a5756..ab245352f102a6 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1455,14 +1455,18 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb int number_of_packets = 0; unsigned int stream_id = 0; void *buf; - - if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP | - USBDEVFS_URB_SHORT_NOT_OK | + unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | USBDEVFS_URB_ZERO_PACKET | - USBDEVFS_URB_NO_INTERRUPT)) - return -EINVAL; + USBDEVFS_URB_NO_INTERRUPT; + /* USBDEVFS_URB_ISO_ASAP is a special case */ + if (uurb->type == USBDEVFS_URB_TYPE_ISO) + mask |= USBDEVFS_URB_ISO_ASAP; + + if (uurb->flags & ~mask) + return -EINVAL; + if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) return -EINVAL; if (uurb->buffer_length > 0 && !uurb->buffer) From de667b08c34c61b59fb1a71ba0eff648550a7799 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 7 Nov 2017 16:45:04 +0000 Subject: [PATCH 0418/3326] usb: host: fix incorrect updating of offset commit 1d5a31582ef046d3b233f0da1a68ae26519b2f0a upstream. The variable temp is incorrectly being updated, instead it should be offset otherwise the loop just reads the same capability value and loops forever. Thanks to Alan Stern for pointing out the correct fix to my original fix. 
The fix also cleans up a clang warning: drivers/usb/host/ehci-dbg.c:840:4: warning: Value stored to 'temp' is never read Fixes: d49d43174400 ("USB: misc ehci updates") Signed-off-by: Colin Ian King Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ehci-dbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index cbb9b8e12c3ce7..8c5a6fee4dfdf3 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c @@ -837,7 +837,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf) default: /* unknown */ break; } - temp = (cap >> 8) & 0xff; + offset = (cap >> 8) & 0xff; } } #endif From c3b0874866f2282f51295dc4cf69672b2caf6039 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 4 Dec 2017 17:24:54 -0800 Subject: [PATCH 0419/3326] locking/refcounts: Do not force refcount_t usage as GPL-only export commit b562c171cf011d297059bd0265742eb5fab0ad2f upstream. The refcount_t protection on x86 was not intended to use the stricter GPL export. This adjusts the linkage again to avoid a regression in the availability of the refcount API. Reported-by: Dave Airlie Fixes: 7a46ec0e2f48 ("locking/refcounts, x86/asm: Implement fast refcount overflow protection") Signed-off-by: Kees Cook Signed-off-by: Linus Torvalds Cc: Ivan Kozik Cc: Thomas Backlund Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/extable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 3321b446b66cdb..30bc4812ceb8a1 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -82,7 +82,7 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup, return true; } -EXPORT_SYMBOL_GPL(ex_handler_refcount); +EXPORT_SYMBOL(ex_handler_refcount); /* * Handler for when we fail to restore a task's FPU state. We should never get From 64138f0adb25ca8f34baa57af33260b05efe2874 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sun, 10 Dec 2017 13:40:45 +0100 Subject: [PATCH 0420/3326] Linux 4.14.5 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ba1648c093fea2..43ac7bdb10adad 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 4 +SUBLEVEL = 5 EXTRAVERSION = NAME = Petit Gorille From cb6c13d914e0f5b7badc3ec455114e4b45177c2a Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Mon, 13 Nov 2017 17:59:18 +0900 Subject: [PATCH 0421/3326] usb: gadget: udc: renesas_usb3: fix number of the pipes commit a58204ab91ad8cae4d8474aa0ba5d1fc504860c9 upstream. On R-Car Gen3, this controller actually has 6 pipes, including PIPE 0 for control, but the datasheet erroneously states that it has 31 pipes. (The previous code wrongly defined 30 pipes.) This patch fixes the count.
Fixes: 746bfe63bba3 ("usb: gadget: renesas_usb3: add support for Renesas USB3.0 peripheral controller") Signed-off-by: Yoshihiro Shimoda Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/renesas_usb3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 63a20612205896..6b3e8adb64e683 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -254,7 +254,7 @@ #define USB3_EP0_SS_MAX_PACKET_SIZE 512 #define USB3_EP0_HSFS_MAX_PACKET_SIZE 64 #define USB3_EP0_BUF_SIZE 8 -#define USB3_MAX_NUM_PIPES 30 +#define USB3_MAX_NUM_PIPES 6 /* This includes PIPE 0 */ #define USB3_WAIT_US 3 #define USB3_DMA_NUM_SETTING_AREA 4 /* From 28ddc2b45f5f409b905dc78e79d959377789fc12 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Tue, 31 Oct 2017 15:56:29 +0200 Subject: [PATCH 0422/3326] usb: gadget: core: Fix ->udc_set_speed() speed handling commit a4f0927ef588cf62bb864707261482c874352942 upstream. Currently UDC core calls ->udc_set_speed() with the speed parameter containing the maximum speed supported by the gadget function driver. This might very well be more than that supported by the UDC controller driver. Select the lesser of the 2 speeds so both UDC and gadget function driver are operating within limits. This fixes PHY Erratic errors and 2 second enumeration delay on TI's AM437x platforms. Fixes: 6099eca796ae ("usb: gadget: core: introduce ->udc_set_speed() method") Reported-by: Dylan Howey Signed-off-by: Roger Quadros Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/core.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index d41d07aae0cec0..def1b05ffca0b1 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1080,8 +1080,12 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc) static inline void usb_gadget_udc_set_speed(struct usb_udc *udc, enum usb_device_speed speed) { - if (udc->gadget->ops->udc_set_speed) - udc->gadget->ops->udc_set_speed(udc->gadget, speed); + if (udc->gadget->ops->udc_set_speed) { + enum usb_device_speed s; + + s = min(speed, udc->gadget->max_speed); + udc->gadget->ops->udc_set_speed(udc->gadget, s); + } } /** From d415ccdd0be1a745904a7b12b87437463aa35a8d Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 3 Nov 2017 15:30:52 +0100 Subject: [PATCH 0423/3326] serdev: ttyport: add missing receive_buf sanity checks commit eb281683621b71ab9710d9dccbbef0c2e1769c97 upstream. The receive_buf tty-port callback should return the number of bytes accepted and must specifically never return a negative errno (or a value larger than the buffer size) to the tty layer. A serdev driver not providing a receive_buf callback would currently cause the flush_to_ldisc() worker to spin in a tight loop when the tty buffer pointers are incremented with -EINVAL (-22) after data has been received. A serdev driver occasionally returning a negative errno (or a too large byte count) could cause information leaks or crashes when accessing memory outside the tty buffers in consecutive callbacks. 
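The contract being enforced is that a tty_port receive_buf() callback returns the number of bytes it consumed, in the range 0..count. The fragment below is a commented sketch of the clamp the patch adds in the ttyport glue (ctrl, cp and count as in ttyport_receive_buf()):

	ret = serdev_controller_receive_buf(ctrl, cp, count);

	/* The tty layer advances its buffer pointers by the return value, so
	 * an errno or an oversized count must never reach flush_to_ldisc().
	 */
	dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
		      "receive_buf returns %d (count = %zu)\n", ret, count);
	if (ret < 0)
		return 0;	/* claim nothing; the data is dropped rather than re-queued */
	if (ret > count)
		return count;	/* cannot have consumed more than was offered */
	return ret;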
Fixes: cd6484e1830b ("serdev: Introduce new bus for serial attached devices") Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serdev/serdev-ttyport.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index 302018d67efa97..d4efe43544019b 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -35,11 +35,22 @@ static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp, { struct serdev_controller *ctrl = port->client_data; struct serport *serport = serdev_controller_get_drvdata(ctrl); + int ret; if (!test_bit(SERPORT_ACTIVE, &serport->flags)) return 0; - return serdev_controller_receive_buf(ctrl, cp, count); + ret = serdev_controller_receive_buf(ctrl, cp, count); + + dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count, + "receive_buf returns %d (count = %zu)\n", + ret, count); + if (ret < 0) + return 0; + else if (ret > count) + return count; + + return ret; } static void ttyport_write_wakeup(struct tty_port *port) From 5a3da8bd93c78b6a4aed19c83cb8f493dfc4dfef Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 3 Nov 2017 15:30:55 +0100 Subject: [PATCH 0424/3326] serdev: ttyport: fix NULL-deref on hangup commit 8bcd4e6a8decac251d55c4377e2e67f052777ce0 upstream. Make sure to use a properly refcounted tty_struct in write_wake up to avoid dereferencing a NULL-pointer when a port is being hung up. Fixes: bed35c6dfa6a ("serdev: add a tty port controller driver") Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serdev/serdev-ttyport.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index d4efe43544019b..ce7fc44f2fe2a7 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -57,12 +57,19 @@ static void ttyport_write_wakeup(struct tty_port *port) { struct serdev_controller *ctrl = port->client_data; struct serport *serport = serdev_controller_get_drvdata(ctrl); + struct tty_struct *tty; + + tty = tty_port_tty_get(port); + if (!tty) + return; - if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags) && + if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && test_bit(SERPORT_ACTIVE, &serport->flags)) serdev_controller_write_wakeup(ctrl); - wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT); + wake_up_interruptible_poll(&tty->write_wait, POLLOUT); + + tty_kref_put(tty); } static const struct tty_port_client_operations client_ops = { From ce42ed5ae8982360dba581acfafdaff88f2b3fbe Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 3 Nov 2017 15:30:56 +0100 Subject: [PATCH 0425/3326] serdev: ttyport: fix tty locking in close commit 90dbad8cd6efccbdce109d5ef0724f8434a6cdde upstream. Make sure to hold the tty lock as required when calling tty-driver close() (e.g. to avoid racing with hangup()). Note that the serport active flag is currently set under the lock at controller open, but really isn't protected by it. 
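A sketch of the locking rule the close path now follows (tty and serport as in ttyport_close(); simplified from the hunk below):

	clear_bit(SERPORT_ACTIVE, &serport->flags);

	/* tty_operations::close() must be serialized against hangup() by the
	 * tty lock; the tty core takes it on normal file release, so an
	 * out-of-band caller such as serdev has to take it explicitly.
	 */
	tty_lock(tty);
	if (tty->ops->close)
		tty->ops->close(tty, NULL);
	tty_unlock(tty);

	tty_release_struct(tty, serport->tty_idx);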
Fixes: cd6484e1830b ("serdev: Introduce new bus for serial attached devices") Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serdev/serdev-ttyport.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index ce7fc44f2fe2a7..7f785d77ba7f40 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -149,8 +149,10 @@ static void ttyport_close(struct serdev_controller *ctrl) clear_bit(SERPORT_ACTIVE, &serport->flags); + tty_lock(tty); if (tty->ops->close) tty->ops->close(tty, NULL); + tty_unlock(tty); tty_release_struct(tty, serport->tty_idx); } From 4dc9c1cfa9fffed0ec812a85e4b06a68efb6afea Mon Sep 17 00:00:00 2001 From: John Keeping Date: Mon, 27 Nov 2017 18:15:40 +0000 Subject: [PATCH 0426/3326] usb: f_fs: Force Reserved1=1 in OS_DESC_EXT_COMPAT commit a3acc696085e112733d191a77b106e67a4fa110b upstream. The specification says that the Reserved1 field in OS_DESC_EXT_COMPAT must have the value "1", but when this feature was first implemented we rejected any non-zero values. This was adjusted to accept all non-zero values (while now rejecting zero) in commit 53642399aa71 ("usb: gadget: f_fs: Fix wrong check on reserved1 of OS_DESC_EXT_COMPAT"), but that breaks any userspace programs that worked previously by returning EINVAL when Reserved1 == 0 which was previously the only value that succeeded! If we just set the field to "1" ourselves, both old and new userspace programs continue to work correctly and, as a bonus, old programs are now compliant with the specification without having to fix anything themselves. Fixes: 53642399aa71 ("usb: gadget: f_fs: Fix wrong check on reserved1 of OS_DESC_EXT_COMPAT") Signed-off-by: John Keeping Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_fs.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index ef8f7d63a8f0a4..0202e5132fa78f 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -2286,9 +2286,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, int i; if (len < sizeof(*d) || - d->bFirstInterfaceNumber >= ffs->interfaces_count || - !d->Reserved1) + d->bFirstInterfaceNumber >= ffs->interfaces_count) return -EINVAL; + if (d->Reserved1 != 1) { + /* + * According to the spec, Reserved1 must be set to 1 + * but older kernels incorrectly rejected non-zero + * values. We fix it here to avoid returning EINVAL + * in response to values we used to accept. + */ + pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n"); + d->Reserved1 = 1; + } for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) if (d->Reserved2[i]) return -EINVAL; From ba4eed1bd4d82ed591f07eee628b1efade7e6764 Mon Sep 17 00:00:00 2001 From: Martin Kelly Date: Mon, 27 Nov 2017 15:49:16 -0800 Subject: [PATCH 0427/3326] can: mcba_usb: fix device disconnect bug commit 1cb35a33a28394fd711bb26ddf3a564f4e9d9125 upstream. Currently, when you disconnect the device, the driver infinitely resubmits all URBs, so you see: Rx URB aborted (-32) in an infinite loop. Fix this by catching -EPIPE (what we get in urb->status when the device disconnects) and not resubmitting. With this patch, I can plug and unplug many times and the driver recovers correctly. 
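Several of the CAN USB fixes that follow converge on the same completion-handler rule: do not resubmit an URB once the status indicates the device is gone. A hedged sketch of that pattern (the function name is illustrative; buffer processing and the resubmit call are elided):

	static void rx_bulk_callback(struct urb *urb)
	{
		switch (urb->status) {
		case 0:			/* success: process the received data */
			break;
		case -ENOENT:		/* URB was killed */
		case -EPIPE:		/* seen on device disconnect */
		case -EPROTO:		/* also seen on disconnect, depending on timing */
		case -ESHUTDOWN:	/* device or host controller is going away */
			return;		/* do not resubmit; avoids the endless
					 * "Rx URB aborted" loop and CPU stalls */
		default:
			break;		/* transient error: fall through and resubmit */
		}
		/* ... handle urb->transfer_buffer and usb_submit_urb() again ... */
	}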
Signed-off-by: Martin Kelly Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/mcba_usb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 7f0272558befe9..a884d31fe324c9 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -592,6 +592,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb) break; case -ENOENT: + case -EPIPE: case -ESHUTDOWN: return; From 5b65e2916c4ba62ce543cd1366a6649a9ff5bcb2 Mon Sep 17 00:00:00 2001 From: Stephane Grosjean Date: Thu, 23 Nov 2017 15:44:35 +0100 Subject: [PATCH 0428/3326] can: peak/pci: fix potential bug when probe() fails commit 5c2cb02edf79ad79d9b8d07c6d52243a948c4c9f upstream. PCI/PCIe drivers for PEAK-System CAN/CAN-FD interfaces do some access to the PCI config during probing. In case one of these accesses fails, a POSITIVE PCIBIOS_xxx error code is returned back. This POSITIVE error code MUST be converted into a NEGATIVE errno for the probe() function to indicate it failed. Using the pcibios_err_to_errno() function, we make sure that the return code will always be negative. Signed-off-by: Stephane Grosjean Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/peak_canfd/peak_pciefd_main.c | 5 ++++- drivers/net/can/sja1000/peak_pci.c | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index b4efd711f824cc..788c3464a3b0e9 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -825,7 +825,10 @@ static int peak_pciefd_probe(struct pci_dev *pdev, err_disable_pci: pci_disable_device(pdev); - return err; + /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while + * the probe() function must return a negative errno in case of failure + * (err is unchanged if negative) */ + return pcibios_err_to_errno(err); } /* free the board structure object, as well as its resources: */ diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 131026fbc2d77c..5adc95c922eef2 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -717,7 +717,10 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) failure_disable_pci: pci_disable_device(pdev); - return err; + /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while + * the probe() function must return a negative errno in case of failure + * (err is unchanged if negative) */ + return pcibios_err_to_errno(err); } static void peak_pci_remove(struct pci_dev *pdev) From 39797254631553b51b2e64d5160ff8709a66254c Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Mon, 27 Nov 2017 09:18:21 +0100 Subject: [PATCH 0429/3326] can: flexcan: fix VF610 state transition issue commit 29c64b17a0bc72232acc45e9533221d88a262efb upstream. Enable FLEXCAN_QUIRK_BROKEN_PERR_STATE for VF610 to report correct state transitions. Tested-by: Mirza Krak Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/flexcan.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index a13a4896a8bdda..c4d1140116ea26 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -189,7 +189,7 @@ * MX35 FlexCAN2 03.00.00.00 no no ? 
no no * MX53 FlexCAN2 03.00.00.00 yes no no no no * MX6s FlexCAN3 10.00.12.00 yes yes no no yes - * VF610 FlexCAN3 ? no yes ? yes yes? + * VF610 FlexCAN3 ? no yes no yes yes? * * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. */ @@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | + FLEXCAN_QUIRK_BROKEN_PERR_STATE, }; static const struct can_bittiming_const flexcan_bittiming_const = { From 63f2bd86ccc0a2864bdb0d9bb7bb6735276019af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oliver=20St=C3=A4bler?= Date: Mon, 20 Nov 2017 14:45:15 +0100 Subject: [PATCH 0430/3326] can: ti_hecc: Fix napi poll return value for repoll MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit f6c23b174c3c96616514827407769cbcfc8005cf upstream. After commit d75b1ade567f ("net: less interrupt masking in NAPI") napi repoll is done only when work_done == budget. So we need to return budget if there are still packets to receive. Signed-off-by: Oliver Stäbler Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/ti_hecc.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 4d4941469cfc06..db6ea936dc3fc3 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota) mbx_mask = hecc_read(priv, HECC_CANMIM); mbx_mask |= HECC_TX_MBOX_MASK; hecc_write(priv, HECC_CANMIM, mbx_mask); + } else { + /* repoll is done only if whole budget is used */ + num_pkts = quota; } return num_pkts; From 896ae65aabc8438a3fa9405e3996bd59e3372055 Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Tue, 21 Nov 2017 08:22:26 +0100 Subject: [PATCH 0431/3326] can: kvaser_usb: free buf in error paths commit 435019b48033138581a6171093b181fc6b4d3d30 upstream. The allocated buffer was not freed if usb_submit_urb() failed. Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/kvaser_usb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 9b18d96ef52633..0756445914981e 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, if (err) { netdev_err(netdev, "Error transmitting URB\n"); usb_unanchor_urb(urb); + kfree(buf); usb_free_urb(urb); return err; } @@ -1768,6 +1769,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); usb_unanchor_urb(urb); + kfree(buf); stats->tx_dropped++; From 5250169e7390db30c60d35515b6b40501764164d Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Tue, 21 Nov 2017 08:22:27 +0100 Subject: [PATCH 0432/3326] can: kvaser_usb: Fix comparison bug in kvaser_usb_read_bulk_callback() commit e84f44eb5523401faeb9cc1c97895b68e3cfb78d upstream. The conditon in the while-loop becomes true when actual_length is less than 2 (MSG_HEADER_LEN). In best case we end up with a former, already dispatched msg, that got msg->len greater than actual_length. 
This will result in a "Format error" error printout. Problem seen when unplugging a Kvaser USB device connected to a vbox guest. warning: comparison between signed and unsigned integer expressions [-Wsign-compare] Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/kvaser_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 0756445914981e..d87e330a20b3f4 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -1334,7 +1334,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) goto resubmit_urb; } - while (pos <= urb->actual_length - MSG_HEADER_LEN) { + while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) { msg = urb->transfer_buffer + pos; /* The Kvaser firmware can only read and write messages that From 6f4fd93f349036ebeb136838444336f376f0e7db Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Tue, 21 Nov 2017 08:22:28 +0100 Subject: [PATCH 0433/3326] can: kvaser_usb: ratelimit errors if incomplete messages are received commit 8bd13bd522ff7dfa0eb371921aeb417155f7a3be upstream. Avoid flooding the kernel log with "Formate error", if incomplete message are received. Signed-off-by: Jimmy Assarsson Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/kvaser_usb.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index d87e330a20b3f4..f95945915d209d 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, } if (pos + tmp->len > actual_len) { - dev_err(dev->udev->dev.parent, - "Format error\n"); + dev_err_ratelimited(dev->udev->dev.parent, + "Format error\n"); break; } @@ -1353,7 +1353,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) } if (pos + msg->len > urb->actual_length) { - dev_err(dev->udev->dev.parent, "Format error\n"); + dev_err_ratelimited(dev->udev->dev.parent, + "Format error\n"); break; } From d04d52a6f268b635b4e69cbe28beaf5fd19228e1 Mon Sep 17 00:00:00 2001 From: Martin Kelly Date: Tue, 5 Dec 2017 11:15:49 -0800 Subject: [PATCH 0434/3326] can: kvaser_usb: cancel urb on -EPIPE and -EPROTO commit 6aa8d5945502baf4687d80de59b7ac865e9e666b upstream. In mcba_usb, we have observed that when you unplug the device, the driver will endlessly resubmit failing URBs, which can cause CPU stalls. This issue is fixed in mcba_usb by catching the codes seen on device disconnect (-EPIPE and -EPROTO). This driver also resubmits in the case of -EPIPE and -EPROTO, so fix it in the same way. 
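For reference, the read callbacks touched by this group of fixes all share one shape; a minimal sketch with generic placeholder names (not the actual kvaser_usb/mcba_usb symbols) looks like this:

#include <linux/usb.h>

static void example_read_bulk_callback(struct urb *urb)
{
	switch (urb->status) {
	case 0:			/* success, process the data below */
		break;
	case -ENOENT:		/* URB was killed */
	case -EPIPE:		/* seen on device unplug */
	case -EPROTO:		/* seen on device unplug */
	case -ESHUTDOWN:	/* device / host controller going away */
		return;		/* fatal: do not resubmit */
	default:
		goto resubmit;	/* transient error, try again */
	}

	/* ... handle urb->transfer_buffer / urb->actual_length here ... */

resubmit:
	if (usb_submit_urb(urb, GFP_ATOMIC))
		dev_err(&urb->dev->dev, "resubmit failed, rx stops\n");
}

Without -EPIPE and -EPROTO in the "do not resubmit" list, an unplugged device completes the resubmitted URB with the same error again and again, and the callback loops endlessly, which is the CPU stall described above.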
Signed-off-by: Martin Kelly Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/kvaser_usb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index f95945915d209d..63587b8e6825ad 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -1326,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) case 0: break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; default: From b96f17231dca5ddc2136999ca147cdeb842b2664 Mon Sep 17 00:00:00 2001 From: Martin Kelly Date: Tue, 5 Dec 2017 10:34:03 -0800 Subject: [PATCH 0435/3326] can: mcba_usb: cancel urb on -EPROTO commit c7f33023308f3142433b7379718af5f0c2c322a6 upstream. When we unplug the device, we can see both -EPIPE and -EPROTO depending on exact timing and what system we run on. If we continue to resubmit URBs, they will immediately fail, and they can cause stalls, especially on slower CPUs. Fix this by not resubmitting on -EPROTO, as we already do on -EPIPE. Signed-off-by: Martin Kelly Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/mcba_usb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index a884d31fe324c9..e0c24abce16ce9 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -593,6 +593,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb) case -ENOENT: case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; From e7a80033fd226b71bd49602052ff8242355fd4a5 Mon Sep 17 00:00:00 2001 From: Martin Kelly Date: Tue, 5 Dec 2017 11:15:47 -0800 Subject: [PATCH 0436/3326] can: ems_usb: cancel urb on -EPIPE and -EPROTO commit bd352e1adfe0d02d3ea7c8e3fb19183dc317e679 upstream. In mcba_usb, we have observed that when you unplug the device, the driver will endlessly resubmit failing URBs, which can cause CPU stalls. This issue is fixed in mcba_usb by catching the codes seen on device disconnect (-EPIPE and -EPROTO). This driver also resubmits in the case of -EPIPE and -EPROTO, so fix it in the same way. Signed-off-by: Martin Kelly Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/ems_usb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index b3d02759c226bb..b0035829742460 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb) case -ECONNRESET: /* unlink */ case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; From 421b93cb0f352c84964b701cf1db2e221c187d37 Mon Sep 17 00:00:00 2001 From: Martin Kelly Date: Tue, 5 Dec 2017 11:15:48 -0800 Subject: [PATCH 0437/3326] can: esd_usb2: cancel urb on -EPIPE and -EPROTO commit 7a31ced3de06e9878e4f9c3abe8f87d9344d8144 upstream. In mcba_usb, we have observed that when you unplug the device, the driver will endlessly resubmit failing URBs, which can cause CPU stalls. This issue is fixed in mcba_usb by catching the codes seen on device disconnect (-EPIPE and -EPROTO). This driver also resubmits in the case of -EPIPE and -EPROTO, so fix it in the same way. 
Signed-off-by: Martin Kelly Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/esd_usb2.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 9fdb0f0bfa06a0..c6dcf93675c005 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb) break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; From 67464fbca35ccfbe2f5e957ff2a31334390b7947 Mon Sep 17 00:00:00 2001 From: Martin Kelly Date: Tue, 5 Dec 2017 11:15:50 -0800 Subject: [PATCH 0438/3326] can: usb_8dev: cancel urb on -EPIPE and -EPROTO commit 12147edc434c9e4c7c2f5fee2e5519b2e5ac34ce upstream. In mcba_usb, we have observed that when you unplug the device, the driver will endlessly resubmit failing URBs, which can cause CPU stalls. This issue is fixed in mcba_usb by catching the codes seen on device disconnect (-EPIPE and -EPROTO). This driver also resubmits in the case of -EPIPE and -EPROTO, so fix it in the same way. Signed-off-by: Martin Kelly Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/usb_8dev.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index d000cb62d6ae8c..27861c417c9404 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb) break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; From 1f5d203906bc32ddb57d73ec6ea1694505ab7ffa Mon Sep 17 00:00:00 2001 From: Stephane Grosjean Date: Thu, 7 Dec 2017 16:13:43 +0100 Subject: [PATCH 0439/3326] can: peak/pcie_fd: fix potential bug in restarting tx queue commit 91785de6f94b58c3fb6664609e3682f011bd28d2 upstream. Don't rely on can_get_echo_skb() return value to wake the network tx queue up: can_get_echo_skb() returns 0 if the echo array slot was not occupied, but also when the DLC of the released echo frame was 0. Signed-off-by: Stephane Grosjean Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/peak_canfd/peak_canfd.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 85268be0c913df..55513411a82e68 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -258,21 +258,18 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, /* if this frame is an echo, */ if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) && !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { - int n; unsigned long flags; spin_lock_irqsave(&priv->echo_lock, flags); - n = can_get_echo_skb(priv->ndev, msg->client); + can_get_echo_skb(priv->ndev, msg->client); spin_unlock_irqrestore(&priv->echo_lock, flags); /* count bytes of the echo instead of skb */ stats->tx_bytes += cf_len; stats->tx_packets++; - if (n) { - /* restart tx queue only if a slot is free */ - netif_wake_queue(priv->ndev); - } + /* restart tx queue (a slot is free) */ + netif_wake_queue(priv->ndev); return 0; } From c5a5c47c40ce2478ad5b42b781b16c3e3849438f Mon Sep 17 00:00:00 2001 From: weiping zhang Date: Wed, 29 Nov 2017 09:23:01 +0800 Subject: [PATCH 0440/3326] virtio: release virtio index when fail to device_register commit e60ea67bb60459b95a50a156296041a13e0e380e upstream. 
index can be reused by other virtio device. Signed-off-by: weiping zhang Reviewed-by: Cornelia Huck Signed-off-by: Michael S. Tsirkin Signed-off-by: Greg Kroah-Hartman --- drivers/virtio/virtio.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 48230a5e12f262..bf7ff3934d7fff 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -333,6 +333,8 @@ int register_virtio_device(struct virtio_device *dev) /* device_register() causes the bus infrastructure to look for a * matching driver. */ err = device_register(&dev->dev); + if (err) + ida_simple_remove(&virtio_index_ida, dev->index); out: if (err) virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); From c55befc42a9e65fed575dd7ce4e605e1164df79a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 6 Sep 2017 14:56:50 +0200 Subject: [PATCH 0441/3326] iio: stm32: fix adc/trigger link error commit 6d745ee8b5e81f3a33791e3c854fbbfd6f3e585e upstream. The ADC driver can trigger on either the timer or the lptim trigger, but it only uses a Kconfig 'select' statement to ensure that the first of the two is present. When the lptim trigger is enabled as a loadable module, and the adc driver is built-in, we now get a link error: drivers/iio/adc/stm32-adc.o: In function `stm32_adc_get_trig_extsel': stm32-adc.c:(.text+0x4e0): undefined reference to `is_stm32_lptim_trigger' We could use a second 'select' statement and always have both trigger drivers enabled when the adc driver is, but it seems that the lptimer trigger was intentionally left optional, so it seems better to keep it that way. This adds a hack to use 'IS_REACHABLE()' rather than 'IS_ENABLED()', which avoids the link error, but instead leads to the lptimer trigger not being used in the broken configuration. I've added a runtime warning for this case to help users figure out what they did wrong if this should ever be done by accident. Fixes: f0b638a7f6db ("iio: adc: stm32: add support for lptimer triggers") Signed-off-by: Arnd Bergmann Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- include/linux/iio/timer/stm32-lptim-trigger.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h index 34d59bfdce2d2b..464458d20b1650 100644 --- a/include/linux/iio/timer/stm32-lptim-trigger.h +++ b/include/linux/iio/timer/stm32-lptim-trigger.h @@ -16,11 +16,14 @@ #define LPTIM2_OUT "lptim2_out" #define LPTIM3_OUT "lptim3_out" -#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER) +#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER) bool is_stm32_lptim_trigger(struct iio_trigger *trig); #else static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig) { +#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER) + pr_warn_once("stm32 lptim_trigger not linked in\n"); +#endif return false; } #endif From aaeba39ddfe21321c8cfd5e3caeaef1b7153b184 Mon Sep 17 00:00:00 2001 From: Peter Meerwald-Stadler Date: Fri, 27 Oct 2017 21:45:31 +0200 Subject: [PATCH 0442/3326] iio: health: max30102: Temperature should be in milli Celsius commit ad44a9f804c1591ba2a2ec0ac8d916a515d2790c upstream. As per ABI temperature should be in milli Celsius after scaling, not Celsius Note on stable cc. This driver is breaking the standard IIO ABI. 
(JC) Signed-off-by: Peter Meerwald-Stadler Acked-by: Matt Ranostay Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/health/max30102.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c index 839b875c29b9d5..9fb4bc73a6bc70 100644 --- a/drivers/iio/health/max30102.c +++ b/drivers/iio/health/max30102.c @@ -371,7 +371,7 @@ static int max30102_read_raw(struct iio_dev *indio_dev, mutex_unlock(&indio_dev->mlock); break; case IIO_CHAN_INFO_SCALE: - *val = 1; /* 0.0625 */ + *val = 1000; /* 62.5 */ *val2 = 16; ret = IIO_VAL_FRACTIONAL; break; From 6f4a0a44ba5aafcb4884b863e0e27923c41ab2ec Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Mon, 13 Nov 2017 00:01:20 +0800 Subject: [PATCH 0443/3326] iio: adc: cpcap: fix incorrect validation commit 81b039ec36a41a5451e1e36f05bb055eceab1dc8 upstream. Function platform_get_irq_byname() returns a negative error code on failure, and a zero or positive number on success. However, in function cpcap_adc_probe(), positive IRQ numbers are also taken as error cases. Use "if (ddata->irq < 0)" instead of "if (!ddata->irq)" to validate the return value of platform_get_irq_byname(). Signed-off-by: Pan Bian Fixes: 25ec249632d50 ("iio: adc: cpcap: Add minimal support for CPCAP PMIC ADC") Reviewed-by: Sebastian Reichel Acked-by: Tony Lindgren Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/cpcap-adc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c index 6e419d5a7c1463..f153e02686a084 100644 --- a/drivers/iio/adc/cpcap-adc.c +++ b/drivers/iio/adc/cpcap-adc.c @@ -1012,7 +1012,7 @@ static int cpcap_adc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, indio_dev); ddata->irq = platform_get_irq_byname(pdev, "adcdone"); - if (!ddata->irq) + if (ddata->irq < 0) return -ENODEV; error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL, From ac2d7838808c5bb68ea357f748cc4c1710555d6d Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Tue, 31 Oct 2017 21:01:43 +0100 Subject: [PATCH 0444/3326] iio: adc: meson-saradc: fix the bit_idx of the adc_en clock commit 7a6b0420d2fe4ce59437bd318826fe468f0d71ae upstream. Meson8 and Meson8b SoCs use the the SAR ADC gate clock provided by the MESON_SAR_ADC_REG3 register within the SAR ADC register area. According to the datasheet (and the existing MESON_SAR_ADC_REG3_CLK_EN definition) the gate is on bit 30. The fls() function returns the last set bit, which is "bit index + 1" (fls(MESON_SAR_ADC_REG3_CLK_EN) returns 31). Fix this by switching to __ffs() which returns the first set bit, which is bit 30 in our case. This off by one error results in the ADC not being usable on devices where the bootloader did not enable the clock. 
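A minimal example of the off-by-one, assuming (as stated above) that MESON_SAR_ADC_REG3_CLK_EN is BIT(30); the macro and function names below are illustrative stand-ins, not driver code:

#include <linux/bitops.h>
#include <linux/printk.h>

#define EXAMPLE_CLK_EN	BIT(30)		/* stand-in for MESON_SAR_ADC_REG3_CLK_EN */

static void clk_gate_bit_idx_example(void)
{
	/* fls() is 1-based ("find last set"): BIT(30) -> 31, one too high */
	pr_info("fls   = %d\n", fls(EXAMPLE_CLK_EN));

	/* __ffs() is 0-based ("find first set"): BIT(30) -> 30, the gate bit */
	pr_info("__ffs = %lu\n", __ffs(EXAMPLE_CLK_EN));
}

clk_gate.bit_idx expects the 0-based bit position, which is why __ffs() is the right helper here.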
Fixes: 3adbf3427330 ("iio: adc: add a driver for the SAR ADC found in Amlogic Meson SoCs") Signed-off-by: Martin Blumenstingl Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/meson_saradc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 2e8dbb89c8c968..55611244c79912 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -600,7 +600,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev, init.num_parents = 1; priv->clk_gate.reg = base + MESON_SAR_ADC_REG3; - priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN); + priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN); priv->clk_gate.hw.init = &init; priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw); From b6c6d01a2debe8df9541a9bd21cc60760d20f6a2 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Tue, 31 Oct 2017 21:01:44 +0100 Subject: [PATCH 0445/3326] iio: adc: meson-saradc: initialize the bandgap correctly on older SoCs commit d85eed9f576369bc90322659de96b7dbea1f9a57 upstream. Meson8 and Meson8b do not have the MESON_SAR_ADC_REG11 register. The bandgap setting for these SoCs is configured in the MESON_SAR_ADC_DELTA_10 register instead. Make the driver aware of this difference and use the correct bandgap register depending on the SoC. This has worked fine on Meson8 and Meson8b because the bootloader is already initializing the bandgap setting. Fixes: 6c76ed31cd05 ("iio: adc: meson-saradc: add Meson8b SoC compatibility") Signed-off-by: Martin Blumenstingl Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/meson_saradc.c | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 55611244c79912..abe9df879b2a28 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -221,6 +221,7 @@ enum meson_sar_adc_chan7_mux_sel { struct meson_sar_adc_data { bool has_bl30_integration; + u32 bandgap_reg; unsigned int resolution; const char *name; }; @@ -685,6 +686,20 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev) return 0; } +static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off) +{ + struct meson_sar_adc_priv *priv = iio_priv(indio_dev); + u32 enable_mask; + + if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11) + enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN; + else + enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN; + + regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask, + on_off ? 
enable_mask : 0); +} + static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); @@ -717,9 +732,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0, MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval); - regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, - MESON_SAR_ADC_REG11_BANDGAP_EN, - MESON_SAR_ADC_REG11_BANDGAP_EN); + + meson_sar_adc_set_bandgap(indio_dev, true); + regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, MESON_SAR_ADC_REG3_ADC_EN); @@ -739,8 +754,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) err_adc_clk: regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, 0); - regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, - MESON_SAR_ADC_REG11_BANDGAP_EN, 0); + meson_sar_adc_set_bandgap(indio_dev, false); clk_disable_unprepare(priv->sana_clk); err_sana_clk: clk_disable_unprepare(priv->core_clk); @@ -765,8 +779,8 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev) regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, 0); - regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, - MESON_SAR_ADC_REG11_BANDGAP_EN, 0); + + meson_sar_adc_set_bandgap(indio_dev, false); clk_disable_unprepare(priv->sana_clk); clk_disable_unprepare(priv->core_clk); @@ -845,30 +859,35 @@ static const struct iio_info meson_sar_adc_iio_info = { static const struct meson_sar_adc_data meson_sar_adc_meson8_data = { .has_bl30_integration = false, + .bandgap_reg = MESON_SAR_ADC_DELTA_10, .resolution = 10, .name = "meson-meson8-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = { .has_bl30_integration = false, + .bandgap_reg = MESON_SAR_ADC_DELTA_10, .resolution = 10, .name = "meson-meson8b-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = { .has_bl30_integration = true, + .bandgap_reg = MESON_SAR_ADC_REG11, .resolution = 10, .name = "meson-gxbb-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_gxl_data = { .has_bl30_integration = true, + .bandgap_reg = MESON_SAR_ADC_REG11, .resolution = 12, .name = "meson-gxl-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_gxm_data = { .has_bl30_integration = true, + .bandgap_reg = MESON_SAR_ADC_REG11, .resolution = 12, .name = "meson-gxm-saradc", }; From b13ec02ab4a24192ad56b47e39a82269cc4825ca Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Tue, 31 Oct 2017 21:01:45 +0100 Subject: [PATCH 0446/3326] iio: adc: meson-saradc: Meson8 and Meson8b do not have REG11 and REG13 commit 96748823c483c6eed8321f78bd128dd33f09c55c upstream. The Meson GXBB and newer SoCs have a few more registers than the older Meson8 and Meson8b SoCs. Use a separate regmap config to limit the older SoCs to the DELTA_10 register. 
Fixes: 6c76ed31cd05 ("iio: adc: meson-saradc: add Meson8b SoC compatibility") Signed-off-by: Martin Blumenstingl Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/meson_saradc.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index abe9df879b2a28..7dc7d297a0fc46 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -224,6 +224,7 @@ struct meson_sar_adc_data { u32 bandgap_reg; unsigned int resolution; const char *name; + const struct regmap_config *regmap_config; }; struct meson_sar_adc_priv { @@ -243,13 +244,20 @@ struct meson_sar_adc_priv { int calibscale; }; -static const struct regmap_config meson_sar_adc_regmap_config = { +static const struct regmap_config meson_sar_adc_regmap_config_gxbb = { .reg_bits = 8, .val_bits = 32, .reg_stride = 4, .max_register = MESON_SAR_ADC_REG13, }; +static const struct regmap_config meson_sar_adc_regmap_config_meson8 = { + .reg_bits = 8, + .val_bits = 32, + .reg_stride = 4, + .max_register = MESON_SAR_ADC_DELTA_10, +}; + static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); @@ -860,6 +868,7 @@ static const struct iio_info meson_sar_adc_iio_info = { static const struct meson_sar_adc_data meson_sar_adc_meson8_data = { .has_bl30_integration = false, .bandgap_reg = MESON_SAR_ADC_DELTA_10, + .regmap_config = &meson_sar_adc_regmap_config_meson8, .resolution = 10, .name = "meson-meson8-saradc", }; @@ -867,6 +876,7 @@ static const struct meson_sar_adc_data meson_sar_adc_meson8_data = { static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = { .has_bl30_integration = false, .bandgap_reg = MESON_SAR_ADC_DELTA_10, + .regmap_config = &meson_sar_adc_regmap_config_meson8, .resolution = 10, .name = "meson-meson8b-saradc", }; @@ -874,6 +884,7 @@ static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = { static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = { .has_bl30_integration = true, .bandgap_reg = MESON_SAR_ADC_REG11, + .regmap_config = &meson_sar_adc_regmap_config_gxbb, .resolution = 10, .name = "meson-gxbb-saradc", }; @@ -881,6 +892,7 @@ static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = { static const struct meson_sar_adc_data meson_sar_adc_gxl_data = { .has_bl30_integration = true, .bandgap_reg = MESON_SAR_ADC_REG11, + .regmap_config = &meson_sar_adc_regmap_config_gxbb, .resolution = 12, .name = "meson-gxl-saradc", }; @@ -888,6 +900,7 @@ static const struct meson_sar_adc_data meson_sar_adc_gxl_data = { static const struct meson_sar_adc_data meson_sar_adc_gxm_data = { .has_bl30_integration = true, .bandgap_reg = MESON_SAR_ADC_REG11, + .regmap_config = &meson_sar_adc_regmap_config_gxbb, .resolution = 12, .name = "meson-gxm-saradc", }; @@ -965,7 +978,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev) return ret; priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, - &meson_sar_adc_regmap_config); + priv->data->regmap_config); if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); From 6c802ac42f970976d26241068e7202a84c9fd8b7 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Tue, 14 Nov 2017 17:51:50 +0100 Subject: [PATCH 0447/3326] pinctrl: armada-37xx: Fix direction_output() callback behavior commit 6702abb3bf2394f250af0ee04070227bb5dda788 upstream. 
The direction_output callback of the gpio_chip structure is supposed to set the output direction but also to set the value of the gpio. For the armada-37xx driver this callback acted as the gpio_set_direction callback for the pinctrl. This patch fixes the behavior of the direction_output callback by also applying the value received as parameter. Fixes: 5715092a458c ("pinctrl: armada-37xx: Add gpio support") Reported-by: Alexandre Belloni Signed-off-by: Gregory CLEMENT Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index 71b944748304e2..c5fe7d4a906591 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -408,12 +408,21 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip, { struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); unsigned int reg = OUTPUT_EN; - unsigned int mask; + unsigned int mask, val, ret; armada_37xx_update_reg(®, offset); mask = BIT(offset); - return regmap_update_bits(info->regmap, reg, mask, mask); + ret = regmap_update_bits(info->regmap, reg, mask, mask); + + if (ret) + return ret; + + reg = OUTPUT_VAL; + val = value ? mask : 0; + regmap_update_bits(info->regmap, reg, mask, val); + + return 0; } static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset) From 3f77a2c68ba8377f7f7ddcadff5ffda238cb83db Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Tue, 14 Nov 2017 06:53:33 -0700 Subject: [PATCH 0448/3326] Drivers: hv: vmbus: Fix a rescind issue commit 7fa32e5ec28b1609abc0b797b58267f725fc3964 upstream. The current rescind processing code will not correctly handle the case where the host immediately rescinds a channel that has been offerred. In this case, we could be blocked in the open call and since the channel is rescinded, the host will not respond and we could be blocked forever in the vmbus open call.i Fix this problem. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- drivers/hv/channel.c | 10 ++++++++-- drivers/hv/channel_mgmt.c | 7 ++++--- include/linux/hyperv.h | 1 + 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 894b67ac2cae50..05964347008d9c 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -640,22 +640,28 @@ void vmbus_close(struct vmbus_channel *channel) */ return; } - mutex_lock(&vmbus_connection.channel_mutex); /* * Close all the sub-channels first and then close the * primary channel. */ list_for_each_safe(cur, tmp, &channel->sc_list) { cur_channel = list_entry(cur, struct vmbus_channel, sc_list); - vmbus_close_internal(cur_channel); if (cur_channel->rescind) { + wait_for_completion(&cur_channel->rescind_event); + mutex_lock(&vmbus_connection.channel_mutex); + vmbus_close_internal(cur_channel); hv_process_channel_removal( cur_channel->offermsg.child_relid); + } else { + mutex_lock(&vmbus_connection.channel_mutex); + vmbus_close_internal(cur_channel); } + mutex_unlock(&vmbus_connection.channel_mutex); } /* * Now close the primary. 
*/ + mutex_lock(&vmbus_connection.channel_mutex); vmbus_close_internal(channel); mutex_unlock(&vmbus_connection.channel_mutex); } diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 379b0df123bee9..65c6d6bdce4cc2 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -333,6 +333,7 @@ static struct vmbus_channel *alloc_channel(void) return NULL; spin_lock_init(&channel->lock); + init_completion(&channel->rescind_event); INIT_LIST_HEAD(&channel->sc_list); INIT_LIST_HEAD(&channel->percpu_list); @@ -883,6 +884,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) /* * Now wait for offer handling to complete. */ + vmbus_rescind_cleanup(channel); while (READ_ONCE(channel->probe_done) == false) { /* * We wait here until any channel offer is currently @@ -898,7 +900,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) if (channel->device_obj) { if (channel->chn_rescind_callback) { channel->chn_rescind_callback(channel); - vmbus_rescind_cleanup(channel); return; } /* @@ -907,7 +908,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) */ dev = get_device(&channel->device_obj->device); if (dev) { - vmbus_rescind_cleanup(channel); vmbus_device_unregister(channel->device_obj); put_device(dev); } @@ -921,13 +921,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) * 2. Then close the primary channel. */ mutex_lock(&vmbus_connection.channel_mutex); - vmbus_rescind_cleanup(channel); if (channel->state == CHANNEL_OPEN_STATE) { /* * The channel is currently not open; * it is safe for us to cleanup the channel. */ hv_process_channel_removal(rescind->child_relid); + } else { + complete(&channel->rescind_event); } mutex_unlock(&vmbus_connection.channel_mutex); } diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6431087816ba5b..ba74eaa8eadffd 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -708,6 +708,7 @@ struct vmbus_channel { u8 monitor_bit; bool rescind; /* got rescind msg */ + struct completion rescind_event; u32 ringbuffer_gpadlhandle; From 070703364a970ac029f04c09d3e42a6d2287e01a Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Tue, 14 Nov 2017 13:06:47 -0700 Subject: [PATCH 0449/3326] hv: kvp: Avoid reading past allocated blocks from KVP file commit 297d6b6e56c2977fc504c61bbeeaa21296923f89 upstream. While reading in more than one block (50) of KVP records, the allocation goes per block, but the reads used the total number of allocated records (without resetting the pointer/stream). This causes the records buffer to overrun when the refresh reads more than one block over the previous capacity (e.g. reading more than 100 KVP records whereas the in-memory database was empty before). Fix this by reading the correct number of KVP records from file each time. Signed-off-by: Paul Meyer Signed-off-by: Long Li Signed-off-by: K. Y. 
Srinivasan Signed-off-by: Greg Kroah-Hartman --- tools/hv/hv_kvp_daemon.c | 70 ++++++++-------------------------------- 1 file changed, 14 insertions(+), 56 deletions(-) diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index eaa3bec273c8e2..4c99c57736cefd 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool) for (;;) { readp = &record[records_read]; records_read += fread(readp, sizeof(struct kvp_record), - ENTRIES_PER_BLOCK * num_blocks, - filep); + ENTRIES_PER_BLOCK * num_blocks - records_read, + filep); if (ferror(filep)) { - syslog(LOG_ERR, "Failed to read file, pool: %d", pool); + syslog(LOG_ERR, + "Failed to read file, pool: %d; error: %d %s", + pool, errno, strerror(errno)); + kvp_release_lock(pool); exit(EXIT_FAILURE); } @@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool) if (record == NULL) { syslog(LOG_ERR, "malloc failed"); + kvp_release_lock(pool); exit(EXIT_FAILURE); } continue; @@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool) fclose(filep); kvp_release_lock(pool); } + static int kvp_file_init(void) { int fd; - FILE *filep; - size_t records_read; char *fname; - struct kvp_record *record; - struct kvp_record *readp; - int num_blocks; int i; int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK; @@ -246,61 +246,19 @@ static int kvp_file_init(void) for (i = 0; i < KVP_POOL_COUNT; i++) { fname = kvp_file_info[i].fname; - records_read = 0; - num_blocks = 1; sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i); fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */); if (fd == -1) return 1; - - filep = fopen(fname, "re"); - if (!filep) { - close(fd); - return 1; - } - - record = malloc(alloc_unit * num_blocks); - if (record == NULL) { - fclose(filep); - close(fd); - return 1; - } - for (;;) { - readp = &record[records_read]; - records_read += fread(readp, sizeof(struct kvp_record), - ENTRIES_PER_BLOCK, - filep); - - if (ferror(filep)) { - syslog(LOG_ERR, "Failed to read file, pool: %d", - i); - exit(EXIT_FAILURE); - } - - if (!feof(filep)) { - /* - * We have more data to read. - */ - num_blocks++; - record = realloc(record, alloc_unit * - num_blocks); - if (record == NULL) { - fclose(filep); - close(fd); - return 1; - } - continue; - } - break; - } kvp_file_info[i].fd = fd; - kvp_file_info[i].num_blocks = num_blocks; - kvp_file_info[i].records = record; - kvp_file_info[i].num_records = records_read; - fclose(filep); - + kvp_file_info[i].num_blocks = 1; + kvp_file_info[i].records = malloc(alloc_unit); + if (kvp_file_info[i].records == NULL) + return 1; + kvp_file_info[i].num_records = 0; + kvp_update_mem_state(i); } return 0; From 7b167dc4b875aa9d3bddf5c29b34ccfe8b87174e Mon Sep 17 00:00:00 2001 From: "Robin H. Johnson" Date: Thu, 16 Nov 2017 14:36:12 -0800 Subject: [PATCH 0450/3326] firmware: cleanup FIRMWARE_IN_KERNEL message commit 0946b2fb38fdb6585a5ac3ca84ac73924f645952 upstream. The help for FIRMWARE_IN_KERNEL still references the firmware_install command that was recently removed by commit 5620a0d1aacd ("firmware: delete in-kernel firmware"). Clean up the message to direct the user to their distribution's linux-firmware package, and remove any reference to firmware being included in the kernel source tree. Fixes: 5620a0d1aacd ("firmware: delete in-kernel firmware"). Cc: Masahiro Yamada Cc: David Woodhouse Signed-off-by: Robin H. 
Johnson Signed-off-by: Greg Kroah-Hartman --- drivers/base/Kconfig | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 2f6614c9a229ab..bdc87907d6a1b2 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -91,22 +91,23 @@ config FIRMWARE_IN_KERNEL depends on FW_LOADER default y help - The kernel source tree includes a number of firmware 'blobs' - that are used by various drivers. The recommended way to - use these is to run "make firmware_install", which, after - converting ihex files to binary, copies all of the needed - binary files in firmware/ to /lib/firmware/ on your system so - that they can be loaded by userspace helpers on request. + Various drivers in the kernel source tree may require firmware, + which is generally available in your distribution's linux-firmware + package. + + The linux-firmware package should install firmware into + /lib/firmware/ on your system, so they can be loaded by userspace + helpers on request. Enabling this option will build each required firmware blob - into the kernel directly, where request_firmware() will find - them without having to call out to userspace. This may be - useful if your root file system requires a device that uses - such firmware and do not wish to use an initrd. + specified by EXTRA_FIRMWARE into the kernel directly, where + request_firmware() will find them without having to call out to + userspace. This may be useful if your root file system requires a + device that uses such firmware and you do not wish to use an + initrd. This single option controls the inclusion of firmware for - every driver that uses request_firmware() and ships its - firmware in the kernel source tree, which avoids a + every driver that uses request_firmware(), which avoids a proliferation of 'Include firmware for xxx device' options. Say 'N' and let firmware be loaded from userspace. From cdc5340c054b37655ea1f8fef99d3e4ab39855d8 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 15 Nov 2017 13:00:43 -0800 Subject: [PATCH 0451/3326] firmware: vpd: Destroy vpd sections in remove function commit 811d7e0215fb738fb9a9f0bcb1276516ad161ed1 upstream. vpd sections are initialized during probe and thus should be destroyed in the remove function. 
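The underlying rule, sketched below with placeholder names (not the actual vpd code), is that state created in .probe() is torn down in .remove(), so an unbind or a probe/remove cycle does not leak or leave stale sysfs entries behind:

#include <linux/module.h>
#include <linux/platform_device.h>

/* hypothetical setup/teardown helpers, for illustration only */
static int  example_sections_init(struct platform_device *pdev)    { return 0; }
static void example_sections_destroy(struct platform_device *pdev) { }

static int example_probe(struct platform_device *pdev)
{
	/* everything created here ... */
	return example_sections_init(pdev);
}

static int example_remove(struct platform_device *pdev)
{
	/* ... is undone here, not in module_exit() */
	example_sections_destroy(pdev);
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= { .name = "example-vpd" },
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");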
Fixes: 049a59db34eb ("firmware: Google VPD sysfs driver") Signed-off-by: Guenter Roeck Reviewed-by: Dmitry Torokhov Tested-by: Randy Dunlap Signed-off-by: Greg Kroah-Hartman Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/google/vpd.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index 35e553b3b19051..84217172297b67 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -298,8 +298,17 @@ static int vpd_probe(struct platform_device *pdev) return vpd_sections_init(entry.cbmem_addr); } +static int vpd_remove(struct platform_device *pdev) +{ + vpd_section_destroy(&ro_vpd); + vpd_section_destroy(&rw_vpd); + + return 0; +} + static struct platform_driver vpd_driver = { .probe = vpd_probe, + .remove = vpd_remove, .driver = { .name = "vpd", }, @@ -324,8 +333,6 @@ static int __init vpd_platform_init(void) static void __exit vpd_platform_exit(void) { - vpd_section_destroy(&ro_vpd); - vpd_section_destroy(&rw_vpd); kobject_put(vpd_kobj); } From 03bb8e9a18190ee12f5250ff4e97ac035fb2abca Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 15 Nov 2017 13:00:44 -0800 Subject: [PATCH 0452/3326] firmware: vpd: Tie firmware kobject to device lifetime commit e4b28b3c3a405b251fa25db58abe1512814a680a upstream. It doesn't make sense to have /sys/firmware/vpd if the device is not instantiated, so tie its lifetime to the device. Fixes: 049a59db34eb ("firmware: Google VPD sysfs driver") Signed-off-by: Guenter Roeck Reviewed-by: Dmitry Torokhov Tested-by: Randy Dunlap Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/google/vpd.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index 84217172297b67..942e358efa6026 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -295,7 +295,17 @@ static int vpd_probe(struct platform_device *pdev) if (ret) return ret; - return vpd_sections_init(entry.cbmem_addr); + vpd_kobj = kobject_create_and_add("vpd", firmware_kobj); + if (!vpd_kobj) + return -ENOMEM; + + ret = vpd_sections_init(entry.cbmem_addr); + if (ret) { + kobject_put(vpd_kobj); + return ret; + } + + return 0; } static int vpd_remove(struct platform_device *pdev) @@ -303,6 +313,8 @@ static int vpd_remove(struct platform_device *pdev) vpd_section_destroy(&ro_vpd); vpd_section_destroy(&rw_vpd); + kobject_put(vpd_kobj); + return 0; } @@ -322,10 +334,6 @@ static int __init vpd_platform_init(void) if (IS_ERR(pdev)) return PTR_ERR(pdev); - vpd_kobj = kobject_create_and_add("vpd", firmware_kobj); - if (!vpd_kobj) - return -ENOMEM; - platform_driver_register(&vpd_driver); return 0; @@ -333,7 +341,6 @@ static int __init vpd_platform_init(void) static void __exit vpd_platform_exit(void) { - kobject_put(vpd_kobj); } module_init(vpd_platform_init); From ff8307709c0aff44bb1919e7d98ffa245af7cf6a Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 15 Nov 2017 13:00:45 -0800 Subject: [PATCH 0453/3326] firmware: vpd: Fix platform driver and device registration/unregistration commit 0631fb8b027f5968c2f5031f0b3ff7be3e4bebcc upstream. The driver exit function needs to unregister both platform device and driver. Also, during registration, register driver first and perform error checks. 
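The init/exit ordering being fixed follows the usual pattern for a module that registers both a driver and its own platform device; a sketch with placeholder names (not the actual vpd symbols):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver sample_driver;	/* probe/remove omitted from this sketch */
static struct platform_device *sample_pdev;

static int __init sample_init(void)
{
	int ret;

	/* register the driver first, and check the result */
	ret = platform_driver_register(&sample_driver);
	if (ret)
		return ret;

	/* then create the device; undo the first step on failure */
	sample_pdev = platform_device_register_simple("sample", -1, NULL, 0);
	if (IS_ERR(sample_pdev)) {
		platform_driver_unregister(&sample_driver);
		return PTR_ERR(sample_pdev);
	}

	return 0;
}

static void __exit sample_exit(void)
{
	/* unwind in reverse order: device first, then driver */
	platform_device_unregister(sample_pdev);
	platform_driver_unregister(&sample_driver);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");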
Fixes: 049a59db34eb ("firmware: Google VPD sysfs driver") Signed-off-by: Guenter Roeck Tested-by: Randy Dunlap Reviewed-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/google/vpd.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index 942e358efa6026..e4b40f2b46274a 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -326,21 +326,29 @@ static struct platform_driver vpd_driver = { }, }; +static struct platform_device *vpd_pdev; + static int __init vpd_platform_init(void) { - struct platform_device *pdev; + int ret; - pdev = platform_device_register_simple("vpd", -1, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + ret = platform_driver_register(&vpd_driver); + if (ret) + return ret; - platform_driver_register(&vpd_driver); + vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0); + if (IS_ERR(vpd_pdev)) { + platform_driver_unregister(&vpd_driver); + return PTR_ERR(vpd_pdev); + } return 0; } static void __exit vpd_platform_exit(void) { + platform_device_unregister(vpd_pdev); + platform_driver_unregister(&vpd_driver); } module_init(vpd_platform_init); From 400391d711b1f783c85679b70f71c5fea56c0341 Mon Sep 17 00:00:00 2001 From: William Breathitt Gray Date: Wed, 8 Nov 2017 10:23:11 -0500 Subject: [PATCH 0454/3326] isa: Prevent NULL dereference in isa_bus driver callbacks commit 5a244727f428a06634f22bb890e78024ab0c89f3 upstream. The isa_driver structure for an isa_bus device is stored in the device platform_data member of the respective device structure. This platform_data member may be reset to NULL if isa_driver match callback for the device fails, indicating a device unsupported by the ISA driver. This patch fixes a possible NULL pointer dereference if one of the isa_driver callbacks to attempted for an unsupported device. This error should not occur in practice since ISA devices are typically manually configured and loaded by the users, but we may as well prevent this error from popping up for the 0day testers. 
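The failure mode comes from the bus storing its driver in dev->platform_data and that pointer ending up NULL when .match() rejects the device; every bus callback therefore has to treat it as optional. A sketch of the guard, with generic names rather than the isa_bus symbols:

#include <linux/device.h>

struct sample_driver {
	int (*probe)(struct device *dev);	/* optional callback */
};

static int sample_bus_probe(struct device *dev)
{
	struct sample_driver *sdrv = dev->platform_data;

	/* platform_data is NULL when the bus .match() rejected the device,
	 * and the callback itself is optional: guard both before calling */
	if (sdrv && sdrv->probe)
		return sdrv->probe(dev);

	return 0;
}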
Fixes: a5117ba7da37 ("[PATCH] Driver model: add ISA bus") Signed-off-by: William Breathitt Gray Acked-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/base/isa.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/base/isa.c b/drivers/base/isa.c index cd6ccdcf9df0c5..372d10af26009d 100644 --- a/drivers/base/isa.c +++ b/drivers/base/isa.c @@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->probe) + if (isa_driver && isa_driver->probe) return isa_driver->probe(dev, to_isa_dev(dev)->id); return 0; @@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->remove) + if (isa_driver && isa_driver->remove) return isa_driver->remove(dev, to_isa_dev(dev)->id); return 0; @@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->shutdown) + if (isa_driver && isa_driver->shutdown) isa_driver->shutdown(dev, to_isa_dev(dev)->id); } @@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->suspend) + if (isa_driver && isa_driver->suspend) return isa_driver->suspend(dev, to_isa_dev(dev)->id, state); return 0; @@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->resume) + if (isa_driver && isa_driver->resume) return isa_driver->resume(dev, to_isa_dev(dev)->id); return 0; From bc2b3046048bc193def020ef9d871f4edb3f47ba Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 21 Nov 2017 14:23:37 +0100 Subject: [PATCH 0455/3326] scsi: dma-mapping: always provide dma_get_cache_alignment commit 860dd4424f344400b491b212ee4acb3a358ba9d9 upstream. Provide the dummy version of dma_get_cache_alignment that always returns 1 even if CONFIG_HAS_DMA is not set, so that drivers and subsystems can use it without ifdefs. Signed-off-by: Christoph Hellwig Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- include/linux/dma-mapping.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 7653ea66874dd5..46930f82a9888a 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -697,7 +697,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size, return ret; } -#ifdef CONFIG_HAS_DMA static inline int dma_get_cache_alignment(void) { #ifdef ARCH_DMA_MINALIGN @@ -705,7 +704,6 @@ static inline int dma_get_cache_alignment(void) #endif return 1; } -#endif /* flags for the coherent memory api */ #define DMA_MEMORY_EXCLUSIVE 0x01 From 21747052c4b31f453e9fbbf4ee46f221c58cf2cd Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 21 Nov 2017 14:23:38 +0100 Subject: [PATCH 0456/3326] scsi: use dma_get_cache_alignment() as minimum DMA alignment commit 90addc6b3c9cda0146fbd62a08e234c2b224a80c upstream. In non-coherent DMA mode, kernel uses cache flushing operations to maintain I/O coherency, so scsi's block queue should be aligned to the value returned by dma_get_cache_alignment(). Otherwise, If a DMA buffer and a kernel structure share a same cache line, and if the kernel structure has dirty data, cache_invalidate (no writeback) will cause data corruption. 
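The corruption scenario is worth spelling out: on a non-coherent architecture the DMA API invalidates or writes back whole cache lines around a transfer, so a small DMA target that shares a cache line with unrelated fields can clobber them or be clobbered. A minimal sketch (placeholder struct, not the actual scsi/libsas code) of the two usual remedies:

#include <linux/cache.h>
#include <linux/types.h>

struct sample_port {
	u32	state;		/* CPU-owned state ... */
	u32	flags;		/* ... would otherwise share a cache line with resp[] */

	/*
	 * Remedy 1: start the DMA target on its own cache line so the
	 * invalidate/writeback around the transfer cannot touch state/flags.
	 */
	u8	resp[64] ____cacheline_aligned;
};

/*
 * Remedy 2 (what this scsi patch does): raise the block queue's DMA
 * alignment to the platform's cache line size, so buffers handed to the
 * HBA never straddle a cache line shared with other data:
 *
 *	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
 */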
Signed-off-by: Huacai Chen [hch: rebased and updated the comment and changelog] Signed-off-by: Christoph Hellwig Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/scsi_lib.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index bcc1694cebcd3e..635cfa1f2aced8 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2126,11 +2126,13 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) q->limits.cluster = 0; /* - * set a reasonable default alignment on word boundaries: the - * host and device may alter it using - * blk_queue_update_dma_alignment() later. + * Set a reasonable default alignment: The larger of 32-byte (dword), + * which is a common minimum for HBAs, and the minimum DMA alignment, + * which is set by the platform. + * + * Devices that require a bigger alignment can increase it later. */ - blk_queue_dma_alignment(q, 0x03); + blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); } EXPORT_SYMBOL_GPL(__scsi_init_queue); From a326fc91abb834c832166cff9f4362f98ff7f95d Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 21 Nov 2017 14:23:39 +0100 Subject: [PATCH 0457/3326] scsi: libsas: align sata_device's rps_resp on a cacheline commit c2e8fbf908afd81ad502b567a6639598f92c9b9d upstream. The rps_resp buffer in ata_device is a DMA target, but it isn't explicitly cacheline aligned. Due to this, adjacent fields can be overwritten with stale data from memory on non-coherent architectures. As a result, the kernel is sometimes unable to communicate with an SATA device behind a SAS expander. Fix this by ensuring that the rps_resp buffer is cacheline aligned. This issue is similar to that fixed by Commit 84bda12af31f93 ("libata: align ap->sector_buf") and Commit 4ee34ea3a12396f35b26 ("libata: Align ata_device's id on a cacheline"). Signed-off-by: Huacai Chen Signed-off-by: Christoph Hellwig Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- include/scsi/libsas.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 6c0dc6155ee757..a966d281dedc3b 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -165,11 +165,11 @@ struct expander_device { struct sata_device { unsigned int class; - struct smp_resp rps_resp; /* report_phy_sata_resp */ u8 port_no; /* port number, if this is a PM (Port) */ struct ata_port *ap; struct ata_host ata_host; + struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */ u8 fis[ATA_RESP_FIS_SIZE]; }; From 985ce9ee25cca9d8841b0cc31ced92dbd1f9bf99 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 6 Dec 2017 09:50:08 +0000 Subject: [PATCH 0458/3326] efi: Move some sysfs files to be read-only by root commit af97a77bc01ce49a466f9d4c0125479e2e2230b6 upstream. Thanks to the scripts/leaking_addresses.pl script, it was found that some EFI values should not be readable by non-root users. So make them root-only, and to do that, add a __ATTR_RO_MODE() macro to make this easier, and use it in other places at the same time. Reported-by: Linus Torvalds Tested-by: Dave Young Signed-off-by: Greg Kroah-Hartman Signed-off-by: Ard Biesheuvel Cc: H. 
Peter Anvin Cc: Matt Fleming Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/20171206095010.24170-2-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/efi/efi.c | 3 +-- drivers/firmware/efi/esrt.c | 15 ++++++--------- drivers/firmware/efi/runtime-map.c | 10 +++++----- include/linux/sysfs.h | 6 ++++++ 4 files changed, 18 insertions(+), 16 deletions(-) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index f70febf680c392..c3eefa126e3b0c 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -143,8 +143,7 @@ static ssize_t systab_show(struct kobject *kobj, return str - buf; } -static struct kobj_attribute efi_attr_systab = - __ATTR(systab, 0400, systab_show, NULL); +static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400); #define EFI_FIELD(var) efi.var diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index bd7ed3c1148a7c..7aae2483fcb947 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = { }; /* Generic ESRT Entry ("ESRE") support. */ -static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) +static ssize_t fw_class_show(struct esre_entry *entry, char *buf) { char *str = buf; @@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) return str - buf; } -static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400, - esre_fw_class_show, NULL); +static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400); #define esre_attr_decl(name, size, fmt) \ -static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \ +static ssize_t name##_show(struct esre_entry *entry, char *buf) \ { \ return sprintf(buf, fmt "\n", \ le##size##_to_cpu(entry->esre.esre1->name)); \ } \ \ -static struct esre_attribute esre_##name = __ATTR(name, 0400, \ - esre_##name##_show, NULL) +static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400) esre_attr_decl(fw_type, 32, "%u"); esre_attr_decl(fw_version, 32, "%u"); @@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) /* support for displaying ESRT fields at the top level */ #define esrt_attr_decl(name, size, fmt) \ -static ssize_t esrt_##name##_show(struct kobject *kobj, \ +static ssize_t name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf)\ { \ return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \ } \ \ -static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \ - esrt_##name##_show, NULL) +static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400) esrt_attr_decl(fw_resource_count, 32, "%u"); esrt_attr_decl(fw_resource_count_max, 32, "%u"); diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index 8e64b77aeac95e..f377609ff141bc 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c @@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr, return map_attr->show(entry, buf); } -static struct map_attribute map_type_attr = __ATTR_RO(type); -static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr); -static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr); -static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages); -static struct map_attribute map_attribute_attr = __ATTR_RO(attribute); +static 
struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400); +static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400); +static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400); +static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400); +static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400); /* * These are default attributes that are added for every memmap entry. diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index e32dfe098e822f..40839c02d28c04 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -117,6 +117,12 @@ struct attribute_group { .show = _name##_show, \ } +#define __ATTR_RO_MODE(_name, _mode) { \ + .attr = { .name = __stringify(_name), \ + .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ + .show = _name##_show, \ +} + #define __ATTR_WO(_name) { \ .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \ .store = _name##_store, \ From f4d901739324cc1c400f322767a9df6de7d838ce Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Wed, 6 Dec 2017 09:50:09 +0000 Subject: [PATCH 0459/3326] efi/esrt: Use memunmap() instead of kfree() to free the remapping commit 89c5a2d34bda58319e3075e8e7dd727ea25a435c upstream. The remapping result of memremap() should be freed with memunmap(), not kfree(). Signed-off-by: Pan Bian Signed-off-by: Ard Biesheuvel Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Matt Fleming Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/20171206095010.24170-3-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/efi/esrt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index 7aae2483fcb947..c47e0c6ec00f85 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -428,7 +428,7 @@ static int __init esrt_sysfs_init(void) err_remove_esrt: kobject_put(esrt_kobj); err: - kfree(esrt); + memunmap(esrt); esrt = NULL; return error; } From 2c4c01d13f74ef37299b4e23a95bb0691298370a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 8 Dec 2017 15:13:27 +0000 Subject: [PATCH 0460/3326] ASN.1: fix out-of-bounds read when parsing indefinite length item commit e0058f3a874ebb48b25be7ff79bc3b4e59929f90 upstream. In asn1_ber_decoder(), indefinitely-sized ASN.1 items were being passed to the action functions before their lengths had been computed, using the bogus length of 0x80 (ASN1_INDEFINITE_LENGTH). This resulted in reading data past the end of the input buffer, when given a specially crafted message. Fix it by rearranging the code so that the indefinite length is resolved before the action is called. This bug was originally found by fuzzing the X.509 parser in userspace using libFuzzer from the LLVM project. 
KASAN report (cleaned up slightly): BUG: KASAN: slab-out-of-bounds in memcpy ./include/linux/string.h:341 [inline] BUG: KASAN: slab-out-of-bounds in x509_fabricate_name.constprop.1+0x1a4/0x940 crypto/asymmetric_keys/x509_cert_parser.c:366 Read of size 128 at addr ffff880035dd9eaf by task keyctl/195 CPU: 1 PID: 195 Comm: keyctl Not tainted 4.14.0-09238-g1d3b78bbc6e9 #26 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.11.0-20171110_100015-anatol 04/01/2014 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0xd1/0x175 lib/dump_stack.c:53 print_address_description+0x78/0x260 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 [inline] kasan_report+0x23f/0x350 mm/kasan/report.c:409 memcpy+0x1f/0x50 mm/kasan/kasan.c:302 memcpy ./include/linux/string.h:341 [inline] x509_fabricate_name.constprop.1+0x1a4/0x940 crypto/asymmetric_keys/x509_cert_parser.c:366 asn1_ber_decoder+0xb4a/0x1fd0 lib/asn1_decoder.c:447 x509_cert_parse+0x1c7/0x620 crypto/asymmetric_keys/x509_cert_parser.c:89 x509_key_preparse+0x61/0x750 crypto/asymmetric_keys/x509_public_key.c:174 asymmetric_key_preparse+0xa4/0x150 crypto/asymmetric_keys/asymmetric_type.c:388 key_create_or_update+0x4d4/0x10a0 security/keys/key.c:850 SYSC_add_key security/keys/keyctl.c:122 [inline] SyS_add_key+0xe8/0x290 security/keys/keyctl.c:62 entry_SYSCALL_64_fastpath+0x1f/0x96 Allocated by task 195: __do_kmalloc_node mm/slab.c:3675 [inline] __kmalloc_node+0x47/0x60 mm/slab.c:3682 kvmalloc ./include/linux/mm.h:540 [inline] SYSC_add_key security/keys/keyctl.c:104 [inline] SyS_add_key+0x19e/0x290 security/keys/keyctl.c:62 entry_SYSCALL_64_fastpath+0x1f/0x96 Fixes: 42d5ec27f873 ("X.509: Add an ASN.1 decoder") Reported-by: Alexander Potapenko Signed-off-by: Eric Biggers Signed-off-by: David Howells Signed-off-by: Greg Kroah-Hartman --- lib/asn1_decoder.c | 47 +++++++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index 1ef0cec38d7879..d77cdfc4b554c4 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -313,42 +313,47 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder, /* Decide how to handle the operation */ switch (op) { - case ASN1_OP_MATCH_ANY_ACT: - case ASN1_OP_MATCH_ANY_ACT_OR_SKIP: - case ASN1_OP_COND_MATCH_ANY_ACT: - case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP: - ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len); - if (ret < 0) - return ret; - goto skip_data; - - case ASN1_OP_MATCH_ACT: - case ASN1_OP_MATCH_ACT_OR_SKIP: - case ASN1_OP_COND_MATCH_ACT_OR_SKIP: - ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len); - if (ret < 0) - return ret; - goto skip_data; - case ASN1_OP_MATCH: case ASN1_OP_MATCH_OR_SKIP: + case ASN1_OP_MATCH_ACT: + case ASN1_OP_MATCH_ACT_OR_SKIP: case ASN1_OP_MATCH_ANY: case ASN1_OP_MATCH_ANY_OR_SKIP: + case ASN1_OP_MATCH_ANY_ACT: + case ASN1_OP_MATCH_ANY_ACT_OR_SKIP: case ASN1_OP_COND_MATCH_OR_SKIP: + case ASN1_OP_COND_MATCH_ACT_OR_SKIP: case ASN1_OP_COND_MATCH_ANY: case ASN1_OP_COND_MATCH_ANY_OR_SKIP: - skip_data: + case ASN1_OP_COND_MATCH_ANY_ACT: + case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP: + if (!(flags & FLAG_CONS)) { if (flags & FLAG_INDEFINITE_LENGTH) { + size_t tmp = dp; + ret = asn1_find_indefinite_length( - data, datalen, &dp, &len, &errmsg); + data, datalen, &tmp, &len, &errmsg); if (ret < 0) goto error; - } else { - dp += len; } pr_debug("- LEAF: %zu\n", len); } + + if (op & ASN1_OP_MATCH__ACT) { + unsigned char act; + + if (op & ASN1_OP_MATCH__ANY) + 
act = machine[pc + 1]; + else + act = machine[pc + 2]; + ret = actions[act](context, hdr, tag, data + dp, len); + if (ret < 0) + return ret; + } + + if (!(flags & FLAG_CONS)) + dp += len; pc += asn1_op_lengths[op]; goto next_op; From 4c69b34050996d1a1167242f625973dfb9f54271 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 8 Dec 2017 15:13:27 +0000 Subject: [PATCH 0461/3326] ASN.1: check for error from ASN1_OP_END__ACT actions commit 81a7be2cd69b412ab6aeacfe5ebf1bb6e5bce955 upstream. asn1_ber_decoder() was ignoring errors from actions associated with the opcodes ASN1_OP_END_SEQ_ACT, ASN1_OP_END_SET_ACT, ASN1_OP_END_SEQ_OF_ACT, and ASN1_OP_END_SET_OF_ACT. In practice, this meant the pkcs7_note_signed_info() action (since that was the only user of those opcodes). Fix it by checking for the error, just like the decoder does for actions associated with the other opcodes. This bug allowed users to leak slab memory by repeatedly trying to add a specially crafted "pkcs7_test" key (requires CONFIG_PKCS7_TEST_KEY). In theory, this bug could also be used to bypass module signature verification, by providing a PKCS#7 message that is misparsed such that a signature's ->authattrs do not contain its ->msgdigest. But it doesn't seem practical in normal cases, due to restrictions on the format of the ->authattrs. Fixes: 42d5ec27f873 ("X.509: Add an ASN.1 decoder") Signed-off-by: Eric Biggers Signed-off-by: David Howells Reviewed-by: James Morris Signed-off-by: Greg Kroah-Hartman --- lib/asn1_decoder.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index d77cdfc4b554c4..dc14beae2c9aac 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -439,6 +439,8 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder, else act = machine[pc + 1]; ret = actions[act](context, hdr, 0, data + tdp, len); + if (ret < 0) + return ret; } pc += asn1_op_lengths[op]; goto next_op; From 69d5894ce0a67d37f900d2597fc0b2b8cef6c863 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 8 Dec 2017 15:13:27 +0000 Subject: [PATCH 0462/3326] KEYS: add missing permission check for request_key() destination commit 4dca6ea1d9432052afb06baf2e3ae78188a4410b upstream. When the request_key() syscall is not passed a destination keyring, it links the requested key (if constructed) into the "default" request-key keyring. This should require Write permission to the keyring. However, there is actually no permission check. This can be abused to add keys to any keyring to which only Search permission is granted. This is because Search permission allows joining the keyring. keyctl_set_reqkey_keyring(KEY_REQKEY_DEFL_SESSION_KEYRING) then will set the default request-key keyring to the session keyring. Then, request_key() can be used to add keys to the keyring. Both negatively and positively instantiated keys can be added using this method. Adding negative keys is trivial. Adding a positive key is a bit trickier. It requires that either /sbin/request-key positively instantiates the key, or that another thread adds the key to the process keyring at just the right time, such that request_key() misses it initially but then finds it in construct_alloc_key(). Fix this bug by checking for Write permission to the keyring in construct_get_dest_keyring() when the default keyring is being used. We don't do the permission check for non-default keyrings because that was already done by the earlier call to lookup_user_key(). 
Also, request_key_and_link() is currently passed a 'struct key *' rather than a key_ref_t, so the "possessed" bit is unavailable. We also don't do the permission check for the "requestor keyring", to continue to support the use case described by commit 8bbf4976b59f ("KEYS: Alter use of key instantiation link-to-keyring argument") where /sbin/request-key recursively calls request_key() to add keys to the original requestor's destination keyring. (I don't know of any users who actually do that, though...) Fixes: 3e30148c3d52 ("[PATCH] Keys: Make request-key create an authorisation key") Signed-off-by: Eric Biggers Signed-off-by: David Howells Signed-off-by: Greg Kroah-Hartman --- security/keys/request_key.c | 46 +++++++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/security/keys/request_key.c b/security/keys/request_key.c index e8036cd0ad5430..7dc74138215427 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -251,11 +251,12 @@ static int construct_key(struct key *key, const void *callout_info, * The keyring selected is returned with an extra reference upon it which the * caller must release. */ -static void construct_get_dest_keyring(struct key **_dest_keyring) +static int construct_get_dest_keyring(struct key **_dest_keyring) { struct request_key_auth *rka; const struct cred *cred = current_cred(); struct key *dest_keyring = *_dest_keyring, *authkey; + int ret; kenter("%p", dest_keyring); @@ -264,6 +265,8 @@ static void construct_get_dest_keyring(struct key **_dest_keyring) /* the caller supplied one */ key_get(dest_keyring); } else { + bool do_perm_check = true; + /* use a default keyring; falling through the cases until we * find one that we actually have */ switch (cred->jit_keyring) { @@ -278,8 +281,10 @@ static void construct_get_dest_keyring(struct key **_dest_keyring) dest_keyring = key_get(rka->dest_keyring); up_read(&authkey->sem); - if (dest_keyring) + if (dest_keyring) { + do_perm_check = false; break; + } } case KEY_REQKEY_DEFL_THREAD_KEYRING: @@ -314,11 +319,29 @@ static void construct_get_dest_keyring(struct key **_dest_keyring) default: BUG(); } + + /* + * Require Write permission on the keyring. This is essential + * because the default keyring may be the session keyring, and + * joining a keyring only requires Search permission. + * + * However, this check is skipped for the "requestor keyring" so + * that /sbin/request-key can itself use request_key() to add + * keys to the original requestor's destination keyring. 
+ */ + if (dest_keyring && do_perm_check) { + ret = key_permission(make_key_ref(dest_keyring, 1), + KEY_NEED_WRITE); + if (ret) { + key_put(dest_keyring); + return ret; + } + } } *_dest_keyring = dest_keyring; kleave(" [dk %d]", key_serial(dest_keyring)); - return; + return 0; } /* @@ -444,11 +467,15 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx, if (ctx->index_key.type == &key_type_keyring) return ERR_PTR(-EPERM); - user = key_user_lookup(current_fsuid()); - if (!user) - return ERR_PTR(-ENOMEM); + ret = construct_get_dest_keyring(&dest_keyring); + if (ret) + goto error; - construct_get_dest_keyring(&dest_keyring); + user = key_user_lookup(current_fsuid()); + if (!user) { + ret = -ENOMEM; + goto error_put_dest_keyring; + } ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key); key_user_put(user); @@ -463,7 +490,7 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx, } else if (ret == -EINPROGRESS) { ret = 0; } else { - goto couldnt_alloc_key; + goto error_put_dest_keyring; } key_put(dest_keyring); @@ -473,8 +500,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx, construction_failed: key_negate_and_link(key, key_negative_timeout, NULL, NULL); key_put(key); -couldnt_alloc_key: +error_put_dest_keyring: key_put(dest_keyring); +error: kleave(" = %d", ret); return ERR_PTR(ret); } From 28e7c9a8e548c8c7e839f6ae1e21d61dc92308b0 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 8 Dec 2017 15:13:29 +0000 Subject: [PATCH 0463/3326] KEYS: reject NULL restriction string when type is specified commit 18026d866801d0c52e5550210563222bd6c7191d upstream. keyctl_restrict_keyring() allows through a NULL restriction when the "type" is non-NULL, which causes a NULL pointer dereference in asymmetric_lookup_restriction() when it calls strcmp() on the restriction string. But no key types actually use a "NULL restriction" to mean anything, so update keyctl_restrict_keyring() to reject it with EINVAL. Reported-by: syzbot Fixes: 97d3aa0f3134 ("KEYS: Add a lookup_restriction function for the asymmetric key type") Signed-off-by: Eric Biggers Signed-off-by: David Howells Signed-off-by: Greg Kroah-Hartman --- security/keys/keyctl.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 76d22f726ae49d..1ffe60bb2845f9 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -1588,9 +1588,8 @@ long keyctl_session_to_parent(void) * The caller must have Setattr permission to change keyring restrictions. * * The requested type name may be a NULL pointer to reject all attempts - * to link to the keyring. If _type is non-NULL, _restriction can be - * NULL or a pointer to a string describing the restriction. If _type is - * NULL, _restriction must also be NULL. + * to link to the keyring. In this case, _restriction must also be NULL. + * Otherwise, both _type and _restriction must be non-NULL. * * Returns 0 if successful. 
*/ @@ -1598,7 +1597,6 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type, const char __user *_restriction) { key_ref_t key_ref; - bool link_reject = !_type; char type[32]; char *restriction = NULL; long ret; @@ -1607,31 +1605,29 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type, if (IS_ERR(key_ref)) return PTR_ERR(key_ref); + ret = -EINVAL; if (_type) { - ret = key_get_type_from_user(type, _type, sizeof(type)); - if (ret < 0) + if (!_restriction) goto error; - } - if (_restriction) { - if (!_type) { - ret = -EINVAL; + ret = key_get_type_from_user(type, _type, sizeof(type)); + if (ret < 0) goto error; - } restriction = strndup_user(_restriction, PAGE_SIZE); if (IS_ERR(restriction)) { ret = PTR_ERR(restriction); goto error; } + } else { + if (_restriction) + goto error; } - ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction); + ret = keyring_restrict(key_ref, _type ? type : NULL, restriction); kfree(restriction); - error: key_ref_put(key_ref); - return ret; } From 31b3bcc66f52790010f6a5d4458cabf89f2ffcfa Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 8 Dec 2017 15:13:27 +0000 Subject: [PATCH 0464/3326] X.509: reject invalid BIT STRING for subjectPublicKey commit 0f30cbea005bd3077bd98cd29277d7fc2699c1da upstream. Adding a specially crafted X.509 certificate whose subjectPublicKey ASN.1 value is zero-length caused x509_extract_key_data() to set the public key size to SIZE_MAX, as it subtracted the nonexistent BIT STRING metadata byte. Then, x509_cert_parse() called kmemdup() with that bogus size, triggering the WARN_ON_ONCE() in kmalloc_slab(). This appears to be harmless, but it still must be fixed since WARNs are never supposed to be user-triggerable. Fix it by updating x509_cert_parse() to validate that the value has a BIT STRING metadata byte, and that the byte is 0 which indicates that the number of bits in the bitstring is a multiple of 8. It would be nice to handle the metadata byte in asn1_ber_decoder() instead. But that would be tricky because in the general case a BIT STRING could be implicitly tagged, and/or could legitimately have a length that is not a whole number of bytes. 
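As background (not part of the patch): a DER-encoded BIT STRING value always begins with one metadata octet giving the number of unused bits in its final octet, and for subjectPublicKey that octet must be 0 because the key material is a whole number of bytes. The stand-alone sketch below, with made-up buffer contents, shows that layout and the unsigned underflow that turned a zero-length value into a SIZE_MAX key size:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* BIT STRING value: 1 metadata octet ("unused bits", 0 here)
	 * followed by the actual key bytes. */
	const unsigned char value[] = { 0x00, 0xaa, 0xbb };
	size_t vlen = sizeof(value);

	/* Well-formed case: skip the metadata octet. */
	printf("key size = %zu\n", vlen - 1);		/* 2 */

	/* Crafted case: zero-length value.  Subtracting the nonexistent
	 * metadata octet from an unsigned size wraps around to SIZE_MAX,
	 * which is what this patch's vlen < 1 check rejects. */
	size_t bad_vlen = 0;
	printf("bogus key size = %zu\n", bad_vlen - 1);	/* SIZE_MAX */
	return 0;
}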
Here was the WARN (cleaned up slightly): WARNING: CPU: 1 PID: 202 at mm/slab_common.c:971 kmalloc_slab+0x5d/0x70 mm/slab_common.c:971 Modules linked in: CPU: 1 PID: 202 Comm: keyctl Tainted: G B 4.14.0-09238-g1d3b78bbc6e9 #26 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.11.0-20171110_100015-anatol 04/01/2014 task: ffff880033014180 task.stack: ffff8800305c8000 Call Trace: __do_kmalloc mm/slab.c:3706 [inline] __kmalloc_track_caller+0x22/0x2e0 mm/slab.c:3726 kmemdup+0x17/0x40 mm/util.c:118 kmemdup include/linux/string.h:414 [inline] x509_cert_parse+0x2cb/0x620 crypto/asymmetric_keys/x509_cert_parser.c:106 x509_key_preparse+0x61/0x750 crypto/asymmetric_keys/x509_public_key.c:174 asymmetric_key_preparse+0xa4/0x150 crypto/asymmetric_keys/asymmetric_type.c:388 key_create_or_update+0x4d4/0x10a0 security/keys/key.c:850 SYSC_add_key security/keys/keyctl.c:122 [inline] SyS_add_key+0xe8/0x290 security/keys/keyctl.c:62 entry_SYSCALL_64_fastpath+0x1f/0x96 Fixes: 42d5ec27f873 ("X.509: Add an ASN.1 decoder") Signed-off-by: Eric Biggers Signed-off-by: David Howells Reviewed-by: James Morris Signed-off-by: Greg Kroah-Hartman --- crypto/asymmetric_keys/x509_cert_parser.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index dd03fead1ca358..ce2df8c9c58397 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -409,6 +409,8 @@ int x509_extract_key_data(void *context, size_t hdrlen, ctx->cert->pub->pkey_algo = "rsa"; /* Discard the BIT STRING metadata */ + if (vlen < 1 || *(const u8 *)value != 0) + return -EBADMSG; ctx->key = value + 1; ctx->key_size = vlen - 1; return 0; From 70feeaaabf4fabf78812d33facd7f8f993b33011 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 8 Dec 2017 15:13:29 +0000 Subject: [PATCH 0465/3326] X.509: fix comparisons of ->pkey_algo commit 54c1fb39fe0495f846539ab765925b008f86801c upstream. ->pkey_algo used to be an enum, but was changed to a string by commit 4e8ae72a75aa ("X.509: Make algo identifiers text instead of enum"). But two comparisons were not updated. Fix them to use strcmp(). This bug broke signature verification in certain configurations, depending on whether the string constants were deduplicated or not. 
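A minimal user-space sketch (not from the kernel) of why comparing the addresses of two identical string literals is unreliable: the compiler and linker may or may not merge them into a single object, so the result of '==' depends on build details, whereas strcmp() always compares the characters themselves:

#include <stdio.h>
#include <string.h>

static const char *algo_a = "rsa";
static const char *algo_b = "rsa";

int main(void)
{
	/* May print 1 or 0 depending on whether the two "rsa" literals
	 * were deduplicated into one string constant. */
	printf("pointer comparison: %d\n", algo_a == algo_b);

	/* Always 1: compares the contents, not the addresses. */
	printf("strcmp comparison:  %d\n", strcmp(algo_a, algo_b) == 0);
	return 0;
}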
Fixes: 4e8ae72a75aa ("X.509: Make algo identifiers text instead of enum") Signed-off-by: Eric Biggers Signed-off-by: David Howells Signed-off-by: Greg Kroah-Hartman --- crypto/asymmetric_keys/pkcs7_verify.c | 2 +- crypto/asymmetric_keys/x509_public_key.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c index 2d93d9eccb4d0c..986033e64a838d 100644 --- a/crypto/asymmetric_keys/pkcs7_verify.c +++ b/crypto/asymmetric_keys/pkcs7_verify.c @@ -150,7 +150,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7, pr_devel("Sig %u: Found cert serial match X.509[%u]\n", sinfo->index, certix); - if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) { + if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) { pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n", sinfo->index); continue; diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index eea71dc9686c29..1bd0cf71a22d3d 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -135,7 +135,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert) } ret = -EKEYREJECTED; - if (cert->pub->pkey_algo != cert->sig->pkey_algo) + if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0) goto out; ret = public_key_verify_signature(cert->pub, cert->sig); From e7bb5cf984ba767bc7badbb4f2bead35b88783d6 Mon Sep 17 00:00:00 2001 From: Chunyu Hu Date: Mon, 27 Nov 2017 22:21:39 +0800 Subject: [PATCH 0466/3326] x86/idt: Load idt early in start_secondary commit 55d2d0ad2fb4325f615d1950486fbc5e6fba1769 upstream. On a secondary, idt is first loaded in cpu_init() with load_current_idt(), i.e. no exceptions can be handled before that point. The conversion of WARN() to use UD requires the IDT being loaded earlier as any warning between start_secondary() and load_curren_idt() in cpu_init() will result in an unhandled @UD exception and therefore fail the bringup of the CPU. Install the IDT handlers right in start_secondary() before calling cpu_init(). [ tglx: Massaged changelog ] Fixes: 9a93848fe787 ("x86/debug: Implement __WARN() using UD0") Signed-off-by: Chunyu Hu Signed-off-by: Thomas Gleixner Cc: peterz@infradead.org Cc: bp@alien8.de Cc: rostedt@goodmis.org Cc: luto@kernel.org Link: https://lkml.kernel.org/r/1511792499-4073-1-git-send-email-chuhu@redhat.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/smpboot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 65a0ccdc305074..5e0453f18a57c8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -239,7 +239,7 @@ static void notrace start_secondary(void *unused) load_cr3(swapper_pg_dir); __flush_tlb_all(); #endif - + load_current_idt(); cpu_init(); x86_cpuinit.early_percpu_clock_init(); preempt_disable(); From d333778b05edfa13a3eb51f3d98efae7b7c2caa8 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 1 Dec 2017 15:08:12 +0100 Subject: [PATCH 0467/3326] x86/PCI: Make broadcom_postcore_init() check acpi_disabled commit ddec3bdee05b06f1dda20ded003c3e10e4184cab upstream. acpi_os_get_root_pointer() may return a valid address even if acpi_disabled is set, but the host bridge information from the ACPI tables is not going to be used in that case and the Broadcom host bridge initialization should not be skipped then, So make broadcom_postcore_init() check acpi_disabled too to avoid this issue. 
Fixes: 6361d72b04d1 (x86/PCI: read Broadcom CNB20LE host bridge info before PCI scan) Reported-by: Dave Hansen Signed-off-by: Rafael J. Wysocki Signed-off-by: Thomas Gleixner Cc: Bjorn Helgaas Cc: Linux PCI Link: https://lkml.kernel.org/r/3186627.pxZj1QbYNg@aspire.rjw.lan Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/pci/broadcom_bus.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c index bb461cfd01abc7..526536c81ddc41 100644 --- a/arch/x86/pci/broadcom_bus.c +++ b/arch/x86/pci/broadcom_bus.c @@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void) * We should get host bridge information from ACPI unless the BIOS * doesn't support it. */ - if (acpi_os_get_root_pointer()) + if (!acpi_disabled && acpi_os_get_root_pointer()) return 0; #endif From 58582f04bc87b9d8d848d9163ce3355dd6f00602 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Thu, 30 Nov 2017 19:05:45 +0100 Subject: [PATCH 0468/3326] KVM: x86: fix APIC page invalidation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit b1394e745b9453dcb5b0671c205b770e87dedb87 upstream. Implementation of the unpinned APIC page didn't update the VMCS address cache when invalidation was done through range mmu notifiers. This became a problem when the page notifier was removed. Re-introduce the arch-specific helper and call it from ...range_start. Reported-by: Fabian Grünbichler Fixes: 38b9917350cb ("kvm: vmx: Implement set_apic_access_page_addr") Fixes: 369ea8242c0f ("mm/rmap: update to new mmu_notifier semantic v2") Reviewed-by: Paolo Bonzini Reviewed-by: Andrea Arcangeli Tested-by: Wanpeng Li Tested-by: Fabian Grünbichler Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/kvm_host.h | 3 +++ arch/x86/kvm/x86.c | 14 ++++++++++++++ virt/kvm/kvm_main.c | 8 ++++++++ 3 files changed, 25 insertions(+) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c73e493adf0748..eb38ac9d9a31c2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1426,4 +1426,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) #endif } +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end); + #endif /* _ASM_X86_KVM_HOST_H */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4195cbcdb3102a..df62cdc7a2582e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6745,6 +6745,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu) kvm_x86_ops->tlb_flush(vcpu); } +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end) +{ + unsigned long apic_address; + + /* + * The physical address of apic access page is stored in the VMCS. + * Update it when it becomes invalid. 
+ */ + apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); + if (start <= apic_address && apic_address < end) + kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); +} + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) { struct page *page = NULL; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9deb5a245b8303..484e8820c382b3 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -136,6 +136,11 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); static unsigned long long kvm_createvm_count; static unsigned long long kvm_active_vms; +__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end) +{ +} + bool kvm_is_reserved_pfn(kvm_pfn_t pfn) { if (pfn_valid(pfn)) @@ -361,6 +366,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); + + kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); + srcu_read_unlock(&kvm->srcu, idx); } From 85ab61fdfa4866cd0465c75fb97a0601fed9973d Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 4 Dec 2017 13:11:45 -0500 Subject: [PATCH 0469/3326] btrfs: fix missing error return in btrfs_drop_snapshot commit e19182c0fff451e3744c1107d98f072e7ca377a0 upstream. If btrfs_del_root fails in btrfs_drop_snapshot, we'll pick up the error but then return 0 anyway due to mixing err and ret. Fixes: 79787eaab4612 ("btrfs: replace many BUG_ONs with proper error handling") Signed-off-by: Jeff Mahoney Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/extent-tree.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e4774c02d922ea..d227d8514b2567 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -9283,6 +9283,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, ret = btrfs_del_root(trans, fs_info, &root->root_key); if (ret) { btrfs_abort_transaction(trans, ret); + err = ret; goto out_end_trans; } From a121ecb3764e84da656b8d20138997f2fd6aed6f Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 21 Nov 2017 13:58:49 -0500 Subject: [PATCH 0470/3326] btrfs: handle errors while updating refcounts in update_ref_for_cow commit 692826b2738101549f032a761a9191636e83be4e upstream. Since commit fb235dc06fa (btrfs: qgroup: Move half of the qgroup accounting time out of commit trans) the assumption that btrfs_add_delayed_{data,tree}_ref can only return 0 or -ENOMEM has been false. The qgroup operations call into btrfs_search_slot and friends and can now return the full spectrum of error codes. Fortunately, the fix here is easy since update_ref_for_cow failing is already handled so we just need to bail early with the error code. Fixes: fb235dc06fa (btrfs: qgroup: Move half of the qgroup accounting ...) 
Signed-off-by: Jeff Mahoney Reviewed-by: Edmund Nadolski Reviewed-by: Qu Wenruo Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/ctree.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 6d49db7d86be29..e2bb2a0657419a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1032,14 +1032,17 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) { ret = btrfs_inc_ref(trans, root, buf, 1); - BUG_ON(ret); /* -ENOMEM */ + if (ret) + return ret; if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { ret = btrfs_dec_ref(trans, root, buf, 0); - BUG_ON(ret); /* -ENOMEM */ + if (ret) + return ret; ret = btrfs_inc_ref(trans, root, cow, 1); - BUG_ON(ret); /* -ENOMEM */ + if (ret) + return ret; } new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; } else { @@ -1049,7 +1052,8 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, ret = btrfs_inc_ref(trans, root, cow, 1); else ret = btrfs_inc_ref(trans, root, cow, 0); - BUG_ON(ret); /* -ENOMEM */ + if (ret) + return ret; } if (new_flags != 0) { int level = btrfs_header_level(buf); @@ -1068,9 +1072,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, ret = btrfs_inc_ref(trans, root, cow, 1); else ret = btrfs_inc_ref(trans, root, cow, 0); - BUG_ON(ret); /* -ENOMEM */ + if (ret) + return ret; ret = btrfs_dec_ref(trans, root, buf, 1); - BUG_ON(ret); /* -ENOMEM */ + if (ret) + return ret; } clean_tree_block(fs_info, buf); *last_ref = 1; From c6705e4c64eb549c1eb86007438034dd7b7a6129 Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Tue, 5 Dec 2017 15:38:24 +0800 Subject: [PATCH 0471/3326] ALSA: hda/realtek - New codec support for ALC257 commit f429e7e494afaded76e62c6f98211a635aa03098 upstream. Add new support for ALC257 codec. 
[ It's supposed to be almost equivalent with other ALC25x variants, just adding another type and id -- tiwai ] Signed-off-by: Kailang Yang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 7c39114d124fee..b076386c8952d9 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -330,6 +330,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0236: case 0x10ec0255: case 0x10ec0256: + case 0x10ec0257: case 0x10ec0282: case 0x10ec0283: case 0x10ec0286: @@ -2749,6 +2750,7 @@ enum { ALC269_TYPE_ALC298, ALC269_TYPE_ALC255, ALC269_TYPE_ALC256, + ALC269_TYPE_ALC257, ALC269_TYPE_ALC215, ALC269_TYPE_ALC225, ALC269_TYPE_ALC294, @@ -2782,6 +2784,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec) case ALC269_TYPE_ALC298: case ALC269_TYPE_ALC255: case ALC269_TYPE_ALC256: + case ALC269_TYPE_ALC257: case ALC269_TYPE_ALC215: case ALC269_TYPE_ALC225: case ALC269_TYPE_ALC294: @@ -6839,6 +6842,10 @@ static int patch_alc269(struct hda_codec *codec) spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ break; + case 0x10ec0257: + spec->codec_variant = ALC269_TYPE_ALC257; + spec->gen.mixer_nid = 0; + break; case 0x10ec0215: case 0x10ec0285: case 0x10ec0289: @@ -7886,6 +7893,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269), HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269), HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269), HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260), HDA_CODEC_ENTRY(0x10ec0262, "ALC262", patch_alc262), HDA_CODEC_ENTRY(0x10ec0267, "ALC267", patch_alc268), From 0482dcd51004920b13b59995b7afb66df49937da Mon Sep 17 00:00:00 2001 From: Robb Glasser Date: Tue, 5 Dec 2017 09:16:55 -0800 Subject: [PATCH 0472/3326] ALSA: pcm: prevent UAF in snd_pcm_info commit 362bca57f5d78220f8b5907b875961af9436e229 upstream. When the device descriptor is closed, the `substream->runtime` pointer is freed. But another thread may be in the ioctl handler, case SNDRV_CTL_IOCTL_PCM_INFO. This case calls snd_pcm_info_user() which calls snd_pcm_info() which accesses the now freed `substream->runtime`. Note: this fixes CVE-2017-0861 Signed-off-by: Robb Glasser Signed-off-by: Nick Desaulniers Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/pcm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 7eadb7fd807471..7fea724d093af6 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -153,7 +153,9 @@ static int snd_pcm_control_ioctl(struct snd_card *card, err = -ENXIO; goto _error; } + mutex_lock(&pcm->open_mutex); err = snd_pcm_info_user(substream, info); + mutex_unlock(&pcm->open_mutex); _error: mutex_unlock(®ister_mutex); return err; From 8a4b29a72a7cac97a44dec1b74ec804e0176cca8 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 30 Nov 2017 10:08:28 +0100 Subject: [PATCH 0473/3326] ALSA: seq: Remove spurious WARN_ON() at timer check commit 43a3542870328601be02fcc9d27b09db467336ef upstream. The use of snd_BUG_ON() in ALSA sequencer timer may lead to a spurious WARN_ON() when a slave timer is deployed as its backend and a corresponding master timer stops meanwhile. 
The symptom was triggered by syzkaller spontaneously. Since the NULL timer is valid there, rip off snd_BUG_ON(). Reported-by: syzbot Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/seq/seq_timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c index 37d9cfbc29f9c8..b80985fbc334cc 100644 --- a/sound/core/seq/seq_timer.c +++ b/sound/core/seq/seq_timer.c @@ -355,7 +355,7 @@ static int initialize_timer(struct snd_seq_timer *tmr) unsigned long freq; t = tmr->timeri->timer; - if (snd_BUG_ON(!t)) + if (!t) return -EINVAL; freq = tmr->preferred_resolution; From 3936d752df21f3f83155df3a19ec3617c18741be Mon Sep 17 00:00:00 2001 From: Jaejoong Kim Date: Mon, 4 Dec 2017 15:31:48 +0900 Subject: [PATCH 0474/3326] ALSA: usb-audio: Fix out-of-bound error commit 251552a2b0d454badc8f486e6d79100970c744b0 upstream. The snd_usb_copy_string_desc() retrieves the usb string corresponding to the index number through the usb_string(). The problem is that the usb_string() returns the length of the string (>= 0) when successful, but it can also return a negative value about the error case or status of usb_control_msg(). If iClockSource is '0' as shown below, usb_string() will returns -EINVAL. This will result in '0' being inserted into buf[-22], and the following KASAN out-of-bound error message will be output. AudioControl Interface Descriptor: bLength 8 bDescriptorType 36 bDescriptorSubtype 10 (CLOCK_SOURCE) bClockID 1 bmAttributes 0x07 Internal programmable Clock (synced to SOF) bmControls 0x07 Clock Frequency Control (read/write) Clock Validity Control (read-only) bAssocTerminal 0 iClockSource 0 To fix it, check usb_string()'return value and bail out. ================================================================== BUG: KASAN: stack-out-of-bounds in parse_audio_unit+0x1327/0x1960 [snd_usb_audio] Write of size 1 at addr ffff88007e66735a by task systemd-udevd/18376 CPU: 0 PID: 18376 Comm: systemd-udevd Not tainted 4.13.0+ #3 Hardware name: LG Electronics 15N540-RFLGL/White Tip Mountain, BIOS 15N5 Call Trace: dump_stack+0x63/0x8d print_address_description+0x70/0x290 ? parse_audio_unit+0x1327/0x1960 [snd_usb_audio] kasan_report+0x265/0x350 __asan_store1+0x4a/0x50 parse_audio_unit+0x1327/0x1960 [snd_usb_audio] ? save_stack+0xb5/0xd0 ? save_stack_trace+0x1b/0x20 ? save_stack+0x46/0xd0 ? kasan_kmalloc+0xad/0xe0 ? kmem_cache_alloc_trace+0xff/0x230 ? snd_usb_create_mixer+0xb0/0x4b0 [snd_usb_audio] ? usb_audio_probe+0x4de/0xf40 [snd_usb_audio] ? usb_probe_interface+0x1f5/0x440 ? driver_probe_device+0x3ed/0x660 ? build_feature_ctl+0xb10/0xb10 [snd_usb_audio] ? save_stack_trace+0x1b/0x20 ? init_object+0x69/0xa0 ? snd_usb_find_csint_desc+0xa8/0xf0 [snd_usb_audio] snd_usb_mixer_controls+0x1dc/0x370 [snd_usb_audio] ? build_audio_procunit+0x890/0x890 [snd_usb_audio] ? snd_usb_create_mixer+0xb0/0x4b0 [snd_usb_audio] ? kmem_cache_alloc_trace+0xff/0x230 ? usb_ifnum_to_if+0xbd/0xf0 snd_usb_create_mixer+0x25b/0x4b0 [snd_usb_audio] ? snd_usb_create_stream+0x255/0x2c0 [snd_usb_audio] usb_audio_probe+0x4de/0xf40 [snd_usb_audio] ? snd_usb_autosuspend.part.7+0x30/0x30 [snd_usb_audio] ? __pm_runtime_idle+0x90/0x90 ? kernfs_activate+0xa6/0xc0 ? usb_match_one_id_intf+0xdc/0x130 ? 
__pm_runtime_set_status+0x2d4/0x450 usb_probe_interface+0x1f5/0x440 Signed-off-by: Jaejoong Kim Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/mixer.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 2b835cca41b16a..0f59da604bbc33 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -204,6 +204,10 @@ static int snd_usb_copy_string_desc(struct mixer_build *state, int index, char *buf, int maxlen) { int len = usb_string(state->chip->dev, index, buf, maxlen - 1); + + if (len < 0) + return 0; + buf[len] = 0; return len; } From 3884d12e17ab770aa0f5d4bc65bfbfd006f417fa Mon Sep 17 00:00:00 2001 From: Jaejoong Kim Date: Mon, 4 Dec 2017 15:31:49 +0900 Subject: [PATCH 0475/3326] ALSA: usb-audio: Add check return value for usb_string() commit 89b89d121ffcf8d9546633b98ded9d18b8f75891 upstream. snd_usb_copy_string_desc() returns zero if usb_string() fails. In case of failure, we need to check the snd_usb_copy_string_desc()'s return value and add an exception case Signed-off-by: Jaejoong Kim Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/mixer.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 0f59da604bbc33..4fde4f8d4444a5 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2178,13 +2178,14 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, if (len) ; else if (nameid) - snd_usb_copy_string_desc(state, nameid, kctl->id.name, + len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name)); - else { + else len = get_term_name(state, &state->oterm, kctl->id.name, sizeof(kctl->id.name), 0); - if (!len) - strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name)); + + if (!len) { + strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name)); if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) append_ctl_name(kctl, " Clock Source"); From ce1079588ebc42a2e3a0d310a2ea1f3a75aa8d49 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 28 Sep 2017 15:14:01 +0100 Subject: [PATCH 0476/3326] iommu/vt-d: Fix scatterlist offset handling commit 29a90b70893817e2f2bb3cea40a29f5308e21b21 upstream. The intel-iommu DMA ops fail to correctly handle scatterlists where sg->offset is greater than PAGE_SIZE - the IOVA allocation is computed appropriately based on the page-aligned portion of the offset, but the mapping is set up relative to sg->page, which means it fails to actually cover the whole buffer (and in the worst case doesn't cover it at all): (sg->dma_address + sg->dma_len) ----+ sg->dma_address ---------+ | iov_pfn------+ | | | | | v v v iova: a b c d e f |--------|--------|--------|--------|--------| <...calculated....> [_____mapped______] pfn: 0 1 2 3 4 5 |--------|--------|--------|--------|--------| ^ ^ ^ | | | sg->page ----+ | | sg->offset --------------+ | (sg->offset + sg->length) ----------+ As a result, the caller ends up overrunning the mapping into whatever lies beyond, which usually goes badly: [ 429.645492] DMAR: DRHD: handling fault status reg 2 [ 429.650847] DMAR: [DMA Write] Request device [02:00.4] fault addr f2682000 ... Whilst this is a fairly rare occurrence, it can happen from the result of intermediate scatterlist processing such as scatterwalk_ffwd() in the crypto layer. Whilst that particular site could be fixed up, it still seems worthwhile to bring intel-iommu in line with other DMA API implementations in handling this robustly. 
To that end, fix the intel_map_sg() path to line up the mapping correctly (in units of MM pages rather than VT-d pages to match the aligned_nrpages() calculation) regardless of the offset, and use sg_phys() consistently for clarity. Reported-by: Harsh Jain Signed-off-by: Robin Murphy Reviewed by: Ashok Raj Tested by: Jacob Pan Signed-off-by: Alex Williamson Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/intel-iommu.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 6784a05dd6b2d5..83f3d4831f9438 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2254,10 +2254,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, uint64_t tmp; if (!sg_res) { + unsigned int pgoff = sg->offset & ~PAGE_MASK; + sg_res = aligned_nrpages(sg->offset, sg->length); - sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; + sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff; sg->dma_length = sg->length; - pteval = page_to_phys(sg_page(sg)) | prot; + pteval = (sg_phys(sg) - pgoff) | prot; phys_pfn = pteval >> VTD_PAGE_SHIFT; } @@ -3790,7 +3792,7 @@ static int intel_nontranslate_map_sg(struct device *hddev, for_each_sg(sglist, sg, nelems, i) { BUG_ON(!sg_page(sg)); - sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; + sg->dma_address = sg_phys(sg); sg->dma_length = sg->length; } return nelems; From b68df97ec8d0fd2074c130a64c831b8b760f1228 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 28 Nov 2017 21:19:53 +0800 Subject: [PATCH 0477/3326] smp/hotplug: Move step CPUHP_AP_SMPCFD_DYING to the correct place commit 46febd37f9c758b05cd25feae8512f22584742fe upstream. Commit 31487f8328f2 ("smp/cfd: Convert core to hotplug state machine") accidently put this step on the wrong place. The step should be at the cpuhp_ap_states[] rather than the cpuhp_bp_states[]. grep smpcfd /sys/devices/system/cpu/hotplug/states 40: smpcfd:prepare 129: smpcfd:dying "smpcfd:dying" was missing before. So was the invocation of the function smpcfd_dying_cpu(). Fixes: 31487f8328f2 ("smp/cfd: Convert core to hotplug state machine") Signed-off-by: Lai Jiangshan Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Richard Weinberger Cc: Sebastian Andrzej Siewior Cc: Boris Ostrovsky Link: https://lkml.kernel.org/r/20171128131954.81229-1-jiangshanlai@gmail.com Signed-off-by: Greg Kroah-Hartman --- kernel/cpu.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/cpu.c b/kernel/cpu.c index 04892a82f6ac36..7891aecc6aec79 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1289,11 +1289,6 @@ static struct cpuhp_step cpuhp_bp_states[] = { .teardown.single = NULL, .cant_stop = true, }, - [CPUHP_AP_SMPCFD_DYING] = { - .name = "smpcfd:dying", - .startup.single = NULL, - .teardown.single = smpcfd_dying_cpu, - }, /* * Handled on controll processor until the plugged processor manages * this itself. @@ -1335,6 +1330,11 @@ static struct cpuhp_step cpuhp_ap_states[] = { .startup.single = NULL, .teardown.single = rcutree_dying_cpu, }, + [CPUHP_AP_SMPCFD_DYING] = { + .name = "smpcfd:dying", + .startup.single = NULL, + .teardown.single = smpcfd_dying_cpu, + }, /* Entry state on starting. Interrupts enabled from here on. 
Transient * state for synchronsization */ [CPUHP_AP_ONLINE] = { From cb90fcfcdd9062453c463622be3029548966f34a Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 20 Nov 2017 12:38:44 +0100 Subject: [PATCH 0478/3326] s390: always save and restore all registers on context switch commit fbbd7f1a51965b50dd12924841da0d478f3da71b upstream. The switch_to() macro has an optimization to avoid saving and restoring register contents that aren't needed for kernel threads. There is however the possibility that a kernel thread execve's a user space program. In such a case the execve'd process can partially see the contents of the previous process, which shouldn't be allowed. To avoid this, simply always save and restore register contents on context switch. Fixes: fdb6d070effba ("switch_to: dont restore/save access & fpu regs for kernel threads") Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/include/asm/switch_to.h | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index ec7b476c1ac571..c61b2cc1a8a86a 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs) asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs)); } -#define switch_to(prev,next,last) do { \ - if (prev->mm) { \ - save_fpu_regs(); \ - save_access_regs(&prev->thread.acrs[0]); \ - save_ri_cb(prev->thread.ri_cb); \ - save_gs_cb(prev->thread.gs_cb); \ - } \ +#define switch_to(prev, next, last) do { \ + /* save_fpu_regs() sets the CIF_FPU flag, which enforces \ + * a restore of the floating point / vector registers as \ + * soon as the next task returns to user space \ + */ \ + save_fpu_regs(); \ + save_access_regs(&prev->thread.acrs[0]); \ + save_ri_cb(prev->thread.ri_cb); \ + save_gs_cb(prev->thread.gs_cb); \ update_cr_regs(next); \ - if (next->mm) { \ - set_cpu_flag(CIF_FPU); \ - restore_access_regs(&next->thread.acrs[0]); \ - restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ - restore_gs_cb(next->thread.gs_cb); \ - } \ - prev = __switch_to(prev,next); \ + restore_access_regs(&next->thread.acrs[0]); \ + restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ + restore_gs_cb(next->thread.gs_cb); \ + prev = __switch_to(prev, next); \ } while (0) #endif /* __ASM_SWITCH_TO_H */ From eb7fb979f090997654d70c24d975e8d3c5711b32 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 4 Dec 2017 09:42:45 +0100 Subject: [PATCH 0479/3326] s390/mm: fix off-by-one bug in 5-level page table handling commit 8d306f53b63099fec2d56300149e400d181ba4f5 upstream. Martin Cermak reported that setting a uprobe doesn't work. Reason for this is that the common uprobes code tries to get an unmapped area at the last possible page within an address space. This broke with commit 1aea9b3f9210 ("s390/mm: implement 5 level pages tables") which introduced an off-by-one bug which prevents to map anything at the last possible page within an address space. The check with the off-by-one bug however can be removed since with commit 8ab867cb0806 ("s390/mm: fix BUG_ON in crst_table_upgrade") the necessary check is done at both call sites. 
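A toy model of the off-by-one (stand-alone, made-up constants, and assuming an exclusive end address so that a request covering the last page has end == TASK_SIZE_MAX, as the commit message implies):

#include <stdio.h>

#define PAGE_SIZE     0x1000UL	/* illustrative values only */
#define TASK_SIZE_MAX 0x10000UL

/* Model of the removed check; 'end' is treated as exclusive. */
static int old_check_rejects(unsigned long end)
{
	return end >= TASK_SIZE_MAX;	/* off by one when end == TASK_SIZE_MAX */
}

int main(void)
{
	unsigned long last_page_start = TASK_SIZE_MAX - PAGE_SIZE;
	unsigned long end = last_page_start + PAGE_SIZE;	/* == TASK_SIZE_MAX */

	printf("last page still in range:       %d\n", end <= TASK_SIZE_MAX);	/* 1 */
	printf("old check would return -ENOMEM: %d\n", old_check_rejects(end));	/* 1 */
	return 0;
}

In this model the last legal page is rejected with -ENOMEM, which is why setting a uprobe at the last possible page of the address space failed before this patch.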
Reported-by: Martin Cermak Bisected-by: Thomas Richter Fixes: 1aea9b3f9210 ("s390/mm: implement 5 level pages tables") Reviewed-by: Hendrik Brueckner Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/mm/pgalloc.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index cc2faffa7d6ef4..334b6d103cbd19 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -85,8 +85,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE); - if (end >= TASK_SIZE_MAX) - return -ENOMEM; rc = 0; notify = 0; while (mm->context.asce_limit < end) { From 0fad60d717bccf42b60b45969bcddba577785d5d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 6 Dec 2017 16:11:27 +0100 Subject: [PATCH 0480/3326] s390: fix compat system call table commit e779498df587dd2189b30fe5b9245aefab870eb8 upstream. When wiring up the socket system calls the compat entries were incorrectly set. Not all of them point to the corresponding compat wrapper functions, which clear the upper 33 bits of user space pointers, like it is required. Fixes: 977108f89c989 ("s390: wire up separate socketcalls system calls") Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/syscalls.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index d39f121e67a987..bc905ae1d5c8d4 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -370,10 +370,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg) SYSCALL(sys_sendmmsg,compat_sys_sendmmsg) SYSCALL(sys_socket,sys_socket) SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */ -SYSCALL(sys_bind,sys_bind) -SYSCALL(sys_connect,sys_connect) +SYSCALL(sys_bind,compat_sys_bind) +SYSCALL(sys_connect,compat_sys_connect) SYSCALL(sys_listen,sys_listen) -SYSCALL(sys_accept4,sys_accept4) +SYSCALL(sys_accept4,compat_sys_accept4) SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */ SYSCALL(sys_setsockopt,compat_sys_setsockopt) SYSCALL(sys_getsockname,compat_sys_getsockname) From 59d97bf77a98adb08ce98073842374f17d7876de Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Mon, 4 Dec 2017 12:19:11 +0100 Subject: [PATCH 0481/3326] KVM: s390: Fix skey emulation permission check commit ca76ec9ca871e67d8cd0b6caba24aca3d3ac4546 upstream. All skey functions call skey_check_enable at their start, which checks if we are in the PSTATE and injects a privileged operation exception if we are. Unfortunately they continue processing afterwards and perform the operation anyhow as skey_check_enable does not deliver an error if the exception injection was successful. Let's move the PSTATE check into the skey functions and exit them on such an occasion, also we now do not enable skey handling anymore in such a case. 
Signed-off-by: Janosch Frank Reviewed-by: Christian Borntraeger Fixes: a7e19ab ("KVM: s390: handle missing storage-key facility") Reviewed-by: Cornelia Huck Reviewed-by: Thomas Huth Signed-off-by: Christian Borntraeger Signed-off-by: Greg Kroah-Hartman --- arch/s390/kvm/priv.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index c954ac49eee471..5b25287f449b7e 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -235,8 +235,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); return -EAGAIN; } - if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) - return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); return 0; } @@ -247,6 +245,9 @@ static int handle_iske(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + rc = try_handle_skey(vcpu); if (rc) return rc != -EAGAIN ? rc : 0; @@ -276,6 +277,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + rc = try_handle_skey(vcpu); if (rc) return rc != -EAGAIN ? rc : 0; @@ -311,6 +315,9 @@ static int handle_sske(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + rc = try_handle_skey(vcpu); if (rc) return rc != -EAGAIN ? rc : 0; From d46be2f67c1088077e35ab87da6fc2b0d367fb88 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Mon, 4 Dec 2017 16:27:25 +1100 Subject: [PATCH 0482/3326] Revert "powerpc: Do not call ppc_md.panic in fadump panic notifier" commit ab9dbf771ff9b6b7e814e759213ed01d7f0de320 upstream. This reverts commit a3b2cb30f252b21a6f962e0dd107c8b897ca65e4. That commit tried to fix problems with panic on powerpc in certain circumstances, where some output from the generic panic code was being dropped. Unfortunately, it breaks things worse in other circumstances. In particular when running a PAPR guest, it will now attempt to reboot instead of informing the hypervisor (KVM or PowerVM) that the guest has crashed. The crash notification is important to some virtualization management layers. Revert it for now until we can come up with a better solution. 
Fixes: a3b2cb30f252 ("powerpc: Do not call ppc_md.panic in fadump panic notifier") Signed-off-by: David Gibson [mpe: Tweak change log a bit] Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/machdep.h | 1 + arch/powerpc/include/asm/setup.h | 1 + arch/powerpc/kernel/fadump.c | 22 --------------------- arch/powerpc/kernel/setup-common.c | 27 ++++++++++++++++++++++++++ arch/powerpc/platforms/ps3/setup.c | 15 ++++++++++++++ arch/powerpc/platforms/pseries/setup.c | 1 + 6 files changed, 45 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 73b92017b6d7b5..cd2fc1cc1cc7c0 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -76,6 +76,7 @@ struct machdep_calls { void __noreturn (*restart)(char *cmd); void __noreturn (*halt)(void); + void (*panic)(char *str); void (*cpu_die)(void); long (*time_init)(void); /* Optional, may be NULL */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 257d23dbf55dce..cf00ec26303aef 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -24,6 +24,7 @@ extern void reloc_got2(unsigned long); void check_for_initrd(void); void initmem_init(void); +void setup_panic(void); #define ARCH_PANIC_TIMEOUT 180 #ifdef CONFIG_PPC_PSERIES diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index e1431800bfb9f7..29d2b60501408e 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1453,25 +1453,6 @@ static void fadump_init_files(void) return; } -static int fadump_panic_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - /* - * If firmware-assisted dump has been registered then trigger - * firmware-assisted dump and let firmware handle everything - * else. If this returns, then fadump was not registered, so - * go through the rest of the panic path. - */ - crash_fadump(NULL, ptr); - - return NOTIFY_DONE; -} - -static struct notifier_block fadump_panic_block = { - .notifier_call = fadump_panic_event, - .priority = INT_MIN /* may not return; must be done last */ -}; - /* * Prepare for firmware-assisted dump. */ @@ -1504,9 +1485,6 @@ int __init setup_fadump(void) init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start); fadump_init_files(); - atomic_notifier_chain_register(&panic_notifier_list, - &fadump_panic_block); - return 1; } subsys_initcall(setup_fadump); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 2e3bc16d02b289..90bc20efb4c7b7 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -704,6 +704,30 @@ int check_legacy_ioport(unsigned long base_port) } EXPORT_SYMBOL(check_legacy_ioport); +static int ppc_panic_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + /* + * If firmware-assisted dump has been registered then trigger + * firmware-assisted dump and let firmware handle everything else. + */ + crash_fadump(NULL, ptr); + ppc_md.panic(ptr); /* May not return */ + return NOTIFY_DONE; +} + +static struct notifier_block ppc_panic_block = { + .notifier_call = ppc_panic_event, + .priority = INT_MIN /* may not return; must be done last */ +}; + +void __init setup_panic(void) +{ + if (!ppc_md.panic) + return; + atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); +} + #ifdef CONFIG_CHECK_CACHE_COHERENCY /* * For platforms that have configurable cache-coherency. 
This function @@ -848,6 +872,9 @@ void __init setup_arch(char **cmdline_p) /* Probe the machine type, establish ppc_md. */ probe_machine(); + /* Setup panic notifier if requested by the platform. */ + setup_panic(); + /* * Configure ppc_md.power_save (ppc32 only, 64-bit machines do * it from their respective probe() function. diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 9dabea6e144396..6244bc849469e3 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -104,6 +104,20 @@ static void __noreturn ps3_halt(void) ps3_sys_manager_halt(); /* never returns */ } +static void ps3_panic(char *str) +{ + DBG("%s:%d %s\n", __func__, __LINE__, str); + + smp_send_stop(); + printk("\n"); + printk(" System does not reboot automatically.\n"); + printk(" Please press POWER button.\n"); + printk("\n"); + + while(1) + lv1_pause(1); +} + #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \ defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE) static void __init prealloc(struct ps3_prealloc *p) @@ -255,6 +269,7 @@ define_machine(ps3) { .probe = ps3_probe, .setup_arch = ps3_setup_arch, .init_IRQ = ps3_init_IRQ, + .panic = ps3_panic, .get_boot_time = ps3_get_boot_time, .set_dabr = ps3_set_dabr, .calibrate_decr = ps3_calibrate_decr, diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 5f1beb8367acaa..a8531e01265842 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -726,6 +726,7 @@ define_machine(pseries) { .pcibios_fixup = pSeries_final_fixup, .restart = rtas_restart, .halt = rtas_halt, + .panic = rtas_os_term, .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, .set_rtc_time = rtas_set_rtc_time, From f11f5c4b87164c88da85193430f36d7d14bdcec4 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 6 Dec 2017 18:21:14 +1000 Subject: [PATCH 0483/3326] powerpc/64s: Initialize ISAv3 MMU registers before setting partition table commit 371b80447ff33ddac392c189cf884a5a3e18faeb upstream. kexec can leave MMU registers set when booting into a new kernel, the PIDR (Process Identification Register) in particular. The boot sequence does not zero PIDR, so it only gets set when CPUs first switch to a userspace processes (until then it's running a kernel thread with effective PID = 0). This leaves a window where a process table entry and page tables are set up due to user processes running on other CPUs, that happen to match with a stale PID. The CPU with that PID may cause speculative accesses that address quadrant 0 (aka userspace addresses), which will result in cached translations and PWC (Page Walk Cache) for that process, on a CPU which is not in the mm_cpumask and so they will not be invalidated properly. The most common result is the kernel hanging in infinite page fault loops soon after kexec (usually in schedule_tail, which is usually the first non-speculative quadrant 0 access to a new PID) due to a stale PWC. However being a stale translation error, it could result in anything up to security and data corruption problems. Fix this by zeroing out PIDR at boot and kexec. 
Fixes: 7e381c0ff618 ("powerpc/mm/radix: Add mmu context handling callback for radix") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/cpu_setup_power.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 610955fe8b81c5..679bbe714e8561 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -102,6 +102,7 @@ _GLOBAL(__setup_cpu_power9) li r0,0 mtspr SPRN_PSSCR,r0 mtspr SPRN_LPID,r0 + mtspr SPRN_PID,r0 mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) or r3, r3, r4 @@ -126,6 +127,7 @@ _GLOBAL(__restore_cpu_power9) li r0,0 mtspr SPRN_PSSCR,r0 mtspr SPRN_LPID,r0 + mtspr SPRN_PID,r0 mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) or r3, r3, r4 From 3b1b28d246c333e9b8ce84e294ae4cad09cfc909 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 8 Feb 2016 23:30:47 +0200 Subject: [PATCH 0484/3326] iwlwifi: mvm: mark MIC stripped MPDUs commit bf19037074e770aad74b3b90f37b8b98db3f3748 upstream. When RADA is active, the hardware decrypts the packets and strips off the MIC as it is useless after decryption. Indicate that to mac80211. [this is needed for the 9000-series HW to work properly] Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 248699c2c4bff0..402a743b0ad742 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -253,6 +253,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, return -1; stats->flag |= RX_FLAG_DECRYPTED; + if (pkt_flags & FH_RSCSR_RADA_EN) + stats->flag |= RX_FLAG_MIC_STRIPPED; *crypt_len = IEEE80211_CCMP_HDR_LEN; return 0; case IWL_RX_MPDU_STATUS_SEC_TKIP: From 3e6c41d3ed846e6e5120e9c139778a8788212ff1 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 13 Nov 2017 09:50:47 +0200 Subject: [PATCH 0485/3326] iwlwifi: mvm: don't use transmit queue hang detection when it is not possible commit 0b9832b712d6767d6c7b01965fd788d1ca84fc92 upstream. When we act as an AP, new firmware versions handle internally the power saving clients and the driver doesn't know that the peers went to sleep. It is, hence, possible that a peer goes to sleep for a long time and stop pulling frames. This will cause its transmit queue to hang which is a condition that triggers the recovery flow in the driver. While this client is certainly buggy (it should have pulled the frame based on the TIM IE in the beacon), we can't blow up because of a buggy client. Change the current implementation to not enable the transmit queue hang detection on queues that serve peers when we act as an AP / GO. We can still enable this mechanism using the debug configuration which can come in handy when we want to debug why the client doesn't wake up. 
Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 2ea74abad73de4..53e269d5405056 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1143,9 +1143,18 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, unsigned int default_timeout = cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { + /* + * We can't know when the station is asleep or awake, so we + * must disable the queue hang detection. + */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && + vif && vif->type == NL80211_IFTYPE_AP) + return IWL_WATCHDOG_DISABLED; return iwlmvm_mod_params.tfd_q_hang_detect ? default_timeout : IWL_WATCHDOG_DISABLED; + } trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); txq_timer = (void *)trigger->data; From 60e942644974fe4254b8c6ab6cbbb6e69e88a3f8 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 13 Nov 2017 17:26:09 +0100 Subject: [PATCH 0486/3326] iwlwifi: mvm: flush queue before deleting ROC commit 6c2d49fdc5d947c5fe89935bd52e69f10000f4cb upstream. Before deleting a time event (remain-on-channel instance), flush the queue so that frames cannot get stuck on it. We already flush the AUX STA queues, but a separate station is used for the P2P Device queue. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2 ++ .../wireless/intel/iwlwifi/mvm/time-event.c | 24 +++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 8dcdb522b84678..614d488873551f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1042,6 +1042,7 @@ struct iwl_mvm { * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running + * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, @@ -1053,6 +1054,7 @@ enum iwl_mvm_status { IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_D3_RECONFIG, IWL_MVM_STATUS_FIRMWARE_RUNNING, + IWL_MVM_STATUS_NEED_FLUSH_P2P, }; /* Keep track of completed init configuration */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 4d0314912e9479..e25cda9fbf6c34 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -132,6 +132,24 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * executed, and a new time event means a new command. 
*/ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); + + /* Do the same for the P2P device queue (STA) */ + if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) { + struct iwl_mvm_vif *mvmvif; + + /* + * NB: access to this pointer would be racy, but the flush bit + * can only be set when we had a P2P-Device VIF, and we have a + * flush of this work in iwl_mvm_prepare_mac_removal() so it's + * not really racy. + */ + + if (!WARN_ON(!mvm->p2p_device_vif)) { + mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); + iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, + CMD_ASYNC); + } + } } static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) @@ -855,10 +873,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_remove_time_event(mvm, mvmvif, te_data); - else + set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); + } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); + } iwl_mvm_roc_finished(mvm); } From 26d4d23ae67fc3097e73f50fd55a6cb9fbce2a1e Mon Sep 17 00:00:00 2001 From: Ihab Zhaika Date: Thu, 16 Nov 2017 09:29:19 +0200 Subject: [PATCH 0487/3326] iwlwifi: add new cards for 9260 and 22000 series commit 567deca8e72df3ceb6c07c63f8541a4928f64d3b upstream. add 1 PCI ID for 9260 series and 1 for 22000 series. Signed-off-by: Ihab Zhaika Signed-off-by: Luca Coelho Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 548e1928430d0d..0f7bd37bf1728f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -551,6 +551,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)}, @@ -662,6 +663,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)}, #endif /* CONFIG_IWLMVM */ From 0d46809c6b84cde3d7379f8877bad0528c038460 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 19 Nov 2017 10:35:14 +0200 Subject: [PATCH 0488/3326] iwlwifi: mvm: fix packet injection commit b13f43a48571f0cd0fda271b5046b65f1f268db5 upstream. We need to have a station and a queue for the monitor interface to be able to inject traffic. We used to have this traffic routed to the auxiliary queue, but this queue isn't scheduled for the station we had linked to the monitor vif. Allocate a new queue, link it to the monitor vif's station and make that queue use the BE fifo. 
This fixes https://bugzilla.kernel.org/show_bug.cgi?id=196715 Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho Signed-off-by: Greg Kroah-Hartman --- .../net/wireless/intel/iwlwifi/fw/api/txq.h | 4 ++ .../net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 + drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 53 ++++++++++++++----- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 3 +- 6 files changed, 49 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 87b4434224a1f0..dfa111bb411e5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -68,6 +68,9 @@ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames + * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using + * monitor mode. Note this queue is the same as the queue for P2P device + * but we can't have active monitor mode along with P2P device anyway. * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure * that we are never left without the possibility to connect to an AP. @@ -87,6 +90,7 @@ enum iwl_mvm_dqa_txq { IWL_MVM_DQA_CMD_QUEUE = 0, IWL_MVM_DQA_AUX_QUEUE = 1, IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, + IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2, IWL_MVM_DQA_GCAST_QUEUE = 3, IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index a2bf530eeae49e..2f22e14e00fe88 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -787,7 +787,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; - u32 tfd_queue_msk = 0; + u32 tfd_queue_msk = BIT(mvm->snif_queue); int ret; WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 614d488873551f..2ec27ceb8af9a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -954,6 +954,7 @@ struct iwl_mvm { /* Tx queues */ u16 aux_queue; + u16 snif_queue; u16 probe_queue; u16 p2p_dev_queue; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 231878969332d9..9fb40955d5f4f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -622,6 +622,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw_restart = iwlwifi_mod_params.fw_restart ? 
-1 : 0; mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; + mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index c4a343534c5ead..0d7929799942f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1700,29 +1700,29 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) sta->sta_id = IWL_MVM_INVALID_STA; } -static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) +static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, + u8 sta_id, u8 fifo) { unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? mvm->cfg->base_params->wd_timeout : IWL_WATCHDOG_DISABLED; if (iwl_mvm_has_new_tx_api(mvm)) { - int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue, - mvm->aux_sta.sta_id, - IWL_MAX_TID_COUNT, - wdg_timeout); - mvm->aux_queue = queue; + int tvqm_queue = + iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id, + IWL_MAX_TID_COUNT, + wdg_timeout); + *queue = tvqm_queue; } else { struct iwl_trans_txq_scd_cfg cfg = { - .fifo = IWL_MVM_TX_FIFO_MCAST, - .sta_id = mvm->aux_sta.sta_id, + .fifo = fifo, + .sta_id = sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; - iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, - wdg_timeout); + iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout); } } @@ -1741,7 +1741,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) /* Map Aux queue to fifo - needs to happen before adding Aux station */ if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_enable_aux_queue(mvm); + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, + mvm->aux_sta.sta_id, + IWL_MVM_TX_FIFO_MCAST); ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, MAC_INDEX_AUX, 0); @@ -1755,7 +1757,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_enable_aux_queue(mvm); + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, + mvm->aux_sta.sta_id, + IWL_MVM_TX_FIFO_MCAST); return 0; } @@ -1763,10 +1767,31 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; lockdep_assert_held(&mvm->mutex); - return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, + + /* Map snif queue to fifo - must happen before adding snif station */ + if (!iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, + mvm->snif_sta.sta_id, + IWL_MVM_TX_FIFO_BE); + + ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, mvmvif->id, 0); + if (ret) + return ret; + + /* + * For 22000 firmware and on we cannot add queue to a station unknown + * to firmware so enable queue here - after the station was added + */ + if (iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, + mvm->snif_sta.sta_id, + IWL_MVM_TX_FIFO_BE); + + return 0; } int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -1775,6 +1800,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue, + IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); 
if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6f2e2af23219a2..887a504ce64a5e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -657,7 +657,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (ap_sta_id != IWL_MVM_INVALID_STA) sta_id = ap_sta_id; } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { - queue = mvm->aux_queue; + queue = mvm->snif_queue; + sta_id = mvm->snif_sta.sta_id; } } From 15f36a5ea2278e3ee3fecbdac21895886ee80294 Mon Sep 17 00:00:00 2001 From: David Spinadel Date: Mon, 21 Nov 2016 17:01:25 +0200 Subject: [PATCH 0489/3326] iwlwifi: mvm: enable RX offloading with TKIP and WEP commit 9d0fc5a50a0548f8e5d61243e5e5f26d5c405aef upstream. Set the flag that indicates that ICV was stripped on if this option was enabled in the HW. [this is needed for the 9000-series HW to work properly] Signed-off-by: David Spinadel Signed-off-by: Luca Coelho Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 4 +++- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 12 +++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index e90abbfba71829..ecd5c1df811ca0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -117,6 +117,7 @@ #define FH_RSCSR_FRAME_INVALID 0x55550000 #define FH_RSCSR_FRAME_ALIGN 0x40 #define FH_RSCSR_RPA_EN BIT(25) +#define FH_RSCSR_RADA_EN BIT(26) #define FH_RSCSR_RXQ_POS 16 #define FH_RSCSR_RXQ_MASK 0x3F0000 @@ -128,7 +129,8 @@ struct iwl_rx_packet { * 31: flag flush RB request * 30: flag ignore TC (terminal counter) request * 29: flag fast IRQ request - * 28-26: Reserved + * 28-27: Reserved + * 26: RADA enabled * 25: Offload enabled * 24: RPF enabled * 23: RSS enabled diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 402a743b0ad742..819e6f66a5b5ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -232,8 +232,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_rx_status *stats, - struct iwl_rx_mpdu_desc *desc, int queue, - u8 *crypt_len) + struct iwl_rx_mpdu_desc *desc, u32 pkt_flags, + int queue, u8 *crypt_len) { u16 status = le16_to_cpu(desc->status); @@ -272,6 +272,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_WEP) *crypt_len = IEEE80211_WEP_IV_LEN; + + if (pkt_flags & FH_RSCSR_RADA_EN) + stats->flag |= RX_FLAG_ICV_STRIPPED; + return 0; case IWL_RX_MPDU_STATUS_SEC_EXT_ENC: if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) @@ -812,7 +816,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); - if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) { + if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, + le32_to_cpu(pkt->len_n_flags), queue, + &crypt_len)) { kfree_skb(skb); return; } From 01b43f2e3cad60c626daa9b174667a202cee6987 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Sat, 25 Nov 2017 21:39:25 +0100 Subject: [PATCH 0490/3326] brcmfmac: change 
driver unbind order of the sdio function devices commit 5c3de777bdaf48bd0cfb43097c0d0fb85056cab7 upstream. In the function brcmf_sdio_firmware_callback() the driver is unbound from the sdio function devices in the error path. However, the order in which it is done resulted in a use-after-free issue (see brcmf_ops_sdio_remove() in bcmsdh.c). Hence change the order and first unbind sdio function #2 device and then unbind sdio function #1 device. Fixes: 7a51461fc2da ("brcmfmac: unbind all devices upon failure in firmware callback") Reported-by: Stefan Wahren Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 613caca7dc020a..b3fa8ae804650f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -4096,8 +4096,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, sdio_release_host(sdiodev->func[1]); fail: brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); - device_release_driver(dev); device_release_driver(&sdiodev->func[2]->dev); + device_release_driver(dev); } struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) From 425704be0968368005eabbf5c68182a4d4b23c8b Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Mon, 2 Mar 2015 14:13:36 +0000 Subject: [PATCH 0491/3326] kdb: Fix handling of kallsyms_symbol_next() return value commit c07d35338081d107e57cf37572d8cc931a8e32e2 upstream. kallsyms_symbol_next() returns a boolean (true on success). Currently kdb_read() tests the return value with an inequality that unconditionally evaluates to true. This is fixed in the obvious way and, since the conditional branch is supposed to be unreachable, we also add a WARN_ON(). Reported-by: Dan Carpenter Signed-off-by: Daniel Thompson Signed-off-by: Jason Wessel Signed-off-by: Greg Kroah-Hartman --- kernel/debug/kdb/kdb_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index e74be38245adf7..ed5d34925ad061 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -350,7 +350,7 @@ static char *kdb_read(char *buffer, size_t bufsize) } kdb_printf("\n"); for (i = 0; i < count; i++) { - if (kallsyms_symbol_next(p_tmp, i) < 0) + if (WARN_ON(!kallsyms_symbol_next(p_tmp, i))) break; kdb_printf("%s ", p_tmp); *(p_tmp + len) = '\0'; From 55b26ae24c64ee66188952daacb00aa33bd29407 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Sun, 19 Nov 2017 22:17:00 -0800 Subject: [PATCH 0492/3326] md/r5cache: move mddev_lock() out of r5c_journal_mode_set() commit ff35f58e8f8eb520367879a0ccc6f2ec4b62b17b upstream. r5c_journal_mode_set() is called by r5c_journal_mode_store() and raid_ctr() in dm-raid. We don't need mddev_lock() when calling from raid_ctr(). This patch fixes this by moves the mddev_lock() to r5c_journal_mode_store(). 
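After the change, the lock is owned by the sysfs path alone; a minimal sketch of the two call sites (illustrative pseudo-code, not the literal driver functions — parse_journal_mode() is a made-up placeholder for the mode-string parsing):

    /* sysfs write path: takes mddev_lock() around the now lock-free helper */
    static ssize_t journal_mode_store_sketch(struct mddev *mddev,
                                             const char *page, size_t length)
    {
            int mode = parse_journal_mode(page, length);  /* placeholder */
            int ret = mddev_lock(mddev);

            if (ret)
                    return ret;
            ret = r5c_journal_mode_set(mddev, mode);      /* no locking inside */
            mddev_unlock(mddev);
            return ret ?: length;
    }

    /* dm-raid's raid_ctr(), by contrast, may now call
     * r5c_journal_mode_set(mddev, mode) directly, without mddev_lock(). */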
Signed-off-by: Song Liu Signed-off-by: Shaohua Li Signed-off-by: Greg Kroah-Hartman --- drivers/md/raid5-cache.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 0b7406ac8ce1d9..9a340728b84694 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -2571,31 +2571,22 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) int r5c_journal_mode_set(struct mddev *mddev, int mode) { struct r5conf *conf; - int err; if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || mode > R5C_JOURNAL_MODE_WRITE_BACK) return -EINVAL; - err = mddev_lock(mddev); - if (err) - return err; conf = mddev->private; - if (!conf || !conf->log) { - mddev_unlock(mddev); + if (!conf || !conf->log) return -ENODEV; - } if (raid5_calc_degraded(conf) > 0 && - mode == R5C_JOURNAL_MODE_WRITE_BACK) { - mddev_unlock(mddev); + mode == R5C_JOURNAL_MODE_WRITE_BACK) return -EINVAL; - } mddev_suspend(mddev); conf->log->r5c_journal_mode = mode; mddev_resume(mddev); - mddev_unlock(mddev); pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", mdname(mddev), mode, r5c_journal_mode_str[mode]); @@ -2608,6 +2599,7 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev, { int mode = ARRAY_SIZE(r5c_journal_mode_str); size_t len = length; + int ret; if (len < 2) return -EINVAL; @@ -2619,8 +2611,12 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev, if (strlen(r5c_journal_mode_str[mode]) == len && !strncmp(page, r5c_journal_mode_str[mode], len)) break; - - return r5c_journal_mode_set(mddev, mode) ?: length; + ret = mddev_lock(mddev); + if (ret) + return ret; + ret = r5c_journal_mode_set(mddev, mode); + mddev_unlock(mddev); + return ret ?: length; } struct md_sysfs_entry From 25df8b009734b241ce581ac7505efc587cf0402e Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Tue, 21 Nov 2017 08:49:36 +0100 Subject: [PATCH 0493/3326] drm/bridge: analogix dp: Fix runtime PM state in get_modes() callback commit 510353a63796d467b41237ab4f136136f68c297d upstream. get_modes() callback might be called asynchronously from the DRM core and it is not synchronized with bridge_enable(), which sets proper runtime PM state of the main DP device. Fix this by calling pm_runtime_get_sync() before calling drm_get_edid(), which in turn calls drm_dp_i2c_xfer() and analogix_dp_transfer() to ensure that main DP device is runtime active when doing any access to its registers. 
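The shape of the fix is the usual runtime-PM bracket around the code that touches the hardware; roughly (a simplified sketch of the get_modes() path, not the exact driver code):

    static int dp_get_modes_sketch(struct analogix_dp_device *dp,
                                   struct drm_connector *connector)
    {
            struct edid *edid;
            int num_modes = 0;

            pm_runtime_get_sync(dp->dev);   /* power up DP block before AUX/I2C access */
            edid = drm_get_edid(connector, &dp->aux.ddc);
            pm_runtime_put(dp->dev);        /* balanced put once the EDID read is done */

            if (edid) {
                    drm_mode_connector_update_edid_property(connector, edid);
                    num_modes = drm_add_edid_modes(connector, edid);
                    kfree(edid);
            }
            return num_modes;
    }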
This fixes the following kernel issue on Samsung Exynos5250 Snow board: Unhandled fault: imprecise external abort (0x406) at 0x00000000 pgd = c0004000 [00000000] *pgd=00000000 Internal error: : 406 [#1] PREEMPT SMP ARM Modules linked in: CPU: 0 PID: 62 Comm: kworker/0:2 Not tainted 4.13.0-rc2-00364-g4a97a3da420b #3357 Hardware name: SAMSUNG EXYNOS (Flattened Device Tree) Workqueue: events output_poll_execute task: edc14800 task.stack: edcb2000 PC is at analogix_dp_transfer+0x15c/0x2fc LR is at analogix_dp_transfer+0x134/0x2fc pc : [] lr : [] psr: 60000013 sp : edcb3be8 ip : 0000002a fp : 00000001 r10: 00000000 r9 : edcb3cd8 r8 : edcb3c40 r7 : 00000000 r6 : edd3b380 r5 : edd3b010 r4 : 00000064 r3 : 00000000 r2 : f0ad3000 r1 : edcb3c40 r0 : edd3b010 Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none Control: 10c5387d Table: 4000406a DAC: 00000051 Process kworker/0:2 (pid: 62, stack limit = 0xedcb2210) Stack: (0xedcb3be8 to 0xedcb4000) [] (analogix_dp_transfer) from [] (drm_dp_i2c_do_msg+0x8c/0x2b4) [] (drm_dp_i2c_do_msg) from [] (drm_dp_i2c_xfer+0x98/0x214) [] (drm_dp_i2c_xfer) from [] (__i2c_transfer+0x140/0x29c) [] (__i2c_transfer) from [] (i2c_transfer+0x70/0xe4) [] (i2c_transfer) from [] (drm_do_probe_ddc_edid+0xb4/0x114) [] (drm_do_probe_ddc_edid) from [] (drm_probe_ddc+0x18/0x28) [] (drm_probe_ddc) from [] (drm_get_edid+0x124/0x2d4) [] (drm_get_edid) from [] (analogix_dp_get_modes+0x90/0x114) [] (analogix_dp_get_modes) from [] (drm_helper_probe_single_connector_modes+0x198/0x68c) [] (drm_helper_probe_single_connector_modes) from [] (drm_setup_crtcs+0x1b4/0xd18) [] (drm_setup_crtcs) from [] (drm_fb_helper_hotplug_event+0x94/0xd0) [] (drm_fb_helper_hotplug_event) from [] (drm_kms_helper_hotplug_event+0x24/0x28) [] (drm_kms_helper_hotplug_event) from [] (output_poll_execute+0x6c/0x174) [] (output_poll_execute) from [] (process_one_work+0x188/0x3fc) [] (process_one_work) from [] (worker_thread+0x30/0x4b8) [] (worker_thread) from [] (kthread+0x128/0x164) [] (kthread) from [] (ret_from_fork+0x14/0x24) Code: 0a000002 ea000009 e2544001 0a00004a (e59537c8) ---[ end trace cddc7919c79f7878 ]--- Reported-by: Misha Komarovskiy Signed-off-by: Marek Szyprowski Signed-off-by: Greg Kroah-Hartman Signed-off-by: Archit Taneja Link: https://patchwork.freedesktop.org/patch/msgid/20171121074936.22520-1-m.szyprowski@samsung.com --- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 5dd3f1cd074a1d..a8905049b9da02 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -946,7 +946,9 @@ static int analogix_dp_get_modes(struct drm_connector *connector) return 0; } + pm_runtime_get_sync(dp->dev); edid = drm_get_edid(connector, &dp->aux.ddc); + pm_runtime_put(dp->dev); if (edid) { drm_mode_connector_update_edid_property(&dp->connector, edid); From 4b929631c110645b46115d67172d590b210fa8c1 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Wed, 22 Nov 2017 14:14:47 +0100 Subject: [PATCH 0494/3326] drm/exynos: gem: Drop NONCONTIG flag for buffers allocated without IOMMU commit 120a264f9c2782682027d931d83dcbd22e01da80 upstream. When no IOMMU is available, all GEM buffers allocated by Exynos DRM driver are contiguous, because of the underlying dma_alloc_attrs() function provides only such buffers. 
In such case it makes no sense to keep BO_NONCONTIG flag for the allocated GEM buffers. This allows to avoid failures for buffer contiguity checks in the subsequent operations on GEM objects. Signed-off-by: Marek Szyprowski Signed-off-by: Inki Dae Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/exynos/exynos_drm_gem.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 077de014d61017..4400efe3974a30 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -247,6 +247,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev, if (IS_ERR(exynos_gem)) return exynos_gem; + if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) { + /* + * when no IOMMU is available, all allocated buffers are + * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag + */ + flags &= ~EXYNOS_BO_NONCONTIG; + DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n"); + } + /* set memory type and cache attribute from user side. */ exynos_gem->flags = flags; From e547af2582b53354073790df012ba3f852526410 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 29 Nov 2017 17:37:30 +0200 Subject: [PATCH 0495/3326] drm/i915: Fix vblank timestamp/frame counter jumps on gen2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a87e55f89f0b0dc541d89248a8445635936a3858 upstream. Previously I was under the impression that the scanline counter reads 0 when the pipe is off. Turns out that's not correct, and instead the scanline counter simply stops when the pipe stops, and it retains it's last value until the pipe starts up again, at which point the scanline counter jumps to vblank start. These jumps can cause the timestamp to jump backwards by one frame. Since we use the timestamps to guesstimage also the frame counter value on gen2, that would cause the frame counter to also jump backwards, which leads to a massice difference from the previous value. The end result is that flips/vblank events don't appear to complete as they're stuck waiting for the frame counter to catch up to that massive difference. Fix the problem properly by actually making sure the scanline counter has started to move before we assume that it's safe to enable vblank processing. 
v2: Less pointless duplication in the code (Chris) Cc: Daniel Vetter Cc: Chris Wilson Reviewed-by: Chris Wilson Fixes: b7792d8b54cc ("drm/i915: Wait for pipe to start before sampling vblank timestamps on gen2") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171129153732.3612-1-ville.syrjala@linux.intel.com (cherry picked from commit 8fedd64dabc86d0f31a0d1e152be3aa23c323553) Signed-off-by: Joonas Lahtinen Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_display.c | 51 +++++++++++++++++++--------- 1 file changed, 35 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5ebdb63330ddf1..1c73d55426819f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1000,7 +1000,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, return crtc->config->cpu_transcoder; } -static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) +static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, + enum pipe pipe) { i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; @@ -1015,7 +1016,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) msleep(5); line2 = I915_READ(reg) & line_mask; - return line1 == line2; + return line1 != line2; +} + +static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + /* Wait for the display line to settle/start moving */ + if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) + DRM_ERROR("pipe %c scanline %s wait timed out\n", + pipe_name(pipe), onoff(state)); +} + +static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) +{ + wait_for_pipe_scanline_moving(crtc, false); +} + +static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) +{ + wait_for_pipe_scanline_moving(crtc, true); } /* @@ -1038,7 +1060,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; - enum pipe pipe = crtc->pipe; if (INTEL_GEN(dev_priv) >= 4) { i915_reg_t reg = PIPECONF(cpu_transcoder); @@ -1049,9 +1070,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 100)) WARN(1, "pipe_off wait timed out\n"); } else { - /* Wait for the display line to settle */ - if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100)) - WARN(1, "pipe_off wait timed out\n"); + intel_wait_for_pipe_scanline_stopped(crtc); } } @@ -1944,15 +1963,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc) POSTING_READ(reg); /* - * Until the pipe starts DSL will read as 0, which would cause - * an apparent vblank timestamp jump, which messes up also the - * frame count when it's derived from the timestamps. So let's - * wait for the pipe to start properly before we call - * drm_crtc_vblank_on() + * Until the pipe starts PIPEDSL reads will return a stale value, + * which causes an apparent vblank timestamp jump when PIPEDSL + * resets to its proper value. That also messes up the frame count + * when it's derived from the timestamps. 
So let's wait for the + * pipe to start properly before we call drm_crtc_vblank_on() */ - if (dev->max_vblank_count == 0 && - wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50)) - DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe)); + if (dev->max_vblank_count == 0) + intel_wait_for_pipe_scanline_moving(crtc); } /** @@ -14682,6 +14700,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", pipe_name(pipe)); @@ -14691,8 +14711,7 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) I915_WRITE(PIPECONF(pipe), 0); POSTING_READ(PIPECONF(pipe)); - if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100)) - DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe)); + intel_wait_for_pipe_scanline_stopped(crtc); I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); POSTING_READ(DPLL(pipe)); From 9a11204d2b26324636ff54f8d28095ed5dd17e91 Mon Sep 17 00:00:00 2001 From: Laurent Caumont Date: Sat, 11 Nov 2017 12:44:46 -0500 Subject: [PATCH 0496/3326] media: dvb: i2c transfers over usb cannot be done from stack commit 6d33377f2abbf9f0e561b116dd468d1c3ff36a6a upstream. Signed-off-by: Laurent Caumont Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/dvb-usb/dibusb-common.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c index 8207e6900656be..bcacb0f220282d 100644 --- a/drivers/media/usb/dvb-usb/dibusb-common.c +++ b/drivers/media/usb/dvb-usb/dibusb-common.c @@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo); int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val) { - u8 wbuf[1] = { offs }; - return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1); + u8 *buf; + int rc; + + buf = kmalloc(2, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf[0] = offs; + + rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1); + *val = buf[1]; + kfree(buf); + + return rc; } EXPORT_SYMBOL(dibusb_read_eeprom_byte); From 2f2241083a773b52e3319103b0ba2d1331b1f6c0 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Wed, 8 Nov 2017 16:19:45 -0500 Subject: [PATCH 0497/3326] media: rc: sir_ir: detect presence of port commit 30b4e122d71cbec2944a5f8b558b88936ee42f10 upstream. Without this test, sir_ir clumsy claims resources for a device which does not exist. The 0-day kernel test robot reports the following errors (in a loop): sir_ir sir_ir.0: Trapped in interrupt genirq: Flags mismatch irq 4. 00000000 (ttyS0) vs. 00000000 (sir_ir) When sir_ir is loaded with the default io and irq, the following happens: - sir_ir claims irq 4 - user space opens /dev/ttyS0 - in serial8250_do_startup(), some setup is done for ttyS0, which causes irq 4 to fire (in THRE test) - sir_ir does not realise it was not for it, and spins until the "trapped in interrupt" - now serial driver calls setup_irq() and fails and we get the "Flags mismatch" error. There is no port present at 0x3e8 so simply check for the presence of a port, as suggested by Linus. 
Reported-by: kbuild test robot Tested-by: Fengguang Wu Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/rc/sir_ir.c | 40 +++++++++++++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c index bc906fb128d513..d59918878eb268 100644 --- a/drivers/media/rc/sir_ir.c +++ b/drivers/media/rc/sir_ir.c @@ -57,7 +57,7 @@ static void add_read_queue(int flag, unsigned long val); static irqreturn_t sir_interrupt(int irq, void *dev_id); static void send_space(unsigned long len); static void send_pulse(unsigned long len); -static void init_hardware(void); +static int init_hardware(void); static void drop_hardware(void); /* Initialisation */ @@ -263,11 +263,36 @@ static void send_pulse(unsigned long len) } } -static void init_hardware(void) +static int init_hardware(void) { + u8 scratch, scratch2, scratch3; unsigned long flags; spin_lock_irqsave(&hardware_lock, flags); + + /* + * This is a simple port existence test, borrowed from the autoconfig + * function in drivers/tty/serial/8250/8250_port.c + */ + scratch = sinp(UART_IER); + soutp(UART_IER, 0); +#ifdef __i386__ + outb(0xff, 0x080); +#endif + scratch2 = sinp(UART_IER) & 0x0f; + soutp(UART_IER, 0x0f); +#ifdef __i386__ + outb(0x00, 0x080); +#endif + scratch3 = sinp(UART_IER) & 0x0f; + soutp(UART_IER, scratch); + if (scratch2 != 0 || scratch3 != 0x0f) { + /* we fail, there's nothing here */ + spin_unlock_irqrestore(&hardware_lock, flags); + pr_err("port existence test failed, cannot continue\n"); + return -ENODEV; + } + /* reset UART */ outb(0, io + UART_MCR); outb(0, io + UART_IER); @@ -285,6 +310,8 @@ static void init_hardware(void) /* turn on UART */ outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR); spin_unlock_irqrestore(&hardware_lock, flags); + + return 0; } static void drop_hardware(void) @@ -334,14 +361,19 @@ static int sir_ir_probe(struct platform_device *dev) pr_err("IRQ %d already in use.\n", irq); return retval; } + + retval = init_hardware(); + if (retval) { + del_timer_sync(&timerlist); + return retval; + } + pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq); retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev); if (retval < 0) return retval; - init_hardware(); - return 0; } From f889ad87b2147dbde1d81715175cf44bdee91fab Mon Sep 17 00:00:00 2001 From: Sean Young Date: Sun, 19 Nov 2017 16:57:27 -0500 Subject: [PATCH 0498/3326] media: rc: partial revert of "media: rc: per-protocol repeat period" commit 67f0f15ad5c47490e19f2526f8f9cea97c5ce1a6 upstream. Since commit d57ea877af38 ("media: rc: per-protocol repeat period"), most IR protocols have a lower keyup timeout. This causes problems on the ite-cir, which has default IR timeout of 200ms. Since the IR decoders read the trailing space, with a IR timeout of 200ms, the last keydown will have at least a delay of 200ms. This is more than the protocol timeout of e.g. rc-6 (which is 164ms). As a result the last IR will be interpreted as a new keydown event, and we get two keypresses. Revert the protocol timeout to 250ms, except for cec which needs a timeout of 550ms. 
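The timing argument reduces to a single comparison between the receiver's IR timeout and the protocol's repeat period (an illustrative helper, not code from the patch):

    /* The decoder only reports the final keydown after the receiver's IR
     * timeout expires, so consecutive repeats are spaced by at least that
     * timeout. If it exceeds repeat_period, the repeat is misread as a
     * fresh keypress. */
    static bool repeat_misdetected(unsigned int ir_timeout_ms,
                                   unsigned int repeat_period_ms)
    {
            return ir_timeout_ms > repeat_period_ms;
    }

    /* ite-cir default timeout 200ms vs rc-6's old 164ms period -> true
     * (double keypresses); with the restored 250ms period -> false.   */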
Fixes: d57ea877af38 ("media: rc: per-protocol repeat period") Reported-by: Matthias Reichl Signed-off-by: Sean Young Tested-by: Matthias Reichl Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/rc/rc-main.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 981cccd6b988e1..72f381522cb26d 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -38,41 +38,41 @@ static const struct { [RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 }, [RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 }, [RC_PROTO_RC5] = { .name = "rc-5", - .scancode_bits = 0x1f7f, .repeat_period = 164 }, + .scancode_bits = 0x1f7f, .repeat_period = 250 }, [RC_PROTO_RC5X_20] = { .name = "rc-5x-20", - .scancode_bits = 0x1f7f3f, .repeat_period = 164 }, + .scancode_bits = 0x1f7f3f, .repeat_period = 250 }, [RC_PROTO_RC5_SZ] = { .name = "rc-5-sz", - .scancode_bits = 0x2fff, .repeat_period = 164 }, + .scancode_bits = 0x2fff, .repeat_period = 250 }, [RC_PROTO_JVC] = { .name = "jvc", .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_SONY12] = { .name = "sony-12", - .scancode_bits = 0x1f007f, .repeat_period = 100 }, + .scancode_bits = 0x1f007f, .repeat_period = 250 }, [RC_PROTO_SONY15] = { .name = "sony-15", - .scancode_bits = 0xff007f, .repeat_period = 100 }, + .scancode_bits = 0xff007f, .repeat_period = 250 }, [RC_PROTO_SONY20] = { .name = "sony-20", - .scancode_bits = 0x1fff7f, .repeat_period = 100 }, + .scancode_bits = 0x1fff7f, .repeat_period = 250 }, [RC_PROTO_NEC] = { .name = "nec", - .scancode_bits = 0xffff, .repeat_period = 160 }, + .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_NECX] = { .name = "nec-x", - .scancode_bits = 0xffffff, .repeat_period = 160 }, + .scancode_bits = 0xffffff, .repeat_period = 250 }, [RC_PROTO_NEC32] = { .name = "nec-32", - .scancode_bits = 0xffffffff, .repeat_period = 160 }, + .scancode_bits = 0xffffffff, .repeat_period = 250 }, [RC_PROTO_SANYO] = { .name = "sanyo", .scancode_bits = 0x1fffff, .repeat_period = 250 }, [RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd", - .scancode_bits = 0xffff, .repeat_period = 150 }, + .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse", - .scancode_bits = 0x1fffff, .repeat_period = 150 }, + .scancode_bits = 0x1fffff, .repeat_period = 250 }, [RC_PROTO_RC6_0] = { .name = "rc-6-0", - .scancode_bits = 0xffff, .repeat_period = 164 }, + .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20", - .scancode_bits = 0xfffff, .repeat_period = 164 }, + .scancode_bits = 0xfffff, .repeat_period = 250 }, [RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24", - .scancode_bits = 0xffffff, .repeat_period = 164 }, + .scancode_bits = 0xffffff, .repeat_period = 250 }, [RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32", - .scancode_bits = 0xffffffff, .repeat_period = 164 }, + .scancode_bits = 0xffffffff, .repeat_period = 250 }, [RC_PROTO_RC6_MCE] = { .name = "rc-6-mce", - .scancode_bits = 0xffff7fff, .repeat_period = 164 }, + .scancode_bits = 0xffff7fff, .repeat_period = 250 }, [RC_PROTO_SHARP] = { .name = "sharp", .scancode_bits = 0x1fff, .repeat_period = 250 }, [RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 }, From c4e71b6f7fd75ff372e766e01aded59d869d7cec Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Thu, 16 Nov 2017 17:58:20 +0000 Subject: [PATCH 0499/3326] arm64: KVM: fix VTTBR_BADDR_MASK BUG_ON 
off-by-one commit 26aa7b3b1c0fb3f1a6176a0c1847204ef4355693 upstream. VTTBR_BADDR_MASK is used to sanity check the size and alignment of the VTTBR address. It seems to currently be off by one, thereby only allowing up to 47-bit addresses (instead of 48-bit) and also insufficiently checking the alignment. This patch fixes it. As an example, with 4k pages, before this patch we have: PHYS_MASK_SHIFT = 48 VTTBR_X = 37 - 24 = 13 VTTBR_BADDR_SHIFT = 13 - 1 = 12 VTTBR_BADDR_MASK = ((1 << 35) - 1) << 12 = 0x00007ffffffff000 Which is wrong, because the mask doesn't allow bit 47 of the VTTBR address to be set, and only requires the address to be 12-bit (4k) aligned, while it actually needs to be 13-bit (8k) aligned because we concatenate two 4k tables. With this patch, the mask becomes 0x0000ffffffffe000, which is what we want. Fixes: 0369f6a34b9f ("arm64: KVM: EL2 register definitions") Reviewed-by: Suzuki K Poulose Reviewed-by: Christoffer Dall Signed-off-by: Kristina Martsenko Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/kvm_arm.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 61d694c2eae5bc..555d463c0eaadc 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -170,8 +170,7 @@ #define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS) #define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA) -#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X) #define VTTBR_VMID_SHIFT (UL(48)) #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) From ba8cbedca6dfca432db2eb9ed4013a2dcb358a71 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 16 Nov 2017 17:58:21 +0000 Subject: [PATCH 0500/3326] arm: KVM: Fix VTTBR_BADDR_MASK BUG_ON off-by-one commit 5553b142be11e794ebc0805950b2e8313f93d718 upstream. VTTBR_BADDR_MASK is used to sanity check the size and alignment of the VTTBR address. It seems to currently be off by one, thereby only allowing up to 39-bit addresses (instead of 40-bit) and also insufficiently checking the alignment. This patch fixes it. This patch is the 32bit pendent of Kristina's arm64 fix, and she deserves the actual kudos for pinpointing that one. 
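The same off-by-one is easy to check numerically with the 4k-page arm64 figures quoted above (a standalone illustration, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            const unsigned long long phys_mask_shift = 48, vttbr_x = 13;

            /* old macro: shift by VTTBR_X - 1 */
            unsigned long long old_mask =
                    ((1ULL << (phys_mask_shift - vttbr_x)) - 1) << (vttbr_x - 1);
            /* fixed macro: shift by VTTBR_X */
            unsigned long long new_mask =
                    ((1ULL << (phys_mask_shift - vttbr_x)) - 1) << vttbr_x;

            /* prints 0x00007ffffffff000: bit 47 lost, only 4k alignment checked */
            printf("old: 0x%016llx\n", old_mask);
            /* prints 0x0000ffffffffe000: bit 47 kept, 8k alignment enforced */
            printf("new: 0x%016llx\n", new_mask);
            return 0;
    }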
Fixes: f7ed45be3ba52 ("KVM: ARM: World-switch implementation") Reported-by: Kristina Martsenko Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/kvm_arm.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index c8781450905be9..3ab8b3781bfeca 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -161,8 +161,7 @@ #else #define VTTBR_X (5 - KVM_T0SZ) #endif -#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X) #define VTTBR_VMID_SHIFT _AC(48, ULL) #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) From a52c2829cd60492fc75bafc323145cab1af915f5 Mon Sep 17 00:00:00 2001 From: Andrew Honig Date: Fri, 1 Dec 2017 10:21:09 -0800 Subject: [PATCH 0501/3326] KVM: VMX: remove I/O port 0x80 bypass on Intel hosts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d59d51f088014f25c2562de59b9abff4f42a7468 upstream. This fixes CVE-2017-1000407. KVM allows guests to directly access I/O port 0x80 on Intel hosts. If the guest floods this port with writes it generates exceptions and instability in the host kernel, leading to a crash. With this change guest writes to port 0x80 on Intel will behave the same as they currently behave on AMD systems. Prevent the flooding by removing the code that sets port 0x80 as a passthrough port. This is essentially the same as upstream patch 99f85a28a78e96d28907fe036e1671a218fee597, except that patch was for AMD chipsets and this patch is for Intel. Signed-off-by: Andrew Honig Signed-off-by: Jim Mattson Fixes: fdef3ad1b386 ("KVM: VMX: Enable io bitmaps to avoid IO port 0x80 VMEXITs") Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b21113bcf227b2..f366e6d3a5e1f5 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6750,12 +6750,7 @@ static __init int hardware_setup(void) memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); - /* - * Allow direct access to the PC debug port (it is often used for I/O - * delays, but the vmexits simply slow things down). - */ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); - clear_bit(0x80, vmx_io_bitmap_a); memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); From 73c4af9627c0914b81832e162deb34f4894760cb Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 3 Dec 2017 23:54:41 +0100 Subject: [PATCH 0502/3326] KVM: arm/arm64: Fix broken GICH_ELRSR big endian conversion commit fc396e066318c0a02208c1d3f0b62950a7714999 upstream. We are incorrectly rearranging 32-bit words inside a 64-bit typed value for big endian systems, which would result in never marking a virtual interrupt as inactive on big endian systems (assuming 32 or fewer LRs on the hardware). Fix this by not doing any word order manipulation for the typed values. 
Acked-by: Christoffer Dall Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- virt/kvm/arm/hyp/vgic-v2-sr.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c index a3f18d3623661a..d7fd46fe9efb35 100644 --- a/virt/kvm/arm/hyp/vgic-v2-sr.c +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c @@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base) else elrsr1 = 0; -#ifdef CONFIG_CPU_BIG_ENDIAN - cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1; -#else cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; -#endif } static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base) From af85c1e04ec5737d595a9b4d0af0ef72a3ceb23b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 16 Nov 2017 17:58:15 +0000 Subject: [PATCH 0503/3326] KVM: arm/arm64: vgic-irqfd: Fix MSI entry allocation commit 150009e2c70cc3c6e97f00e7595055765d32fb85 upstream. Using the size of the structure we're allocating is a good idea and avoids any surprise... In this case, we're happilly confusing kvm_kernel_irq_routing_entry and kvm_irq_routing_entry... Fixes: 95b110ab9a09 ("KVM: arm/arm64: Enable irqchip routing") Reported-by: AKASHI Takahiro Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- virt/kvm/arm/vgic/vgic-irqfd.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c index b7baf581611ae8..99e026d2dade9b 100644 --- a/virt/kvm/arm/vgic/vgic-irqfd.c +++ b/virt/kvm/arm/vgic/vgic-irqfd.c @@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm) u32 nr = dist->nr_spis; int i, ret; - entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry), - GFP_KERNEL); + entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL); if (!entries) return -ENOMEM; From c6c0913bd13f89d1a875cc7d85b3646840bead07 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 16 Nov 2017 17:58:16 +0000 Subject: [PATCH 0504/3326] KVM: arm/arm64: vgic: Preserve the revious read from the pending table commit ddb4b0102cb9cdd2398d98b3e1e024e08a2f4239 upstream. The current pending table parsing code assumes that we keep the previous read of the pending bits, but keep that variable in the current block, making sure it is discarded on each loop. We end-up using whatever is on the stack. Who knows, it might just be the right thing... 
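The underlying C pitfall is simply block scope: a variable declared inside the loop body does not carry its value into the next iteration, so any "cache the previous read" logic must hoist it out. A minimal sketch with declarations elided (the helpers here are placeholders, not the vgic code itself):

    u8 val;                           /* fixed: declared once, outside the loop */
    int last_byte_offset = -1;

    list_for_each_entry(irq, &lpi_list, lpi_list) {
            int byte_offset = irq->intid / 8;

            if (byte_offset != last_byte_offset) {
                    val = read_pending_byte(byte_offset);   /* placeholder helper */
                    last_byte_offset = byte_offset;
            }
            /* If 'u8 val;' lived inside the loop instead, this read would see
             * an indeterminate stack value whenever the offset repeats. */
            mark_pending(irq, val & BIT(irq->intid % 8));    /* placeholder helper */
    }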
Fixes: 280771252c1ba ("KVM: arm64: vgic-v3: KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES") Reported-by: AKASHI Takahiro Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- virt/kvm/arm/vgic/vgic-v3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 96ea597db0e776..502f2100e7bfa6 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -324,13 +324,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) int last_byte_offset = -1; struct vgic_irq *irq; int ret; + u8 val; list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { int byte_offset, bit_nr; struct kvm_vcpu *vcpu; gpa_t pendbase, ptr; bool stored; - u8 val; vcpu = irq->target_vcpu; if (!vcpu) From fdbc5f3c5ece0a5db265537185d3bb7ff3b52d0d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 16 Nov 2017 17:58:18 +0000 Subject: [PATCH 0505/3326] KVM: arm/arm64: vgic-its: Check result of allocation before use commit 686f294f2f1ae40705283dd413ca1e4c14f20f93 upstream. We miss a test against NULL after allocation. Fixes: 6d03a68f8054 ("KVM: arm64: vgic-its: Turn device_id validation into generic ID validation") Reported-by: AKASHI Takahiro Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- virt/kvm/arm/vgic/vgic-its.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 547f12dc4d543b..3108e07526af50 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -775,6 +775,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its, return E_ITS_MAPC_COLLECTION_OOR; collection = kzalloc(sizeof(*collection), GFP_KERNEL); + if (!collection) + return -ENOMEM; collection->collection_id = coll_id; collection->target_addr = COLLECTION_NOT_MAPPED; From d0e9c7727224011c17e37f3415daec8dd7685ca7 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 5 Dec 2017 14:56:42 +0000 Subject: [PATCH 0506/3326] arm64: fpsimd: Prevent registers leaking from dead tasks commit 071b6d4a5d343046f253a5a8835d477d93992002 upstream. Currently, loading of a task's fpsimd state into the CPU registers is skipped if that task's state is already present in the registers of that CPU. However, the code relies on the struct fpsimd_state * (and by extension struct task_struct *) to unambiguously identify a task. There is a particular case in which this doesn't work reliably: when a task exits, its task_struct may be recycled to describe a new task. Consider the following scenario: 1) Task P loads its fpsimd state onto cpu C. per_cpu(fpsimd_last_state, C) := P; P->thread.fpsimd_state.cpu := C; 2) Task X is scheduled onto C and loads its fpsimd state on C. per_cpu(fpsimd_last_state, C) := X; X->thread.fpsimd_state.cpu := C; 3) X exits, causing X's task_struct to be freed. 4) P forks a new child T, which obtains X's recycled task_struct. T == X. T->thread.fpsimd_state.cpu == C (inherited from P). 5) T is scheduled on C. T's fpsimd state is not loaded, because per_cpu(fpsimd_last_state, C) == T (== X) && T->thread.fpsimd_state.cpu == C. (This is the check performed by fpsimd_thread_switch().) So, T gets X's registers because the last registers loaded onto C were those of X, in (2). 
This patch fixes the problem by ensuring that the sched-in check fails in (5): fpsimd_flush_task_state(T) is called when T is forked, so that T->thread.fpsimd_state.cpu == C cannot be true. This relies on the fact that T is not schedulable until after copy_thread() completes. Once T's fpsimd state has been loaded on some CPU C there may still be other cpus D for which per_cpu(fpsimd_last_state, D) == &X->thread.fpsimd_state. But D is necessarily != C in this case, and the check in (5) must fail. An alternative fix would be to do refcounting on task_struct. This would result in each CPU holding a reference to the last task whose fpsimd state was loaded there. It's not clear whether this is preferable, and it involves higher overhead than the fix proposed in this patch. It would also move all the task_struct freeing work into the context switch critical section, or otherwise some deferred cleanup mechanism would need to be introduced, neither of which seems obviously justified. Fixes: 005f78cd8849 ("arm64: defer reloading a task's FPSIMD state to userland resume") Signed-off-by: Dave Martin Reviewed-by: Ard Biesheuvel [will: word-smithed the comment so it makes more sense] Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/process.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 2dc0f84822109b..bcd22d7ee59023 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -258,6 +258,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); + /* + * In case p was allocated the same task_struct pointer as some + * other recently-exited task, make sure p is disassociated from + * any cpu that may have run that now-exited task recently. + * Otherwise we could erroneously skip reloading the FPSIMD + * registers for p. + */ + fpsimd_flush_task_state(p); + if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; From a5347596586db3ab5201ff24be75286dd911f897 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 6 Dec 2017 10:42:10 +0000 Subject: [PATCH 0507/3326] arm64: SW PAN: Point saved ttbr0 at the zero page when switching to init_mm commit 0adbdfde8cfc9415aeed2a4955d2d17b3bd9bf13 upstream. update_saved_ttbr0 mandates that mm->pgd is not swapper, since swapper contains kernel mappings and should never be installed into ttbr0. However, this means that callers must avoid passing the init_mm to update_saved_ttbr0 which in turn can cause the saved ttbr0 value to be out-of-date in the context of the idle thread. For example, EFI runtime services may leave the saved ttbr0 pointing at the EFI page table, and kernel threads may end up with stale references to freed page tables. This patch changes update_saved_ttbr0 so that the init_mm points the saved ttbr0 value to the empty zero page, which always exists and never contains valid translations. EFI and switch can then call into update_saved_ttbr0 unconditionally. 
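A condensed model of the sched-in check, and of why flushing at fork defeats the stale match (simplified sketch; the real test lives in fpsimd_thread_switch()):

    static void sched_in_fpsimd_sketch(struct task_struct *next, int cpu)
    {
            struct fpsimd_state *st = &next->thread.fpsimd_state;

            /* Reload is skipped only when this CPU's last-loaded state is
             * 'st' *and* 'st' remembers being loaded on this CPU — the check
             * that wrongly passes in step (5) above. */
            if (__this_cpu_read(fpsimd_last_state) == st && st->cpu == cpu)
                    return;

            load_fpsimd_registers(st);          /* placeholder for the reload */
    }

    /* The fix: copy_thread() now calls fpsimd_flush_task_state(p), which
     * invalidates p->thread.fpsimd_state.cpu, so a recycled task_struct can
     * never satisfy the second half of the condition. */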
Cc: Mark Rutland Cc: Ard Biesheuvel Cc: Vinayak Menon Fixes: 39bc88e5e38e9b21 ("arm64: Disable TTBR0_EL1 during normal kernel execution") Reviewed-by: Catalin Marinas Reviewed-by: Mark Rutland Reported-by: Vinayak Menon Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/efi.h | 4 +--- arch/arm64/include/asm/mmu_context.h | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 650344d0112497..c4cd5081d78bc4 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -132,11 +132,9 @@ static inline void efi_set_pgd(struct mm_struct *mm) * Defer the switch to the current thread's TTBR0_EL1 * until uaccess_enable(). Restore the current * thread's saved ttbr0 corresponding to its active_mm - * (if different from init_mm). */ cpu_set_reserved_ttbr0(); - if (current->active_mm != &init_mm) - update_saved_ttbr0(current, current->active_mm); + update_saved_ttbr0(current, current->active_mm); } } } diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 3257895a9b5e41..f7773f90546e70 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -174,11 +174,17 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) static inline void update_saved_ttbr0(struct task_struct *tsk, struct mm_struct *mm) { - if (system_uses_ttbr0_pan()) { - BUG_ON(mm->pgd == swapper_pg_dir); - task_thread_info(tsk)->ttbr0 = - virt_to_phys(mm->pgd) | ASID(mm) << 48; - } + u64 ttbr; + + if (!system_uses_ttbr0_pan()) + return; + + if (mm == &init_mm) + ttbr = __pa_symbol(empty_zero_page); + else + ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; + + task_thread_info(tsk)->ttbr0 = ttbr; } #else static inline void update_saved_ttbr0(struct task_struct *tsk, @@ -214,11 +220,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, * Update the saved TTBR0_EL1 of the scheduled-in task as the previous * value may have not been initialised yet (activate_mm caller) or the * ASID has changed since the last run (following the context switch - * of another thread of the same process). Avoid setting the reserved - * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit). + * of another thread of the same process). */ - if (next != &init_mm) - update_saved_ttbr0(tsk, next); + update_saved_ttbr0(tsk, next); } #define deactivate_mm(tsk,mm) do { } while (0) From e7ef4e829fe1a7e783a5e8233cde4889f4b65729 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 6 Dec 2017 10:51:12 +0000 Subject: [PATCH 0508/3326] arm64: SW PAN: Update saved ttbr0 value on enter_lazy_tlb commit d96cc49bff5a7735576cc6f6f111f875d101cec8 upstream. enter_lazy_tlb is called when a kernel thread rides on the back of another mm, due to a context switch or an explicit call to unuse_mm where a call to switch_mm is elided. In these cases, it's important to keep the saved ttbr value up to date with the active mm, otherwise we can end up with a stale value which points to a potentially freed page table. This patch implements enter_lazy_tlb for arm64, so that the saved ttbr0 is kept up-to-date with the active mm for kernel threads. 
Cc: Mark Rutland Cc: Ard Biesheuvel Cc: Vinayak Menon Fixes: 39bc88e5e38e9b21 ("arm64: Disable TTBR0_EL1 during normal kernel execution") Reviewed-by: Catalin Marinas Reviewed-by: Mark Rutland Reported-by: Vinayak Menon Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/mmu_context.h | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index f7773f90546e70..9d155fa9a50791 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -156,20 +156,6 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); #define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) -/* - * This is called when "tsk" is about to enter lazy TLB mode. - * - * mm: describes the currently active mm context - * tsk: task which is entering lazy tlb - * cpu: cpu number which is entering lazy tlb - * - * tsk->mm will be NULL - */ -static inline void -enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} - #ifdef CONFIG_ARM64_SW_TTBR0_PAN static inline void update_saved_ttbr0(struct task_struct *tsk, struct mm_struct *mm) @@ -193,6 +179,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk, } #endif +static inline void +enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ + /* + * We don't actually care about the ttbr0 mapping, so point it at the + * zero page. + */ + update_saved_ttbr0(tsk, &init_mm); +} + static inline void __switch_mm(struct mm_struct *next) { unsigned int cpu = smp_processor_id(); From 1d6c92408159ccfdc60cd988fe229804447f9c7e Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Wed, 15 Nov 2017 10:03:53 -0200 Subject: [PATCH 0509/3326] Revert "ARM: dts: imx53: add srtc node" commit e501506d3ea00eefa64463ebd9e5c13ee70990bd upstream. This reverts commit 5b725054147deaf966b3919e10a86c6bfe946a18. The rtc block on i.MX53 is a completely different hardware than the one found on i.MX25. Reported-by: Noel Vellemans Suggested-by: Juergen Borleis Signed-off-by: Fabio Estevam Signed-off-by: Shawn Guo Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/imx53.dtsi | 9 --------- 1 file changed, 9 deletions(-) diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 8bf0d89cdd355c..2e516f4985e4cd 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -433,15 +433,6 @@ clock-names = "ipg", "per"; }; - srtc: srtc@53fa4000 { - compatible = "fsl,imx53-rtc", "fsl,imx25-rtc"; - reg = <0x53fa4000 0x4000>; - interrupts = <24>; - interrupt-parent = <&tzic>; - clocks = <&clks IMX5_CLK_SRTC_GATE>; - clock-names = "ipg"; - }; - iomuxc: iomuxc@53fa8000 { compatible = "fsl,imx53-iomuxc"; reg = <0x53fa8000 0x4000>; From 2ced9e2a850c40d3bfd7af4791f985318facff68 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 3 Oct 2017 18:14:13 +0100 Subject: [PATCH 0510/3326] bus: arm-cci: Fix use of smp_processor_id() in preemptible context commit 4608af8aa53e7f3922ddee695d023b7bcd5cb35b upstream. The ARM CCI driver seem to be using smp_processor_id() in a preemptible context, which is likely to make a DEBUG_PREMPT kernel scream at boot time. Turn this into a get_cpu()/put_cpu() that extends over the CPU hotplug registration, making sure that we don't race against a CPU down operation. 
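[ Editor's note: a minimal sketch of the get_cpu()/put_cpu() pattern this fix applies, not the arm-cci code itself; struct example_pmu, example_pmu_init() and EXAMPLE_CPUHP_ONLINE are placeholders, and the usual kernel headers (<linux/smp.h>, <linux/cpumask.h>, <linux/cpuhotplug.h>) are assumed. The CPU number is only meaningful while preemption is disabled, so it is taken via get_cpu() and preemption is re-enabled only after the hotplug instance has been registered: ]

	static int example_register_pmu(struct example_pmu *pmu)
	{
		int cpu = get_cpu();	/* disables preemption, returns this CPU */
		int ret;

		cpumask_set_cpu(cpu, &pmu->cpus);

		ret = example_pmu_init(pmu);
		if (ret) {
			put_cpu();	/* re-enable preemption on the error path too */
			return ret;
		}

		cpuhp_state_add_instance_nocalls(EXAMPLE_CPUHP_ONLINE, &pmu->node);
		put_cpu();	/* re-enables preemption; a later CPU-down is handled by the hotplug callback */
		return 0;
	}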
Signed-off-by: Marc Zyngier Acked-by: Mark Rutland Signed-off-by: Pawel Moll Signed-off-by: Greg Kroah-Hartman --- drivers/bus/arm-cci.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 3c29d36702a8ec..5426c04fe24bc8 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev) raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); mutex_init(&cci_pmu->reserve_mutex); atomic_set(&cci_pmu->active_events, 0); - cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); + cpumask_set_cpu(get_cpu(), &cci_pmu->cpus); ret = cci_pmu_init(cci_pmu, pdev); - if (ret) + if (ret) { + put_cpu(); return ret; + } cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, &cci_pmu->node); + put_cpu(); pr_info("ARM %s PMU driver probed", cci_pmu->model->name); return 0; } From a724b569a50d2e94fd2d3ea123e1b5ad32293c61 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 27 Aug 2017 11:06:50 +0100 Subject: [PATCH 0511/3326] bus: arm-ccn: Check memory allocation failure commit 24771179c5c138f0ea3ef88b7972979f62f2d5db upstream. Check memory allocation failures and return -ENOMEM in such cases This avoids a potential NULL pointer dereference. Signed-off-by: Christophe JAILLET Acked-by: Scott Branden Signed-off-by: Pawel Moll Signed-off-by: Greg Kroah-Hartman --- drivers/bus/arm-ccn.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index e8c6946fed9d26..be4bdead407545 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -1271,6 +1271,10 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id); name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL); + if (!name) { + err = -ENOMEM; + goto error_choose_name; + } snprintf(name, len + 1, "ccn_%d", ccn->dt.id); } @@ -1318,6 +1322,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) error_pmu_register: error_set_affinity: +error_choose_name: ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); From 8741b5ab49403be43771adc57e1c0031514910b9 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 3 Oct 2017 18:14:12 +0100 Subject: [PATCH 0512/3326] bus: arm-ccn: Fix use of smp_processor_id() in preemptible context commit b18c2b9487d8e797fc0a757e57ac3645348c5fba upstream. Booting a DEBUG_PREEMPT enabled kernel on a CCN-based system results in the following splat: [...] arm-ccn e8000000.ccn: No access to interrupts, using timer. BUG: using smp_processor_id() in preemptible [00000000] code: swapper/0/1 caller is debug_smp_processor_id+0x1c/0x28 CPU: 1 PID: 1 Comm: swapper/0 Not tainted 4.13.0 #6111 Hardware name: AMD Seattle/Seattle, BIOS 17:08:23 Jun 26 2017 Call trace: [] dump_backtrace+0x0/0x278 [] show_stack+0x24/0x30 [] dump_stack+0x8c/0xb0 [] check_preemption_disabled+0xfc/0x100 [] debug_smp_processor_id+0x1c/0x28 [] arm_ccn_probe+0x358/0x4f0 [...] as we use smp_processor_id() in the wrong context. Turn this into a get_cpu()/put_cpu() that extends over the CPU hotplug registration, making sure that we don't race against a CPU down operation. 
Signed-off-by: Marc Zyngier Acked-by: Mark Rutland Signed-off-by: Pawel Moll Signed-off-by: Greg Kroah-Hartman --- drivers/bus/arm-ccn.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index be4bdead407545..66cb12655e8406 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -1301,7 +1301,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) } /* Pick one CPU which we will use to collect data from CCN... */ - cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); + cpumask_set_cpu(get_cpu(), &ccn->dt.cpu); /* Also make sure that the overflow interrupt is handled by this CPU */ if (ccn->irq) { @@ -1318,10 +1318,12 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, &ccn->dt.node); + put_cpu(); return 0; error_pmu_register: error_set_affinity: + put_cpu(); error_choose_name: ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); for (i = 0; i < ccn->num_xps; i++) From 1cef55be2b6bba7f4c262b35db385425ae1bf04e Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Wed, 11 Oct 2017 22:33:24 +0100 Subject: [PATCH 0513/3326] bus: arm-ccn: fix module unloading Error: Removing state 147 which has instances left. commit b69f63ebf553504739cc8534cbed31bd530c6f0b upstream. Unregistering the driver before calling cpuhp_remove_multi_state() removes any remaining hotplug cpu instances so __cpuhp_remove_state_cpuslocked() doesn't emit this warning: [ 268.748362] Error: Removing state 147 which has instances left. [ 268.748373] ------------[ cut here ]------------ [ 268.748386] WARNING: CPU: 2 PID: 5476 at kernel/cpu.c:1734 __cpuhp_remove_state_cpuslocked+0x454/0x4f0 [ 268.748389] Modules linked in: arm_ccn(-) [last unloaded: arm_ccn] [ 268.748403] CPU: 2 PID: 5476 Comm: rmmod Tainted: G W 4.14.0-rc4+ #3 [ 268.748406] Hardware name: AMD Seattle/Seattle, BIOS 10:18:39 Dec 8 2016 [ 268.748410] task: ffff8001a18ca000 task.stack: ffff80019c120000 [ 268.748416] PC is at __cpuhp_remove_state_cpuslocked+0x454/0x4f0 [ 268.748421] LR is at __cpuhp_remove_state_cpuslocked+0x448/0x4f0 [ 268.748425] pc : [] lr : [] pstate: 60000145 [ 268.748427] sp : ffff80019c127d30 [ 268.748430] x29: ffff80019c127d30 x28: ffff8001a18ca000 [ 268.748437] x27: ffff20000c2cb000 x26: 1fffe4000042d490 [ 268.748443] x25: ffff20000216a480 x24: 0000000000000000 [ 268.748449] x23: ffff20000b08e000 x22: 0000000000000001 [ 268.748455] x21: 0000000000000093 x20: 00000000000016f8 [ 268.748460] x19: ffff20000c2cbb80 x18: 0000ffffb5fe7c58 [ 268.748466] x17: 00000000004402d0 x16: 1fffe40001864f01 [ 268.748472] x15: ffff20000c4bf8b0 x14: 0000000000000000 [ 268.748477] x13: 0000000000007032 x12: ffff20000829ae48 [ 268.748483] x11: ffff20000c4bf000 x10: 0000000000000004 [ 268.748488] x9 : 0000000000006fbc x8 : ffff20000c318a40 [ 268.748494] x7 : 0000000000000000 x6 : ffff040001864f02 [ 268.748500] x5 : 0000000000000000 x4 : 0000000000000000 [ 268.748505] x3 : 0000000000000007 x2 : dfff200000000000 [ 268.748510] x1 : 000000000000ad3d x0 : 00000000000001f0 [ 268.748516] Call trace: [ 268.748521] Exception stack(0xffff80019c127bf0 to 0xffff80019c127d30) [ 268.748526] 7be0: 00000000000001f0 000000000000ad3d [ 268.748531] 7c00: dfff200000000000 0000000000000007 0000000000000000 0000000000000000 [ 268.748535] 7c20: ffff040001864f02 0000000000000000 ffff20000c318a40 0000000000006fbc [ 268.748539] 7c40: 0000000000000004 ffff20000c4bf000 ffff20000829ae48 0000000000007032 [ 268.748544] 7c60: 0000000000000000 ffff20000c4bf8b0 
1fffe40001864f01 00000000004402d0 [ 268.748548] 7c80: 0000ffffb5fe7c58 ffff20000c2cbb80 00000000000016f8 0000000000000093 [ 268.748553] 7ca0: 0000000000000001 ffff20000b08e000 0000000000000000 ffff20000216a480 [ 268.748557] 7cc0: 1fffe4000042d490 ffff20000c2cb000 ffff8001a18ca000 ffff80019c127d30 [ 268.748562] 7ce0: ffff2000081729e0 ffff80019c127d30 ffff2000081729ec 0000000060000145 [ 268.748566] 7d00: 00000000000001f0 0000000000000000 0001000000000000 0000000000000000 [ 268.748569] 7d20: ffff80019c127d30 ffff2000081729ec [ 268.748575] [] __cpuhp_remove_state_cpuslocked+0x454/0x4f0 [ 268.748580] [] __cpuhp_remove_state+0x54/0x80 [ 268.748597] [] arm_ccn_exit+0x2c/0x70 [arm_ccn] [ 268.748604] [] SyS_delete_module+0x5a4/0x708 [ 268.748607] Exception stack(0xffff80019c127ec0 to 0xffff80019c128000) [ 268.748612] 7ec0: 0000000019bb7258 0000000000000800 ba64d0fb3d26a800 00000000000000da [ 268.748616] 7ee0: 0000ffffb6144e28 0000ffffcd95b409 fefefefefefefeff 7f7f7f7f7f7f7f7f [ 268.748621] 7f00: 000000000000006a 1999999999999999 0000ffffb6179000 0000000000bbcc6d [ 268.748625] 7f20: 0000ffffb6176b98 0000ffffcd95c2d0 0000ffffb5fe7b58 0000ffffb6163000 [ 268.748630] 7f40: 0000ffffb60ad3e0 00000000004402d0 0000ffffb5fe7c58 0000000019bb71f0 [ 268.748634] 7f60: 0000ffffcd95c740 0000000000000000 0000000019bb71f0 0000000000416700 [ 268.748639] 7f80: 0000000000000000 00000000004402e8 0000000019bb6010 0000ffffcd95c748 [ 268.748643] 7fa0: 0000000000000000 0000ffffcd95c460 00000000004113a8 0000ffffcd95c460 [ 268.748648] 7fc0: 0000ffffb60ad3e8 0000000080000000 0000000019bb7258 000000000000006a [ 268.748652] 7fe0: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 [ 268.748657] [] __sys_trace_return+0x0/0x4 [ 268.748661] ---[ end trace a996d358dcaa7f9c ]--- Fixes: 8df038725ad5 ("bus/arm-ccn: Use cpu-hp's multi instance support instead custom list") Signed-off-by: Kim Phillips Acked-by: Sebastian Andrzej Siewior Signed-off-by: Pawel Moll Signed-off-by: Greg Kroah-Hartman --- drivers/bus/arm-ccn.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 66cb12655e8406..03d7faf51c2bba 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -1587,8 +1587,8 @@ static int __init arm_ccn_init(void) static void __exit arm_ccn_exit(void) { - cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); platform_driver_unregister(&arm_ccn_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); } module_init(arm_ccn_init); From 45f846ca6b2119237adfa19a9bbfce43dadb558e Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 2 Nov 2017 15:22:27 +0200 Subject: [PATCH 0514/3326] IB/core: Avoid unnecessary return value check commit 2e4c85c6edc80fa532b2c7e1eb3597ef4d4bbb8f upstream. Since there is nothing done with non zero return value, such check is avoided. 
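[ Editor's note: the cleanup amounts to the generic simplification sketched below, using a hypothetical do_something(); when the caller adds no handling of its own, forwarding the callee's 0-or-negative-errno result directly is equivalent: ]

	/* Before: the result is checked only to be passed through unchanged. */
	ret = do_something(dev, port, index);
	if (ret)
		return ret;
	return 0;

	/* After: identical behaviour, one statement. */
	return do_something(dev, port, index);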
Signed-off-by: Parav Pandit Reviewed-by: Daniel Jurgens Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/security.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 28607bb42d873e..23278ed5be4517 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -697,20 +697,13 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) { - int ret; - if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) return -EACCES; - ret = ib_security_pkey_access(map->agent.device, - map->agent.port_num, - pkey_index, - map->agent.security); - - if (ret) - return ret; - - return 0; + return ib_security_pkey_access(map->agent.device, + map->agent.port_num, + pkey_index, + map->agent.security); } #endif /* CONFIG_SECURITY_INFINIBAND */ From 796c9d1e275e3e35ae13b16021498bc12ab2593e Mon Sep 17 00:00:00 2001 From: Daniel Jurgens Date: Wed, 29 Nov 2017 20:10:39 +0200 Subject: [PATCH 0515/3326] IB/core: Only enforce security for InfiniBand commit 315d160c5a4e034a576a13aa21e7235d5c9ec609 upstream. For now the only LSM security enforcement mechanism available is specific to InfiniBand. Bypass enforcement for non-IB link types. This fixes a regression where modify_qp fails for iWARP because querying the PKEY returns -EINVAL. Cc: Paul Moore Cc: Don Dutile Reported-by: Potnuri Bharat Teja Fixes: d291f1a65232("IB/core: Enforce PKey security on QPs") Fixes: 47a2b338fe63("IB/core: Enforce security on management datagrams") Signed-off-by: Daniel Jurgens Reviewed-by: Parav Pandit Tested-by: Potnuri Bharat Teja Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/security.c | 50 +++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 23278ed5be4517..a337386652b073 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -417,8 +417,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec) int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) { + u8 i = rdma_start_port(dev); + bool is_ib = false; int ret; + while (i <= rdma_end_port(dev) && !is_ib) + is_ib = rdma_protocol_ib(dev, i++); + + /* If this isn't an IB device don't create the security context */ + if (!is_ib) + return 0; + qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL); if (!qp->qp_sec) return -ENOMEM; @@ -441,6 +450,10 @@ EXPORT_SYMBOL(ib_create_qp_security); void ib_destroy_qp_security_begin(struct ib_qp_security *sec) { + /* Return if not IB */ + if (!sec) + return; + mutex_lock(&sec->mutex); /* Remove the QP from the lists so it won't get added to @@ -470,6 +483,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec) int ret; int i; + /* Return if not IB */ + if (!sec) + return; + /* If a concurrent cache update is in progress this * QP security could be marked for an error state * transition. Wait for this to complete. 
@@ -505,6 +522,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec) { int i; + /* Return if not IB */ + if (!sec) + return; + /* If a concurrent cache update is occurring we must * wait until this QP security structure is processed * in the QP to error flow before destroying it because @@ -557,7 +578,7 @@ int ib_security_modify_qp(struct ib_qp *qp, { int ret = 0; struct ib_ports_pkeys *tmp_pps; - struct ib_ports_pkeys *new_pps; + struct ib_ports_pkeys *new_pps = NULL; struct ib_qp *real_qp = qp->real_qp; bool special_qp = (real_qp->qp_type == IB_QPT_SMI || real_qp->qp_type == IB_QPT_GSI || @@ -565,18 +586,27 @@ int ib_security_modify_qp(struct ib_qp *qp, bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) || (qp_attr_mask & IB_QP_ALT_PATH)); + WARN_ONCE((qp_attr_mask & IB_QP_PORT && + rdma_protocol_ib(real_qp->device, qp_attr->port_num) && + !real_qp->qp_sec), + "%s: QP security is not initialized for IB QP: %d\n", + __func__, real_qp->qp_num); + /* The port/pkey settings are maintained only for the real QP. Open * handles on the real QP will be in the shared_qp_list. When * enforcing security on the real QP all the shared QPs will be * checked as well. */ - if (pps_change && !special_qp) { + if (pps_change && !special_qp && real_qp->qp_sec) { mutex_lock(&real_qp->qp_sec->mutex); new_pps = get_new_pps(real_qp, qp_attr, qp_attr_mask); - + if (!new_pps) { + mutex_unlock(&real_qp->qp_sec->mutex); + return -ENOMEM; + } /* Add this QP to the lists for the new port * and pkey settings before checking for permission * in case there is a concurrent cache update @@ -600,7 +630,7 @@ int ib_security_modify_qp(struct ib_qp *qp, qp_attr_mask, udata); - if (pps_change && !special_qp) { + if (new_pps) { /* Clean up the lists and free the appropriate * ports_pkeys structure. */ @@ -631,6 +661,9 @@ int ib_security_pkey_access(struct ib_device *dev, u16 pkey; int ret; + if (!rdma_protocol_ib(dev, port_num)) + return 0; + ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey); if (ret) return ret; @@ -665,6 +698,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent, { int ret; + if (!rdma_protocol_ib(agent->device, agent->port_num)) + return 0; + ret = security_ib_alloc_security(&agent->security); if (ret) return ret; @@ -690,6 +726,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent, void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) { + if (!rdma_protocol_ib(agent->device, agent->port_num)) + return; + security_ib_free_security(agent->security); if (agent->lsm_nb_reg) unregister_lsm_notifier(&agent->lsm_nb); @@ -697,6 +736,9 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) { + if (!rdma_protocol_ib(map->agent.device, map->agent.port_num)) + return 0; + if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) return -EACCES; From 7b9cf144dc0b164761212586dd86ace41fd601f4 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 6 Oct 2017 15:04:33 +0200 Subject: [PATCH 0516/3326] crypto: talitos - fix AEAD test failures commit ec8c7d14acc0a477429d3a6fade5dab72c996c82 upstream. AEAD tests fail when destination SG list has more than 1 element. 
[ 2.058752] alg: aead: Test 1 failed on encryption for authenc-hmac-sha1-cbc-aes-talitos [ 2.066965] 00000000: 53 69 6e 67 6c 65 20 62 6c 6f 63 6b 20 6d 73 67 00000010: c0 43 ff 74 c0 43 ff e0 de 83 d1 20 de 84 8e 54 00000020: de 83 d7 c4 [ 2.082138] alg: aead: Test 1 failed on encryption for authenc-hmac-sha1-cbc-aes-talitos [ 2.090435] 00000000: 53 69 6e 67 6c 65 20 62 6c 6f 63 6b 20 6d 73 67 00000010: de 84 ea 58 c0 93 1a 24 de 84 e8 59 de 84 f1 20 00000020: 00 00 00 00 [ 2.105721] alg: aead: Test 1 failed on encryption for authenc-hmac-sha1-cbc-3des-talitos [ 2.114259] 00000000: 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 73 74 00000010: 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 74 65 00000020: 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 00000030: 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 00000040: 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 00000050: 65 72 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 00000060: 72 63 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 00000070: 63 65 65 72 73 74 54 20 6f 6f 4d 20 6e 61 0a 79 00000080: c0 50 f1 ac c0 50 f3 38 c0 50 f3 94 c0 50 f5 30 00000090: c0 99 74 3c [ 2.166410] alg: aead: Test 1 failed on encryption for authenc-hmac-sha1-cbc-3des-talitos [ 2.174794] 00000000: 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 73 74 00000010: 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 74 65 00000020: 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 00000030: 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 00000040: 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 00000050: 65 72 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 00000060: 72 63 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 00000070: 63 65 65 72 73 74 54 20 6f 6f 4d 20 6e 61 0a 79 00000080: c0 50 f1 ac c0 50 f3 38 c0 50 f3 94 c0 50 f5 30 00000090: c0 99 74 3c [ 2.226486] alg: No test for authenc(hmac(sha224),cbc(aes)) (authenc-hmac-sha224-cbc-aes-talitos) [ 2.236459] alg: No test for authenc(hmac(sha224),cbc(aes)) (authenc-hmac-sha224-cbc-aes-talitos) [ 2.247196] alg: aead: Test 1 failed on encryption for authenc-hmac-sha224-cbc-3des-talitos [ 2.255555] 00000000: 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 73 74 00000010: 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 74 65 00000020: 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 00000030: 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 00000040: 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 00000050: 65 72 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 00000060: 72 63 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 00000070: 63 65 65 72 73 74 54 20 6f 6f 4d 20 6e 61 0a 79 00000080: c0 50 f1 ac c0 50 f3 38 c0 50 f3 94 c0 50 f5 30 00000090: c0 99 74 3c c0 96 e5 b8 [ 2.309004] alg: aead: Test 1 failed on encryption for authenc-hmac-sha224-cbc-3des-talitos [ 2.317562] 00000000: 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 73 74 00000010: 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 74 65 00000020: 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 00000030: 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 00000040: 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 00000050: 65 72 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 00000060: 72 63 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 00000070: 63 65 65 72 73 74 54 20 6f 6f 4d 20 6e 61 0a 79 00000080: c0 50 f1 ac c0 50 f3 38 c0 50 f3 94 c0 50 f5 30 00000090: c0 99 74 3c c0 96 e5 b8 [ 2.370710] alg: aead: Test 1 failed on encryption for authenc-hmac-sha256-cbc-aes-talitos [ 2.379177] 00000000: 53 69 6e 67 6c 65 20 62 6c 6f 63 6b 20 6d 73 67 00000010: 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 74 65 00000020: 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 
65 65 72 [ 2.397863] alg: aead: Test 1 failed on encryption for authenc-hmac-sha256-cbc-aes-talitos [ 2.406134] 00000000: 53 69 6e 67 6c 65 20 62 6c 6f 63 6b 20 6d 73 67 00000010: 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 74 65 00000020: 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 [ 2.424789] alg: aead: Test 1 failed on encryption for authenc-hmac-sha256-cbc-3des-talitos [ 2.433491] 00000000: 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 73 74 00000010: 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 74 65 00000020: 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 00000030: 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 00000040: 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 00000050: 65 72 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 00000060: 72 63 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 00000070: 63 65 65 72 73 74 54 20 6f 6f 4d 20 6e 61 0a 79 00000080: c0 50 f1 ac c0 50 f3 38 c0 50 f3 94 c0 50 f5 30 00000090: c0 99 74 3c c0 96 e5 b8 c0 96 e9 20 c0 00 3d dc [ 2.488832] alg: aead: Test 1 failed on encryption for authenc-hmac-sha256-cbc-3des-talitos [ 2.497387] 00000000: 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 73 74 00000010: 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 74 65 00000020: 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 65 72 00000030: 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 72 63 00000040: 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 63 65 00000050: 65 72 73 74 54 20 6f 6f 4d 20 6e 61 20 79 65 53 00000060: 72 63 74 65 20 73 6f 54 20 6f 61 4d 79 6e 53 20 00000070: 63 65 65 72 73 74 54 20 6f 6f 4d 20 6e 61 0a 79 00000080: c0 50 f1 ac c0 50 f3 38 c0 50 f3 94 c0 50 f5 30 00000090: c0 99 74 3c c0 96 e5 b8 c0 96 e9 20 c0 00 3d dc This patch fixes that. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/talitos.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index dff88838dce762..cd8a37e60259ad 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1232,12 +1232,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, sg_link_tbl_len += authsize; } - sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc, - &desc->ptr[4], sg_count, areq->assoclen, - tbl_off); + ret = talitos_sg_map(dev, areq->src, cryptlen, edesc, &desc->ptr[4], + sg_count, areq->assoclen, tbl_off); - if (sg_count > 1) { - tbl_off += sg_count; + if (ret > 1) { + tbl_off += ret; sync_needed = true; } From 68b426477820e396f204e06c6de4063a82d63422 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 6 Oct 2017 15:04:35 +0200 Subject: [PATCH 0517/3326] crypto: talitos - fix memory corruption on SEC2 commit e04a61bebc5da1535b6f194b464295b8d558e2fc upstream. On SEC2, when using the old descriptors type (hmac snoop no afeu) for doing IPsec, the CICV out pointeur points out of the allocated memory. [ 2.502554] ============================================================================= [ 2.510740] BUG dma-kmalloc-256 (Not tainted): Redzone overwritten [ 2.516907] ----------------------------------------------------------------------------- [ 2.516907] [ 2.526535] Disabling lock debugging due to kernel taint [ 2.531845] INFO: 0xde858108-0xde85810b. 
First byte 0xf8 instead of 0xcc [ 2.538549] INFO: Allocated in 0x806181a9 age=0 cpu=0 pid=58 [ 2.544229] __kmalloc+0x374/0x564 [ 2.547649] talitos_edesc_alloc+0x17c/0x48c [ 2.551929] aead_edesc_alloc+0x80/0x154 [ 2.555863] aead_encrypt+0x30/0xe0 [ 2.559368] __test_aead+0x5a0/0x1f3c [ 2.563042] test_aead+0x2c/0x110 [ 2.566371] alg_test_aead+0x5c/0xf4 [ 2.569958] alg_test+0x1dc/0x5a0 [ 2.573305] cryptomgr_test+0x50/0x70 [ 2.576984] kthread+0xd8/0x134 [ 2.580155] ret_from_kernel_thread+0x5c/0x64 [ 2.584534] INFO: Freed in ipsec_esp_encrypt_done+0x130/0x240 age=6 cpu=0 pid=0 [ 2.591839] ipsec_esp_encrypt_done+0x130/0x240 [ 2.596395] flush_channel+0x1dc/0x488 [ 2.600161] talitos2_done_4ch+0x30/0x200 [ 2.604185] tasklet_action+0xa0/0x13c [ 2.607948] __do_softirq+0x148/0x6cc [ 2.611623] irq_exit+0xc0/0x124 [ 2.614869] call_do_irq+0x24/0x3c [ 2.618292] do_IRQ+0x78/0x108 [ 2.621369] ret_from_except+0x0/0x14 [ 2.625055] finish_task_switch+0x58/0x350 [ 2.629165] schedule+0x80/0x134 [ 2.632409] schedule_preempt_disabled+0x38/0xc8 [ 2.637042] cpu_startup_entry+0xe4/0x190 [ 2.641074] start_kernel+0x3f4/0x408 [ 2.644741] 0x3438 [ 2.646857] INFO: Slab 0xdffbdb00 objects=9 used=1 fp=0xde8581c0 flags=0x0080 [ 2.653978] INFO: Object 0xde858008 @offset=8 fp=0xca4395df [ 2.653978] [ 2.661032] Redzone de858000: cc cc cc cc cc cc cc cc ........ [ 2.669029] Object de858008: 00 00 00 02 00 00 00 02 00 6b 6b 6b 1e 83 ea 28 .........kkk...( [ 2.677628] Object de858018: 00 00 00 70 1e 85 80 64 ff 73 1d 21 6b 6b 6b 6b ...p...d.s.!kkkk [ 2.686228] Object de858028: 00 20 00 00 1e 84 17 24 00 10 00 00 1e 85 70 00 . .....$......p. [ 2.694829] Object de858038: 00 18 00 00 1e 84 17 44 00 08 00 00 1e 83 ea 28 .......D.......( [ 2.703430] Object de858048: 00 80 00 00 1e 84 f0 00 00 80 00 00 1e 85 70 10 ..............p. [ 2.712030] Object de858058: 00 20 6b 00 1e 85 80 f4 6b 6b 6b 6b 00 80 02 00 . k.....kkkk.... [ 2.720629] Object de858068: 1e 84 f0 00 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b ....kkkkkkkkkkkk [ 2.729230] Object de858078: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk [ 2.737830] Object de858088: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk [ 2.746429] Object de858098: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk [ 2.755029] Object de8580a8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk [ 2.763628] Object de8580b8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk [ 2.772229] Object de8580c8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk [ 2.780829] Object de8580d8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk [ 2.789430] Object de8580e8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 73 b0 ea 9f kkkkkkkkkkkks... [ 2.798030] Object de8580f8: e8 18 80 d6 56 38 44 c0 db e3 4f 71 f7 ce d1 d3 ....V8D...Oq.... 
[ 2.806629] Redzone de858108: f8 bd 3e 4f ..>O [ 2.814279] Padding de8581b0: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ [ 2.822283] CPU: 0 PID: 0 Comm: swapper Tainted: G B 4.9.50-g995be12679 #179 [ 2.831819] Call Trace: [ 2.834301] [dffefd20] [c01aa9a8] check_bytes_and_report+0x100/0x194 (unreliable) [ 2.841801] [dffefd50] [c01aac3c] check_object+0x200/0x530 [ 2.847306] [dffefd80] [c01ae584] free_debug_processing+0x290/0x690 [ 2.853585] [dffefde0] [c01aec8c] __slab_free+0x308/0x628 [ 2.859000] [dffefe80] [c05057f4] ipsec_esp_encrypt_done+0x130/0x240 [ 2.865378] [dffefeb0] [c05002c4] flush_channel+0x1dc/0x488 [ 2.870968] [dffeff10] [c05007a8] talitos2_done_4ch+0x30/0x200 [ 2.876814] [dffeff30] [c002fe38] tasklet_action+0xa0/0x13c [ 2.882399] [dffeff60] [c002f118] __do_softirq+0x148/0x6cc [ 2.887896] [dffeffd0] [c002f954] irq_exit+0xc0/0x124 [ 2.892968] [dffefff0] [c0013adc] call_do_irq+0x24/0x3c [ 2.898213] [c0d4be00] [c000757c] do_IRQ+0x78/0x108 [ 2.903113] [c0d4be30] [c0015c08] ret_from_except+0x0/0x14 [ 2.908634] --- interrupt: 501 at finish_task_switch+0x70/0x350 [ 2.908634] LR = finish_task_switch+0x58/0x350 [ 2.919327] [c0d4bf20] [c085e1d4] schedule+0x80/0x134 [ 2.924398] [c0d4bf50] [c085e2c0] schedule_preempt_disabled+0x38/0xc8 [ 2.930853] [c0d4bf60] [c007f064] cpu_startup_entry+0xe4/0x190 [ 2.936707] [c0d4bfb0] [c096c434] start_kernel+0x3f4/0x408 [ 2.942198] [c0d4bff0] [00003438] 0x3438 [ 2.946137] FIX dma-kmalloc-256: Restoring 0xde858108-0xde85810b=0xcc [ 2.946137] [ 2.954158] FIX dma-kmalloc-256: Object at 0xde858008 not freed This patch reworks the handling of the CICV out in order to properly handle all cases. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/talitos.c | 42 ++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index cd8a37e60259ad..1e799886c57dc4 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1247,14 +1247,15 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); } - sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc, - &desc->ptr[5], sg_count, areq->assoclen, - tbl_off); + ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], + sg_count, areq->assoclen, tbl_off); if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); - if (sg_count > 1) { + /* ICV data */ + if (ret > 1) { + tbl_off += ret; edesc->icv_ool = true; sync_needed = true; @@ -1264,9 +1265,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, sizeof(struct talitos_ptr) + authsize; /* Add an entry to the link table for ICV data */ - tbl_ptr += sg_count - 1; - to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1); - tbl_ptr++; + to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1); to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN, is_sec1); to_talitos_ptr_len(tbl_ptr, authsize, is_sec1); @@ -1274,18 +1273,33 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* icv data follows link tables */ to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset, is_sec1); + } else { + dma_addr_t addr = edesc->dma_link_tbl; + + if (is_sec1) + addr += areq->assoclen + cryptlen; + else + addr += sizeof(struct talitos_ptr) * tbl_off; + + to_talitos_ptr(&desc->ptr[6], addr, is_sec1); + to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1); + } + } else if 
(!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) { + ret = talitos_sg_map(dev, areq->dst, authsize, edesc, + &desc->ptr[6], sg_count, areq->assoclen + + cryptlen, + tbl_off); + if (ret > 1) { + tbl_off += ret; + edesc->icv_ool = true; + sync_needed = true; + } else { + edesc->icv_ool = false; } } else { edesc->icv_ool = false; } - /* ICV data */ - if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) { - to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1); - to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl + - areq->assoclen + cryptlen, is_sec1); - } - /* iv out */ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, From 62744ebaeb91ff089865f90d0a0a1475a924809f Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 6 Oct 2017 15:04:37 +0200 Subject: [PATCH 0518/3326] crypto: talitos - fix setkey to check key weakness commit f384cdc4faf350fdb6ad93c5f26952b9ba7c7566 upstream. Crypto manager test report the following failures: [ 3.061081] alg: skcipher: setkey failed on test 5 for ecb-des-talitos: flags=100 [ 3.069342] alg: skcipher-ddst: setkey failed on test 5 for ecb-des-talitos: flags=100 [ 3.077754] alg: skcipher-ddst: setkey failed on test 5 for ecb-des-talitos: flags=100 This is due to setkey being expected to detect weak keys. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/talitos.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 1e799886c57dc4..8aa1212086f483 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1507,12 +1507,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 tmp[DES_EXPKEY_WORDS]; if (keylen > TALITOS_MAX_KEY_SIZE) { crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } + if (unlikely(crypto_ablkcipher_get_flags(cipher) & + CRYPTO_TFM_REQ_WEAK_KEY) && + !des_ekey(tmp, key)) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + memcpy(&ctx->key, key, keylen); ctx->keylen = keylen; From a2d93ada4fd30feeb3d0c914926f7a9fdea10102 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 6 Oct 2017 15:04:39 +0200 Subject: [PATCH 0519/3326] crypto: talitos - fix AEAD for sha224 on non sha224 capable chips commit 6cda075aff67a1b9b5ba1b2818091dc939643b6c upstream. sha224 AEAD test fails with: [ 2.803125] talitos ff020000.crypto: DEUISR 0x00000000_00000000 [ 2.808743] talitos ff020000.crypto: MDEUISR 0x80100000_00000000 [ 2.814678] talitos ff020000.crypto: DESCBUF 0x20731f21_00000018 [ 2.820616] talitos ff020000.crypto: DESCBUF 0x0628d64c_00000010 [ 2.826554] talitos ff020000.crypto: DESCBUF 0x0631005c_00000018 [ 2.832492] talitos ff020000.crypto: DESCBUF 0x0628d664_00000008 [ 2.838430] talitos ff020000.crypto: DESCBUF 0x061b13a0_00000080 [ 2.844369] talitos ff020000.crypto: DESCBUF 0x0631006c_00000080 [ 2.850307] talitos ff020000.crypto: DESCBUF 0x0631006c_00000018 [ 2.856245] talitos ff020000.crypto: DESCBUF 0x063100ec_00000000 [ 2.884972] talitos ff020000.crypto: failed to reset channel 0 [ 2.890503] talitos ff020000.crypto: done overflow, internal time out, or rngu error: ISR 0x20000000_00020000 [ 2.900652] alg: aead: encryption failed on test 1 for authenc-hmac-sha224-cbc-3des-talitos: ret=22 This is due to SHA224 not being supported by the HW. 
Allthough for hash we are able to init the hash context by SW, it is not possible for AEAD. Therefore SHA224 AEAD has to be deactivated. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/talitos.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 8aa1212086f483..b7184f305867e1 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -3068,6 +3068,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, t_alg->algt.alg.aead.setkey = aead_setkey; t_alg->algt.alg.aead.encrypt = aead_encrypt; t_alg->algt.alg.aead.decrypt = aead_decrypt; + if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && + !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) { + kfree(t_alg); + return ERR_PTR(-ENOTSUPP); + } break; case CRYPTO_ALG_TYPE_AHASH: alg = &t_alg->algt.alg.hash.halg.base; From 2040f8e814055a330aae1ed83c3fe2aba26173f0 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 6 Oct 2017 15:04:41 +0200 Subject: [PATCH 0520/3326] crypto: talitos - fix use of sg_link_tbl_len commit fbb22137c4d9bab536958b152d096fb3f98020ea upstream. sg_link_tbl_len shall be used instead of cryptlen, otherwise SECs which perform HW CICV verification will fail. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/talitos.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index b7184f305867e1..cf5c9701b89833 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1232,8 +1232,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, sg_link_tbl_len += authsize; } - ret = talitos_sg_map(dev, areq->src, cryptlen, edesc, &desc->ptr[4], - sg_count, areq->assoclen, tbl_off); + ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc, + &desc->ptr[4], sg_count, areq->assoclen, tbl_off); if (ret > 1) { tbl_off += ret; From 077415efefe77f6f3e82ae4868867e32ede01286 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 6 Oct 2017 15:04:43 +0200 Subject: [PATCH 0521/3326] crypto: talitos - fix ctr-aes-talitos commit 70d355ccea899dad47dc22d3a4406998f55143fd upstream. 
ctr-aes-talitos test fails as follows on SEC2 [ 0.837427] alg: skcipher: Test 1 failed (invalid result) on encryption for ctr-aes-talitos [ 0.845763] 00000000: 16 36 d5 ee 34 f8 06 25 d7 7f 8e 56 ca 88 43 45 [ 0.852345] 00000010: f9 3f f7 17 2a b2 12 23 30 43 09 15 82 dd e1 97 [ 0.858940] 00000020: a7 f7 32 b5 eb 25 06 13 9a ec f5 29 25 f8 4d 66 [ 0.865366] 00000030: b0 03 5b 8e aa 9a 42 b6 19 33 8a e2 9d 65 96 95 This patch fixes the descriptor type which is special for CTR AES Fixes: 5e75ae1b3cef6 ("crypto: talitos - add new crypto modes") Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/talitos.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index cf5c9701b89833..a19b5d0300a953 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2635,7 +2635,7 @@ static struct talitos_alg_template driver_algs[] = { .ivsize = AES_BLOCK_SIZE, } }, - .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | DESC_HDR_SEL0_AESU | DESC_HDR_MODE0_AESU_CTR, }, From 21e1e6192ba653dfcc42bb0b275a4af7af5e32b0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 24 Nov 2017 23:49:34 +0000 Subject: [PATCH 0522/3326] ARM: BUG if jumping to usermode address in kernel mode commit 8bafae202c82dc257f649ea3c275a0f35ee15113 upstream. Detect if we are returning to usermode via the normal kernel exit paths but the saved PSR value indicates that we are in kernel mode. This could occur due to corrupted stack state, which has been observed with "ftracetest". This ensures that we catch the problem case before we get to user code. Signed-off-by: Russell King Cc: Alex Shi Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/assembler.h | 18 ++++++++++++++++++ arch/arm/kernel/entry-header.S | 6 ++++++ 2 files changed, 24 insertions(+) diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index ad301f107dd286..bc8d4bbd82e277 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -518,4 +518,22 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm + .macro bug, msg, line +#ifdef CONFIG_THUMB2_KERNEL +1: .inst 0xde02 +#else +1: .inst 0xe7f001f2 +#endif +#ifdef CONFIG_DEBUG_BUGVERBOSE + .pushsection .rodata.str, "aMS", %progbits, 1 +2: .asciz "\msg" + .popsection + .pushsection __bug_table, "aw" + .align 2 + .word 1b, 2b + .hword \line + .popsection +#endif + .endm + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index d523cd8439a3df..7f4d80c2db6bf1 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -300,6 +300,8 @@ mov r2, sp ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr ldr lr, [r2, #\offset + S_PC]! @ get pc + tst r1, #0xcf + bne 1f msr spsr_cxsf, r1 @ save in spsr_svc #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) @ We must avoid clrex due to Cortex-A15 erratum #830321 @@ -314,6 +316,7 @@ @ after ldm {}^ add sp, sp, #\offset + PT_REGS_SIZE movs pc, lr @ return & move spsr_svc into cpsr +1: bug "Returning to usermode but unexpected PSR bits set?", \@ #elif defined(CONFIG_CPU_V7M) @ V7M restore. 
@ Note that we don't need to do clrex here as clearing the local @@ -329,6 +332,8 @@ ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr lr, [sp, #\offset + S_PC] @ get pc add sp, sp, #\offset + S_SP + tst r1, #0xcf + bne 1f msr spsr_cxsf, r1 @ save in spsr_svc @ We must avoid clrex due to Cortex-A15 erratum #830321 @@ -341,6 +346,7 @@ .endif add sp, sp, #PT_REGS_SIZE - S_SP movs pc, lr @ return & move spsr_svc into cpsr +1: bug "Returning to usermode but unexpected PSR bits set?", \@ #endif /* !CONFIG_THUMB2_KERNEL */ .endm From 14f13c9d58d8d738666d58fa7b2f8872286655bf Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 27 Nov 2017 11:22:42 +0000 Subject: [PATCH 0523/3326] ARM: avoid faulting on qemu commit 3aaf33bebda8d4ffcc0fc8ef39e6c1ac68823b11 upstream. When qemu starts a kernel in a bare environment, the default SCR has the AW and FW bits clear, which means that the kernel can't modify the PSR A or PSR F bits, and means that FIQs and imprecise aborts are always masked. When running uboot under qemu, the AW and FW SCR bits are set, and the kernel functions normally - and this is how real hardware behaves. Fix this for qemu by ignoring the FIQ bit. Fixes: 8bafae202c82 ("ARM: BUG if jumping to usermode address in kernel mode") Signed-off-by: Russell King Cc: Alex Shi Signed-off-by: Greg Kroah-Hartman --- arch/arm/kernel/entry-header.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 7f4d80c2db6bf1..0f07579af472c8 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -300,7 +300,7 @@ mov r2, sp ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr ldr lr, [r2, #\offset + S_PC]! @ get pc - tst r1, #0xcf + tst r1, #PSR_I_BIT | 0x0f bne 1f msr spsr_cxsf, r1 @ save in spsr_svc #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) @@ -332,7 +332,7 @@ ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr lr, [sp, #\offset + S_PC] @ get pc add sp, sp, #\offset + S_SP - tst r1, #0xcf + tst r1, #PSR_I_BIT | 0x0f bne 1f msr spsr_cxsf, r1 @ save in spsr_svc From f0d564230e08d314aac54cbcb1865a4b8fafe906 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 17 Nov 2017 18:35:53 +0000 Subject: [PATCH 0524/3326] irqchip/qcom: Fix u32 comparison with value less than zero [ Upstream commit e9990d70e8a063a7b894c5cbb99f630a0f41200d ] The comparison of u32 nregs being less than zero is never true since nregs is unsigned. Fix this by making nregs a signed integer. 
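[ Editor's note: a stand-alone illustration of the bug class, with a hypothetical count_registers() that returns a register count or a negative errno; when the result is stored in an unsigned variable the error check can never fire, because the negative value wraps to a large positive one: ]

	static int probe_buggy(struct platform_device *pdev)
	{
		u32 nregs = count_registers(pdev);	/* may return -EINVAL */

		if (nregs < 0)		/* always false: a u32 is never negative */
			return -EINVAL;
		return 0;
	}

	static int probe_fixed(struct platform_device *pdev)
	{
		int nregs = count_registers(pdev);

		if (nregs < 0)		/* the negative errno is preserved */
			return nregs;
		return 0;
	}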
Fixes: f20cc9b00c7b ("irqchip/qcom: Add IRQ combiner driver") Signed-off-by: Colin Ian King Signed-off-by: Thomas Gleixner Cc: Marc Zyngier Cc: kernel-janitors@vger.kernel.org Cc: Jason Cooper Link: https://lkml.kernel.org/r/20171117183553.2739-1-colin.king@canonical.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/irqchip/qcom-irq-combiner.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c index 6aa3ea4792148d..f3126593743960 100644 --- a/drivers/irqchip/qcom-irq-combiner.c +++ b/drivers/irqchip/qcom-irq-combiner.c @@ -238,7 +238,7 @@ static int __init combiner_probe(struct platform_device *pdev) { struct combiner *combiner; size_t alloc_sz; - u32 nregs; + int nregs; int err; nregs = count_registers(pdev); From 4d0d1bc65b5710f45bc5fe10b4fbfc2671b0851d Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Tue, 21 Nov 2017 13:23:53 +0100 Subject: [PATCH 0525/3326] net/smc: use sk_rcvbuf as start for rmb creation [ Upstream commit 4e1061f4a2bba1669c7297455c73ddafbebf2b12 ] Commit 3e034725c0d8 ("net/smc: common functions for RMBs and send buffers") merged handling of SMC receive and send buffers. It introduced sk_buf_size as merged start value for size determination. But since sk_buf_size is not used at all, sk_sndbuf is erroneously used as start for rmb creation. This patch makes sure, sk_buf_size is really used as intended, and sk_rcvbuf is used as start value for rmb creation. Fixes: 3e034725c0d8 ("net/smc: common functions for RMBs and send buffers") Signed-off-by: Ursula Braun Reviewed-by: Hans Wippel Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/smc/smc_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 413e3868fbf36f..7166e7ecbe861d 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -571,7 +571,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) /* use socket send buffer size (w/o overhead) as start value */ sk_buf_size = smc->sk.sk_sndbuf / 2; - for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2); + for (bufsize_short = smc_compress_bufsize(sk_buf_size); bufsize_short >= 0; bufsize_short--) { if (is_rmb) { From 54a13eb7f03ac8bb92b3b2c358e42eb08834240b Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 15 Nov 2017 18:17:07 +0900 Subject: [PATCH 0526/3326] kbuild: pkg: use --transform option to prefix paths in tar [ Upstream commit 2dbc644ac62bbcb9ee78e84719953f611be0413d ] For rpm-pkg and deb-pkg, a source tar file is created. All paths in the archive must be prefixed with the base name of the tar so that everything is contained in the directory when you extract it. Currently, scripts/package/Makefile uses a symlink for that, and removes it after the tar is created. If you terminate the build during the tar creation, the symlink is left over. Then, at the next package build, you will see a warning like follows: ln: '.' and 'kernel-4.14.0+/.' are the same file It is possible to fix it by adding -n (--no-dereference) option to the "ln" command, but a cleaner way is to use --transform option of "tar" command. This option is GNU extension, but it should not hurt to use it in the Linux build system. The 'S' flag is needed to exclude symlinks from the path fixup. Without it, symlinks in the kernel are broken. 
Signed-off-by: Masahiro Yamada Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- scripts/package/Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/scripts/package/Makefile b/scripts/package/Makefile index 73f9f3192b9fbf..2b4aa4c19b214c 100644 --- a/scripts/package/Makefile +++ b/scripts/package/Makefile @@ -39,10 +39,9 @@ if test "$(objtree)" != "$(srctree)"; then \ false; \ fi ; \ $(srctree)/scripts/setlocalversion --save-scmversion; \ -ln -sf $(srctree) $(2); \ tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \ - $(addprefix $(2)/,$(TAR_CONTENT) $(3)); \ -rm -f $(2) $(objtree)/.scmversion + --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \ +rm -f $(objtree)/.scmversion # rpm-pkg # --------------------------------------------------------------------------- From 394d0c93b9dd2dee2024a9d922a60b3a3aadb538 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 14 Nov 2017 20:38:07 +0900 Subject: [PATCH 0527/3326] coccinelle: fix parallel build with CHECK=scripts/coccicheck [ Upstream commit d7059ca0147adcd495f3c5b41f260e1ac55bb679 ] The command "make -j8 C=1 CHECK=scripts/coccicheck" produces lots of "coccicheck failed" error messages. Julia Lawall explained the Coccinelle behavior as follows: "The problem on the Coccinelle side is that it uses a subdirectory with the name of the semantic patch to store standard output and standard error for the different threads. I didn't want to use a name with the pid, so that one could easily find this information while Coccinelle is running. Normally the subdirectory is cleaned up when Coccinelle completes, so there is only one of them at a time. Maybe it is best to just add the pid. There is the risk that these subdirectories will accumulate if Coccinelle crashes in a way such that they don't get cleaned up, but Coccinelle could print a warning if it detects this case, rather than failing." When scripts/coccicheck is used as CHECK tool and -j option is given to Make, the whole of build process runs in parallel. So, multiple processes try to get access to the same subdirectory. I notice spatch creates the subdirectory only when it runs in parallel (i.e. --jobs is given and is greater than 1). Setting NPROC=1 is a reasonable solution; spatch does not create the subdirectory. Besides, ONLINE=1 mode takes a single file input for each spatch invocation, so there is no reason to parallelize it in the first place. Signed-off-by: Masahiro Yamada Acked-by: Julia Lawall Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- scripts/coccicheck | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/scripts/coccicheck b/scripts/coccicheck index 28ad1feff9e12d..dda283aba96b42 100755 --- a/scripts/coccicheck +++ b/scripts/coccicheck @@ -30,12 +30,6 @@ else VERBOSE=0 fi -if [ -z "$J" ]; then - NPROC=$(getconf _NPROCESSORS_ONLN) -else - NPROC="$J" -fi - FLAGS="--very-quiet" # You can use SPFLAGS to append extra arguments to coccicheck or override any @@ -70,6 +64,9 @@ if [ "$C" = "1" -o "$C" = "2" ]; then # Take only the last argument, which is the C file to test shift $(( $# - 1 )) OPTIONS="$COCCIINCLUDE $1" + + # No need to parallelize Coccinelle since this mode takes one input file. 
+ NPROC=1 else ONLINE=0 if [ "$KBUILD_EXTMOD" = "" ] ; then @@ -77,6 +74,12 @@ else else OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE" fi + + if [ -z "$J" ]; then + NPROC=$(getconf _NPROCESSORS_ONLN) + else + NPROC="$J" + fi fi if [ "$KBUILD_EXTMOD" != "" ] ; then From 86d1d015fe06267b60d496971d01c859aaa8ce08 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Wed, 22 Nov 2017 10:45:38 +0530 Subject: [PATCH 0528/3326] powerpc/perf: Fix pmu_count to count only nest imc pmus [ Upstream commit de34787f1096cce38e2590be0013b44418d14546 ] "pmu_count" in opal_imc_counters_probe() is intended to hold the number of successful nest imc pmu registerations. But current code also counts other imc units like core_imc and thread_imc. Patch add a check to count only nest imc pmus. Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/powernv/opal-imc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 21f6531fae20fc..b150f4deaccfc5 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -191,8 +191,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev) break; } - if (!imc_pmu_create(imc_dev, pmu_count, domain)) - pmu_count++; + if (!imc_pmu_create(imc_dev, pmu_count, domain)) { + if (domain == IMC_DOMAIN_NEST) + pmu_count++; + } } return 0; From 897088926c3477ef8d3d299473a2777dd9d1c9cb Mon Sep 17 00:00:00 2001 From: John Johansen Date: Wed, 15 Nov 2017 15:25:30 -0800 Subject: [PATCH 0529/3326] apparmor: fix leak of null profile name if profile allocation fails [ Upstream commit 4633307e5ed6128975595df43f796a10c41d11c1 ] Fixes: d07881d2edb0 ("apparmor: move new_null_profile to after profile lookup fns()") Reported-by: Seth Arnold Signed-off-by: John Johansen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- security/apparmor/policy.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 4243b0c3f0e4ac..586b249d3b46a1 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -502,7 +502,7 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat, { struct aa_profile *p, *profile; const char *bname; - char *name; + char *name = NULL; AA_BUG(!parent); @@ -562,6 +562,7 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat, return profile; fail: + kfree(name); aa_free_profile(profile); return NULL; } From ad7acca17e31d14ceca8824728440e727150ddd0 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 10 Nov 2017 16:12:29 -0800 Subject: [PATCH 0530/3326] x86/mpx/selftests: Fix up weird arrays [ Upstream commit a6400120d042397675fcf694060779d21e9e762d ] The MPX hardware data structurse are defined in a weird way: they define their size in bytes and then union that with the type with which we want to access them. Yes, this is weird, but it does work. But, new GCC's complain that we are accessing the array out of bounds. Just make it a zero-sized array so gcc will stop complaining. There was not really a bug here. Signed-off-by: Dave Hansen Acked-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20171111001229.58A7933D@viggo.jf.intel.com Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/mpx-hw.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/x86/mpx-hw.h b/tools/testing/selftests/x86/mpx-hw.h index 3f0093911f03d5..d1b61ab870f8d9 100644 --- a/tools/testing/selftests/x86/mpx-hw.h +++ b/tools/testing/selftests/x86/mpx-hw.h @@ -52,14 +52,14 @@ struct mpx_bd_entry { union { char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES]; - void *contents[1]; + void *contents[0]; }; } __attribute__((packed)); struct mpx_bt_entry { union { char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES]; - unsigned long contents[1]; + unsigned long contents[0]; }; } __attribute__((packed)); From 266fd76296bea9f392ae205e7a08f5f5726dc7f9 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 10 Nov 2017 18:48:50 +0000 Subject: [PATCH 0531/3326] mac80211_hwsim: Fix memory leak in hwsim_new_radio_nl() [ Upstream commit 67bd52386125ce1159c0581cbcd2740addf33cd4 ] hwsim_new_radio_nl() now copies the name attribute in order to add a null-terminator. mac80211_hwsim_new_radio() (indirectly) copies it again into the net_device structure, so the first copy is not used or freed later. Free the first copy before returning. Fixes: ff4dd73dd2b4 ("mac80211_hwsim: check HWSIM_ATTR_RADIO_NAME length") Signed-off-by: Ben Hutchings Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/mac80211_hwsim.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 6467ffac9811e7..a59b54328c07b8 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3108,6 +3108,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_new_radio_params param = { 0 }; const char *hwname = NULL; + int ret; param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; @@ -3147,7 +3148,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) param.regd = hwsim_world_regdom_custom[idx]; } - return mac80211_hwsim_new_radio(info, ¶m); + ret = mac80211_hwsim_new_radio(info, ¶m); + kfree(hwname); + return ret; } static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) From ffe6293d19dc965916177986aec254d0cb9cd71e Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Fri, 17 Nov 2017 19:16:17 +0300 Subject: [PATCH 0532/3326] gre6: use log_ecn_error module parameter in ip6_tnl_rcv() [ Upstream commit 981542c526ecd846920bc500e9989da906ee9fb9 ] After commit 308edfdf1563 ("gre6: Cleanup GREv6 receive path, call common GRE functions") it's not used anywhere in the module, but previously was used in ip6gre_rcv(). Fixes: 308edfdf1563 ("gre6: Cleanup GREv6 receive path, call common GRE functions") Signed-off-by: Alexey Kodanev Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_gre.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 59c121b932ac51..5d6bee070871f1 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -461,7 +461,7 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) &ipv6h->saddr, &ipv6h->daddr, tpi->key, tpi->proto); if (tunnel) { - ip6_tnl_rcv(tunnel, skb, tpi, NULL, false); + ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error); return PACKET_RCVD; } From 407de7d97a689d478e1c4b4fe2198af9c8cc3fcb Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Nov 2017 14:27:18 +0800 Subject: [PATCH 0533/3326] route: also update fnhe_genid when updating a route cache [ Upstream commit cebe84c6190d741045a322f5343f717139993c08 ] Now when ip route flush cache and it turn out all fnhe_genid != genid. If a redirect/pmtu icmp packet comes and the old fnhe is found and all it's members but fnhe_genid will be updated. Then next time when it looks up route and tries to rebind this fnhe to the new dst, the fnhe will be flushed due to fnhe_genid != genid. It causes this redirect/pmtu icmp packet acutally not to be applied. This patch is to also reset fnhe_genid when updating a route cache. Fixes: 5aad1de5ea2c ("ipv4: use separate genid for next hop exceptions") Acked-by: Hannes Frederic Sowa Signed-off-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv4/route.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 3d9f1c2f81c58a..aa659433a97395 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -651,9 +651,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, struct fnhe_hash_bucket *hash; struct fib_nh_exception *fnhe; struct rtable *rt; + u32 genid, hval; unsigned int i; int depth; - u32 hval = fnhe_hashfun(daddr); + + genid = fnhe_genid(dev_net(nh->nh_dev)); + hval = fnhe_hashfun(daddr); spin_lock_bh(&fnhe_lock); @@ -676,6 +679,8 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, } if (fnhe) { + if (fnhe->fnhe_genid != genid) + fnhe->fnhe_genid = genid; if (gw) fnhe->fnhe_gw = gw; if (pmtu) { @@ -700,7 +705,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, fnhe->fnhe_next = hash->chain; rcu_assign_pointer(hash->chain, fnhe); } - fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev)); + fnhe->fnhe_genid = genid; fnhe->fnhe_daddr = daddr; fnhe->fnhe_gw = gw; fnhe->fnhe_pmtu = pmtu; From ad199b18a9c00729bd000cf3565f6a45ebd4e177 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Nov 2017 14:27:06 +0800 Subject: [PATCH 0534/3326] route: update fnhe_expires for redirect when the fnhe exists [ Upstream commit e39d5246111399dbc6e11cd39fd8580191b86c47 ] Now when creating fnhe for redirect, it sets fnhe_expires for this new route cache. But when updating the exist one, it doesn't do it. It will cause this fnhe never to be expired. Paolo already noticed it before, in Jianlin's test case, it became even worse: When ip route flush cache, the old fnhe is not to be removed, but only clean it's members. When redirect comes again, this fnhe will be found and updated, but never be expired due to fnhe_expires not being set. So fix it by simply updating fnhe_expires even it's for redirect. 
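Taken together, the two route-cache fixes above amount to one update rule for an existing exception entry: re-stamp its generation id and refresh its expiry on every update. A minimal userspace sketch of that rule follows; all names and the time handling are illustrative, not the kernel's fnhe code.

#include <stdio.h>
#include <time.h>

struct exception_entry {
        unsigned int genid;     /* matches the table genid while valid   */
        long expires;           /* absolute expiry, refreshed on update  */
        unsigned int pmtu;      /* example payload                       */
};

static unsigned int table_genid;        /* bumped by a "flush" */

static void update_entry(struct exception_entry *e, unsigned int pmtu, long lifetime)
{
        /* Fix 1: re-stamp the generation id so an earlier flush does not
         * immediately invalidate the data being written right now. */
        e->genid = table_genid;

        if (pmtu)
                e->pmtu = pmtu;

        /* Fix 2: refresh the expiry on every update, redirects included,
         * otherwise the entry never ages out. */
        e->expires = (long)time(NULL) + lifetime;
}

int main(void)
{
        struct exception_entry e = { 0 };

        table_genid++;                  /* a cache flush happened earlier */
        update_entry(&e, 1400, 600);    /* redirect/pmtu update afterwards */
        printf("genid=%u (table=%u), pmtu=%u, expires in %lds\n",
               e.genid, table_genid, e.pmtu, e.expires - (long)time(NULL));
        return 0;
}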
Fixes: aee06da6726d ("ipv4: use seqlock for nh_exceptions") Reported-by: Jianlin Shi Acked-by: Hannes Frederic Sowa Signed-off-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv4/route.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index aa659433a97395..647cfc972bde0f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -683,10 +683,9 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, fnhe->fnhe_genid = genid; if (gw) fnhe->fnhe_gw = gw; - if (pmtu) { + if (pmtu) fnhe->fnhe_pmtu = pmtu; - fnhe->fnhe_expires = max(1UL, expires); - } + fnhe->fnhe_expires = max(1UL, expires); /* Update all cached dsts too */ rt = rcu_dereference(fnhe->fnhe_rth_input); if (rt) From f52688ce0d7bb5bd6932be99faec931be59d9c63 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 16 Nov 2017 17:39:18 +0000 Subject: [PATCH 0535/3326] rsi: fix memory leak on buf and usb_reg_buf [ Upstream commit d35ef8f846c72d84bfccf239c248c84f79c3a7e8 ] In the cases where len is too long, the error return path fails to kfree allocated buffers buf and usb_reg_buf. The simplest fix is to perform the sanity check on len before the allocations to avoid having to do the kfree'ing in the first place. Detected by CoverityScan, CID#1452258,1452259 ("Resource Leak") Fixes: 59f73e2ae185 ("rsi: check length before USB read/write register") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/rsi/rsi_91x_usb.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 81df09dd2636a6..f90c10b3c92119 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -162,13 +162,13 @@ static int rsi_usb_reg_read(struct usb_device *usbdev, u8 *buf; int status = -ENOMEM; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!buf) return status; - if (len > RSI_USB_CTRL_BUF_SIZE) - return -EINVAL; - status = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), USB_VENDOR_REGISTER_READ, @@ -207,13 +207,13 @@ static int rsi_usb_reg_write(struct usb_device *usbdev, u8 *usb_reg_buf; int status = -ENOMEM; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + usb_reg_buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!usb_reg_buf) return status; - if (len > RSI_USB_CTRL_BUF_SIZE) - return -EINVAL; - usb_reg_buf[0] = (value & 0x00ff); usb_reg_buf[1] = (value & 0xff00) >> 8; usb_reg_buf[2] = 0x0; From 65c4767b0e715d9c3310f9d26a7e981caf15c428 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 17 Nov 2017 15:37:57 -0800 Subject: [PATCH 0536/3326] drivers/rapidio/devices/rio_mport_cdev.c: fix resource leak in error handling path in 'rio_dma_transfer()' [ Upstream commit b1402dcb5643b7a27d46a05edd7491d49ba0e248 ] If 'dma_map_sg()', we should branch to the existing error handling path to free some resources before returning. 
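The fix above relies on the usual goto-based unwind pattern: a mid-function failure jumps to the existing cleanup labels instead of returning directly, so resources acquired earlier (pinned user pages in the real driver) are still released. A self-contained sketch with hypothetical names, not the rio_mport code:

#include <stdio.h>
#include <stdlib.h>

static int do_transfer(size_t len)
{
        int ret = 0;
        char *pages = malloc(len);              /* stands in for pinned user pages */
        if (!pages)
                return -1;

        char *sg = malloc(len / 2 + 1);         /* stands in for the SG table */
        if (!sg) {
                ret = -2;
                goto err_pg;                    /* do NOT just "return -2" here */
        }

        /* ... map, submit, wait for completion ... */

        free(sg);
err_pg:
        free(pages);                            /* released on success and on error */
        return ret;
}

int main(void)
{
        printf("do_transfer: %d\n", do_transfer(4096));
        return 0;
}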
Link: http://lkml.kernel.org/r/61292a4f369229eee03394247385e955027283f8.1505687047.git.christophe.jaillet@wanadoo.fr Signed-off-by: Christophe JAILLET Reviewed-by: Logan Gunthorpe Cc: Matt Porter Cc: Alexandre Bounine Cc: Lorenzo Stoakes Cc: Jesper Nilsson Cc: Christian K_nig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/rapidio/devices/rio_mport_cdev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 5beb0c361076ba..76afe1449cab10 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -963,7 +963,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, req->sgt.sgl, req->sgt.nents, dir); if (nents == -EFAULT) { rmcd_error("Failed to map SG list"); - return -EFAULT; + ret = -EFAULT; + goto err_pg; } ret = do_dma_request(req, xfer, sync, nents); From 30c2f774e13547f0dbb68cf779911d4d8920f0bc Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Fri, 17 Nov 2017 15:29:17 -0800 Subject: [PATCH 0537/3326] pipe: match pipe_max_size data type with procfs [ Upstream commit 98159d977f71c3b3dee898d1c34e56f520b094e7 ] Patch series "A few round_pipe_size() and pipe-max-size fixups", v3. While backporting Michael's "pipe: fix limit handling" patchset to a distro-kernel, Mikulas noticed that current upstream pipe limit handling contains a few problems: 1 - procfs signed wrap: echo'ing a large number into /proc/sys/fs/pipe-max-size and then cat'ing it back out shows a negative value. 2 - round_pipe_size() nr_pages overflow on 32bit: this would subsequently try roundup_pow_of_two(0), which is undefined. 3 - visible non-rounded pipe-max-size value: there is no mutual exclusion or protection between the time pipe_max_size is assigned a raw value from proc_dointvec_minmax() and when it is rounded. 4 - unsigned long -> unsigned int conversion makes for potential odd return errors from do_proc_douintvec_minmax_conv() and do_proc_dopipe_max_size_conv(). This version underwent the same testing as v1: https://marc.info/?l=linux-kernel&m=150643571406022&w=2 This patch (of 4): pipe_max_size is defined as an unsigned int: unsigned int pipe_max_size = 1048576; but its procfs/sysctl representation is an integer: static struct ctl_table fs_table[] = { ... { .procname = "pipe-max-size", .data = &pipe_max_size, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &pipe_proc_fn, .extra1 = &pipe_min_size, }, ... that is signed: int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf, size_t *lenp, loff_t *ppos) { ... ret = proc_dointvec_minmax(table, write, buf, lenp, ppos) This leads to signed results via procfs for large values of pipe_max_size: % echo 2147483647 >/proc/sys/fs/pipe-max-size % cat /proc/sys/fs/pipe-max-size -2147483648 Use unsigned operations on this variable to avoid such negative values. 
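The symptom above is easy to reproduce outside the kernel: as the series notes, the written size is rounded up (2147483647 becomes 2^31), and reading that back through a signed int formatter makes it appear negative. A tiny demo; the cast result is the typical two's-complement outcome, not something the patch itself guarantees:

#include <stdio.h>

int main(void)
{
        unsigned int pipe_max_size = 2147483648u;       /* 2147483647 rounded up to 2^31 */

        printf("as signed int:   %d\n", (int)pipe_max_size);   /* typically -2147483648 */
        printf("as unsigned int: %u\n", pipe_max_size);         /* 2147483648 */
        return 0;
}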
Link: http://lkml.kernel.org/r/1507658689-11669-2-git-send-email-joe.lawrence@redhat.com Signed-off-by: Joe Lawrence Reported-by: Mikulas Patocka Reviewed-by: Mikulas Patocka Cc: Michael Kerrisk Cc: Randy Dunlap Cc: Al Viro Cc: Jens Axboe Cc: Josh Poimboeuf Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/pipe.c | 2 +- kernel/sysctl.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/pipe.c b/fs/pipe.c index 349c9d56d4b34a..3909c55ed389bb 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1125,7 +1125,7 @@ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf, { int ret; - ret = proc_dointvec_minmax(table, write, buf, lenp, ppos); + ret = proc_douintvec_minmax(table, write, buf, lenp, ppos); if (ret < 0 || !write) return ret; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index d9c31bc2eaea2c..56aca862c4f584 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1822,7 +1822,7 @@ static struct ctl_table fs_table[] = { { .procname = "pipe-max-size", .data = &pipe_max_size, - .maxlen = sizeof(int), + .maxlen = sizeof(pipe_max_size), .mode = 0644, .proc_handler = &pipe_proc_fn, .extra1 = &pipe_min_size, From 346008fe47ed099f1793f2ba6721c9bc5b67e3a3 Mon Sep 17 00:00:00 2001 From: Stephen Bates Date: Fri, 17 Nov 2017 15:28:16 -0800 Subject: [PATCH 0538/3326] lib/genalloc.c: make the avail variable an atomic_long_t [ Upstream commit 36a3d1dd4e16bcd0d2ddfb4a2ec7092f0ae0d931 ] If the amount of resources allocated to a gen_pool exceeds 2^32 then the avail atomic overflows and this causes problems when clients try and borrow resources from the pool. This is only expected to be an issue on 64 bit systems. Add the header to pull in atomic_long* operations. So that 32 bit systems continue to use atomic32_t but 64 bit systems can use atomic64_t. 
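The overflow above is plain integer arithmetic: once a pool crosses 4 GiB, a 32-bit counter wraps, while a long counter on a 64-bit system does not. Plain C integers stand in for atomic_t and atomic_long_t in this sketch:

#include <stdio.h>

int main(void)
{
        unsigned int  avail32 = 0;              /* like the old atomic_t counter  */
        unsigned long avail64 = 0;              /* like atomic_long_t on 64-bit   */
        unsigned long chunk   = 1UL << 30;      /* 1 GiB per chunk */

        for (int i = 0; i < 5; i++) {           /* register 5 GiB of chunks */
                avail32 += (unsigned int)chunk;
                avail64 += chunk;
        }

        printf("32-bit avail: %u bytes (wrapped)\n", avail32);  /* 1 GiB */
        printf("long avail:   %lu bytes\n", avail64);           /* 5 GiB */
        return 0;
}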
Link: http://lkml.kernel.org/r/1509033843-25667-1-git-send-email-sbates@raithlin.com Signed-off-by: Stephen Bates Reviewed-by: Logan Gunthorpe Reviewed-by: Mathieu Desnoyers Reviewed-by: Daniel Mentz Cc: Jonathan Corbet Cc: Andrew Morton Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/linux/genalloc.h | 3 ++- lib/genalloc.c | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 6dfec4d638df3e..872f930f1b06d4 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -32,6 +32,7 @@ #include #include +#include struct device; struct device_node; @@ -71,7 +72,7 @@ struct gen_pool { */ struct gen_pool_chunk { struct list_head next_chunk; /* next chunk in pool */ - atomic_t avail; + atomic_long_t avail; phys_addr_t phys_addr; /* physical starting address of memory chunk */ unsigned long start_addr; /* start address of memory chunk */ unsigned long end_addr; /* end address of memory chunk (inclusive) */ diff --git a/lib/genalloc.c b/lib/genalloc.c index 144fe6b1a03ea5..ca06adc4f44510 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy chunk->phys_addr = phys; chunk->start_addr = virt; chunk->end_addr = virt + size - 1; - atomic_set(&chunk->avail, size); + atomic_long_set(&chunk->avail, size); spin_lock(&pool->lock); list_add_rcu(&chunk->next_chunk, &pool->chunks); @@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { - if (size > atomic_read(&chunk->avail)) + if (size > atomic_long_read(&chunk->avail)) continue; start_bit = 0; @@ -324,7 +324,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, addr = chunk->start_addr + ((unsigned long)start_bit << order); size = nbits << order; - atomic_sub(size, &chunk->avail); + atomic_long_sub(size, &chunk->avail); break; } rcu_read_unlock(); @@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); BUG_ON(remain); size = nbits << order; - atomic_add(size, &chunk->avail); + atomic_long_add(size, &chunk->avail); rcu_read_unlock(); return; } @@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool) rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) - avail += atomic_read(&chunk->avail); + avail += atomic_long_read(&chunk->avail); rcu_read_unlock(); return avail; } From 8cb22e0793d41e466bbaa42c01a87afb869c8e9b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Nov 2017 15:27:35 -0800 Subject: [PATCH 0539/3326] dynamic-debug-howto: fix optional/omitted ending line number to be LARGE instead of 0 [ Upstream commit 1f3c790bd5989fcfec9e53ad8fa09f5b740c958f ] line-range is supposed to treat "1-" as "1-endoffile", so handle the special case by setting last_lineno to UINT_MAX. 
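A toy parser makes the special case concrete: when the text after '-' is empty, the last line parses as 0 and must be promoted to UINT_MAX so the later "last < first" check does not reject "1-". This is a simplified stand-in, not the ddebug_parse_query code, and it also conflates an explicit "N-0" with "N-":

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_linerange(const char *s, unsigned int *first, unsigned int *last)
{
        char *dash;

        *first = (unsigned int)strtoul(s, &dash, 10);
        if (*dash != '-') {             /* single line number */
                *last = *first;
                return 0;
        }

        *last = (unsigned int)strtoul(dash + 1, NULL, 10);
        if (*last == 0)                 /* "N-" means "N to end of file" */
                *last = UINT_MAX;

        return (*last < *first) ? -1 : 0;
}

int main(void)
{
        unsigned int first, last;

        if (parse_linerange("1-", &first, &last) == 0)
                printf("range %u..%u\n", first, last);  /* 1..4294967295 */
        return 0;
}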
Fixes this error: dynamic_debug:ddebug_parse_query: last-line:0 < 1st-line:1 dynamic_debug:ddebug_exec_query: query parse failed Link: http://lkml.kernel.org/r/10a6a101-e2be-209f-1f41-54637824788e@infradead.org Signed-off-by: Randy Dunlap Acked-by: Jason Baron Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- lib/dynamic_debug.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index da796e2dc4f506..c7c96bc7654af7 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -360,6 +360,10 @@ static int ddebug_parse_query(char *words[], int nwords, if (parse_lineno(last, &query->last_lineno) < 0) return -EINVAL; + /* special case for last lineno not specified */ + if (query->last_lineno == 0) + query->last_lineno = UINT_MAX; + if (query->last_lineno < query->first_lineno) { pr_err("last-line:%d < 1st-line:%d\n", query->last_lineno, From 57f94fd10554a2d73e92490ba0eb8e575b8b8fbb Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 6 Nov 2017 15:28:04 -0500 Subject: [PATCH 0540/3326] NFS: Fix a typo in nfs_rename() [ Upstream commit d803224c84be067754db7fa58a93f36f61566493 ] On successful rename, the "old_dentry" is retained and is attached to the "new_dir", so we need to call nfs_set_verifier() accordingly. Signed-off-by: Trond Myklebust Signed-off-by: Anna Schumaker Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/nfs/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index b03b3bc05f9615..bf2c43635062b3 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2064,7 +2064,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, * should mark the directories for revalidation. */ d_move(old_dentry, new_dentry); - nfs_set_verifier(new_dentry, + nfs_set_verifier(old_dentry, nfs_save_change_attribute(new_dir)); } else if (error == -ENOENT) nfs_dentry_handle_enoent(old_dentry); From 94d6b7fa7226dba65a3ea718db6499a87b7623c3 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 3 Nov 2017 13:46:06 -0400 Subject: [PATCH 0541/3326] sunrpc: Fix rpc_task_begin trace point [ Upstream commit b2bfe5915d5fe7577221031a39ac722a0a2a1199 ] The rpc_task_begin trace point always display a task ID of zero. Move the trace point call site so that it picks up the new task ID. Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sunrpc/sched.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 0cc83839c13c36..f9db5fe52d3674 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -274,10 +274,9 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task) static void rpc_set_active(struct rpc_task *task) { - trace_rpc_task_begin(task->tk_client, task, NULL); - rpc_task_set_debuginfo(task); set_bit(RPC_TASK_ACTIVE, &task->tk_runstate); + trace_rpc_task_begin(task->tk_client, task, NULL); } /* From cfcbc4f35a73c95d8843495274cd9fff1d5f4c33 Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Thu, 16 Nov 2017 17:06:41 -0800 Subject: [PATCH 0542/3326] nfp: inherit the max_mtu from the PF netdev [ Upstream commit 743ba5b47f7961fb29f2e06bb694fb4f068ac58f ] The PF netdev is used for data transfer for reprs, so reprs inherit the maximum MTU settings of the PF netdev. 
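Regarding the rpc_task_begin trace-point fix above, the underlying rule is just ordering: assign the identifying field before emitting the trace record, otherwise the record captures the default value. A userspace stand-in in which printf plays the role of the trace point and all names are made up:

#include <stdio.h>

struct task { unsigned int id; };

static unsigned int next_id;

static void trace_begin(const struct task *t)   /* stands in for rpc_task_begin */
{
        printf("task_begin: id=%u\n", t->id);
}

int main(void)
{
        struct task t1 = { 0 };

        /* Buggy order: trace first, then assign -> always logs id=0. */
        trace_begin(&t1);
        t1.id = ++next_id;

        /* Fixed order: assign first, then trace -> logs the real id. */
        struct task t2 = { 0 };
        t2.id = ++next_id;
        trace_begin(&t2);
        return 0;
}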
Fixes: 5de73ee46704 ("nfp: general representor implementation") Signed-off-by: Dirk van der Merwe Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index d540a9dc77b3cc..1c43aca8162db8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -297,6 +297,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, netdev->netdev_ops = &nfp_repr_netdev_ops; netdev->ethtool_ops = &nfp_port_ethtool_ops; + netdev->max_mtu = pf_netdev->max_mtu; + SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); if (nfp_app_has_tc(app)) { From 1cb98be5f989fc2820d7198170098939c38579a3 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Thu, 16 Nov 2017 17:06:39 -0800 Subject: [PATCH 0543/3326] nfp: fix flower offload metadata flag usage [ Upstream commit 6c3ab204f4ca00374a374bc0fc9a275b64d1bcbb ] Hardware has no notion of new or last mask id, instead it makes use of the message type (i.e. add flow or del flow) in combination with a single bit in metadata flags to determine when to add or delete a mask id. Previously we made use of the new or last flags to indicate that a new mask should be allocated or deallocated, respectively. This incorrect behaviour is fixed by making use single bit in metadata flags to indicate mask allocation or deallocation. Fixes: 43f84b72c50d ("nfp: add metadata to each flow offload") Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/netronome/nfp/flower/main.h | 3 +-- drivers/net/ethernet/netronome/nfp/flower/metadata.c | 7 +++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index c20dd00a1caeca..899e7d53e66973 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -52,8 +52,7 @@ struct nfp_app; #define NFP_FLOWER_MASK_ELEMENT_RS 1 #define NFP_FLOWER_MASK_HASH_BITS 10 -#define NFP_FL_META_FLAG_NEW_MASK 128 -#define NFP_FL_META_FLAG_LAST_MASK 1 +#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7) #define NFP_FL_MASK_REUSE_TIME_NS 40000 #define NFP_FL_MASK_ID_LOCATION 1 diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 3226ddc55f99be..d9582ccc002545 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len, id = nfp_add_mask_table(app, mask_data, mask_len); if (id < 0) return false; - *meta_flags |= NFP_FL_META_FLAG_NEW_MASK; + *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; } *mask_id = id; @@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, if (!mask_entry) return false; + if (meta_flags) + *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK; + *mask_id = mask_entry->mask_id; mask_entry->ref_cnt--; if (!mask_entry->ref_cnt) { @@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, nfp_release_mask_id(app, *mask_id); 
kfree(mask_entry); if (meta_flags) - *meta_flags |= NFP_FL_META_FLAG_LAST_MASK; + *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; } return true; From f4da9e07a6a27939f9e976ade9945c735ad026b9 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 14 Nov 2017 16:34:44 -0800 Subject: [PATCH 0544/3326] xfs: fix forgotten rcu read unlock when skipping inode reclaim [ Upstream commit 962cc1ad6caddb5abbb9f0a43e5abe7131a71f18 ] In commit f2e9ad21 ("xfs: check for race with xfs_reclaim_inode"), we skip an inode if we're racing with freeing the inode via xfs_reclaim_inode, but we forgot to release the rcu read lock when dumping the inode, with the result that we exit to userspace with a lock held. Don't do that; generic/320 with a 1k block size fails this very occasionally. ================================================ WARNING: lock held when returning to user space! 4.14.0-rc6-djwong #4 Tainted: G W ------------------------------------------------ rm/30466 is leaving the kernel with locks still held! 1 lock held by rm/30466: #0: (rcu_read_lock){....}, at: [] xfs_ifree_cluster.isra.17+0x2c3/0x6f0 [xfs] ------------[ cut here ]------------ WARNING: CPU: 1 PID: 30466 at kernel/rcu/tree_plugin.h:329 rcu_note_context_switch+0x71/0x700 Modules linked in: deadline_iosched dm_snapshot dm_bufio ext4 mbcache jbd2 dm_flakey xfs libcrc32c dax_pmem device_dax nd_pmem sch_fq_codel af_packet [last unloaded: scsi_debug] CPU: 1 PID: 30466 Comm: rm Tainted: G W 4.14.0-rc6-djwong #4 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.10.2-1ubuntu1djwong0 04/01/2014 task: ffff880037680000 task.stack: ffffc90001064000 RIP: 0010:rcu_note_context_switch+0x71/0x700 RSP: 0000:ffffc90001067e50 EFLAGS: 00010002 RAX: 0000000000000001 RBX: ffff880037680000 RCX: ffff88003e73d200 RDX: 0000000000000002 RSI: ffffffff819e53e9 RDI: ffffffff819f4375 RBP: 0000000000000000 R08: 0000000000000000 R09: ffff880062c900d0 R10: 0000000000000000 R11: 0000000000000000 R12: ffff880037680000 R13: 0000000000000000 R14: ffffc90001067eb8 R15: ffff880037680690 FS: 00007fa3b8ce8700(0000) GS:ffff88003ec00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f69bf77c000 CR3: 000000002450a000 CR4: 00000000000006e0 Call Trace: __schedule+0xb8/0xb10 schedule+0x40/0x90 exit_to_usermode_loop+0x6b/0xa0 prepare_exit_to_usermode+0x7a/0x90 retint_user+0x8/0x20 RIP: 0033:0x7fa3b87fda87 RSP: 002b:00007ffe41206568 EFLAGS: 00000246 ORIG_RAX: ffffffffffffff02 RAX: 0000000000000000 RBX: 00000000010e88c0 RCX: 00007fa3b87fda87 RDX: 0000000000000000 RSI: 00000000010e89c8 RDI: 0000000000000005 RBP: 0000000000000000 R08: 0000000000000003 R09: 0000000000000000 R10: 000000000000015e R11: 0000000000000246 R12: 00000000010c8060 R13: 00007ffe41206690 R14: 0000000000000000 R15: 0000000000000000 ---[ end trace e88f83bf0cfbd07d ]--- Fixes: f2e9ad212def50bcf4c098c6288779dd97fff0f0 Cc: Omar Sandoval Signed-off-by: Darrick J. 
Wong Reviewed-by: Christoph Hellwig Reviewed-by: Omar Sandoval Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_inode.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 4ec5b7f4540137..63350906961ad6 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2378,6 +2378,7 @@ xfs_ifree_cluster( */ if (ip->i_ino != inum + i) { xfs_iunlock(ip, XFS_ILOCK_EXCL); + rcu_read_unlock(); continue; } } From 84e0b87ebfb56041d8130bf220d1494515332b65 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 9 Nov 2017 18:07:17 +0100 Subject: [PATCH 0545/3326] dt-bindings: usb: fix reg-property port-number range [ Upstream commit f42ae7b0540937e00fe005812997f126aaac4bc2 ] The USB hub port-number range for USB 2.0 is 1-255 and not 1-31 which reflects an arbitrary limit set by the current Linux implementation. Note that for USB 3.1 hubs the valid range is 1-15. Increase the documented valid range in the binding to 255, which is the maximum allowed by the specifications. Signed-off-by: Johan Hovold Signed-off-by: Rob Herring Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- Documentation/devicetree/bindings/usb/usb-device.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt index ce02cebac26afd..464ddf7b509a5a 100644 --- a/Documentation/devicetree/bindings/usb/usb-device.txt +++ b/Documentation/devicetree/bindings/usb/usb-device.txt @@ -11,7 +11,7 @@ Required properties: be used, but a device adhering to this binding may leave out all except for usbVID,PID. - reg: the port number which this device is connecting to, the range - is 1-31. + is 1-255. Example: From 60bed713ab191588edeba77f2cfa8cc13ccb7000 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 16 Nov 2017 08:08:44 +0800 Subject: [PATCH 0546/3326] block: wake up all tasks blocked in get_request() [ Upstream commit 34d9715ac1edd50285168dd8d80c972739a4f6a4 ] Once blk_set_queue_dying() is done in blk_cleanup_queue(), we call blk_freeze_queue() and wait for q->q_usage_counter becoming zero. But if there are tasks blocked in get_request(), q->q_usage_counter can never become zero. So we have to wake up all these tasks in blk_set_queue_dying() first. Fixes: 3ef28e83ab157997 ("block: generic request_queue reference counting") Signed-off-by: Ming Lei Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- block/blk-core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 33ee583cfe45d1..516ce3174683a9 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -605,8 +605,8 @@ void blk_set_queue_dying(struct request_queue *q) spin_lock_irq(q->queue_lock); blk_queue_for_each_rl(rl, q) { if (rl->rq_pool) { - wake_up(&rl->wait[BLK_RW_SYNC]); - wake_up(&rl->wait[BLK_RW_ASYNC]); + wake_up_all(&rl->wait[BLK_RW_SYNC]); + wake_up_all(&rl->wait[BLK_RW_ASYNC]); } } spin_unlock_irq(q->queue_lock); From 99dac8af6e6fe6100aaaba0a69e44fcb886c2b92 Mon Sep 17 00:00:00 2001 From: Pavel Tatashin Date: Wed, 15 Nov 2017 17:36:18 -0800 Subject: [PATCH 0547/3326] sparc64/mm: set fields in deferred pages [ Upstream commit 2a20aa171071a334d80c4e5d5af719d8374702fc ] Without deferred struct page feature (CONFIG_DEFERRED_STRUCT_PAGE_INIT), flags and other fields in "struct page"es are never changed prior to first initializing struct pages by going through __init_single_page(). 
With deferred struct page feature enabled there is a case where we set some fields prior to initializing: mem_init() { register_page_bootmem_info(); free_all_bootmem(); ... } When register_page_bootmem_info() is called only non-deferred struct pages are initialized. But, this function goes through some reserved pages which might be part of the deferred, and thus are not yet initialized. mem_init register_page_bootmem_info register_page_bootmem_info_node get_page_bootmem .. setting fields here .. such as: page->freelist = (void *)type; free_all_bootmem() free_low_memory_core_early() for_each_reserved_mem_region() reserve_bootmem_region() init_reserved_page() <- Only if this is deferred reserved page __init_single_pfn() __init_single_page() memset(0) <-- Loose the set fields here We end up with similar issue as in the previous patch, where currently we do not observe problem as memory is zeroed. But, if flag asserts are changed we can start hitting issues. Also, because in this patch series we will stop zeroing struct page memory during allocation, we must make sure that struct pages are properly initialized prior to using them. The deferred-reserved pages are initialized in free_all_bootmem(). Therefore, the fix is to switch the above calls. Link: http://lkml.kernel.org/r/20171013173214.27300-4-pasha.tatashin@oracle.com Signed-off-by: Pavel Tatashin Reviewed-by: Steven Sistare Reviewed-by: Daniel Jordan Reviewed-by: Bob Picco Acked-by: David S. Miller Acked-by: Michal Hocko Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Dmitry Vyukov Cc: Heiko Carstens Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Mark Rutland Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Sam Ravnborg Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/sparc/mm/init_64.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 61bdc1270d195e..a0cc1be767c8f5 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2540,9 +2540,16 @@ void __init mem_init(void) { high_memory = __va(last_valid_pfn << PAGE_SHIFT); - register_page_bootmem_info(); free_all_bootmem(); + /* + * Must be done after boot memory is put on freelist, because here we + * might set fields in deferred struct pages that have not yet been + * initialized, and free_all_bootmem() initializes all the reserved + * deferred pages for us. + */ + register_page_bootmem_info(); + /* * Set up the zero page, mark it reserved, so that page count * is not manipulated when freeing the page from user ptes. From 1238334082ea47738c79c2acb4546767593e1a75 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 15 Nov 2017 17:34:03 -0800 Subject: [PATCH 0548/3326] zsmalloc: calling zs_map_object() from irq is a bug [ Upstream commit 1aedcafbf32b3f232c159b14cd0d423fcfe2b861 ] Use BUG_ON(in_interrupt()) in zs_map_object(). This is not a new BUG_ON(), it's always been there, but was recently changed to VM_BUG_ON(). There are several problems there. First, we use use per-CPU mappings both in zsmalloc and in zram, and interrupt may easily corrupt those buffers. Second, and more importantly, we believe it's possible to start leaking sensitive information. 
Consider the following case: -> process P swap out zram per-cpu mapping CPU1 compress page A -> IRQ swap out zram per-cpu mapping CPU1 compress page B write page from per-cpu mapping CPU1 to zsmalloc pool iret -> process P write page from per-cpu mapping CPU1 to zsmalloc pool [*] return * so we store overwritten data that actually belongs to another page (task) and potentially contains sensitive data. And when process P will page fault it's going to read (swap in) that other task's data. Link: http://lkml.kernel.org/r/20170929045140.4055-1-sergey.senozhatsky@gmail.com Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- mm/zsmalloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 7c38e850a8fc5c..685049a9048d8e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1349,7 +1349,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, * pools/users, we can't allow mapping in interrupt context * because it can corrupt another users mappings. */ - WARN_ON_ONCE(in_interrupt()); + BUG_ON(in_interrupt()); /* From now on, migration cannot move the object */ pin_tag(handle); From 5ca94e03675a961424d47ee8d21c4b84606b742d Mon Sep 17 00:00:00 2001 From: Miles Chen Date: Wed, 15 Nov 2017 17:32:25 -0800 Subject: [PATCH 0549/3326] slub: fix sysfs duplicate filename creation when slub_debug=O [ Upstream commit 11066386efa692f77171484c32ea30f6e5a0d729 ] When slub_debug=O is set. It is possible to clear debug flags for an "unmergeable" slab cache in kmem_cache_open(). It makes the "unmergeable" cache became "mergeable" in sysfs_slab_add(). These caches will generate their "unique IDs" by create_unique_id(), but it is possible to create identical unique IDs. In my experiment, sgpool-128, names_cache, biovec-256 generate the same ID ":Ft-0004096" and the kernel reports "sysfs: cannot create duplicate filename '/kernel/slab/:Ft-0004096'". To repeat my experiment, set disable_higher_order_debug=1, CONFIG_SLUB_DEBUG_ON=y in kernel-4.14. Fix this issue by setting unmergeable=1 if slub_debug=O and the the default slub_debug contains any no-merge flags. 
call path: kmem_cache_create() __kmem_cache_alias() -> we set SLAB_NEVER_MERGE flags here create_cache() __kmem_cache_create() kmem_cache_open() -> clear DEBUG_METADATA_FLAGS sysfs_slab_add() -> the slab cache is mergeable now sysfs: cannot create duplicate filename '/kernel/slab/:Ft-0004096' ------------[ cut here ]------------ WARNING: CPU: 0 PID: 1 at fs/sysfs/dir.c:31 sysfs_warn_dup+0x60/0x7c Modules linked in: CPU: 0 PID: 1 Comm: swapper/0 Tainted: G W 4.14.0-rc7ajb-00131-gd4c2e9f-dirty #123 Hardware name: linux,dummy-virt (DT) task: ffffffc07d4e0080 task.stack: ffffff8008008000 PC is at sysfs_warn_dup+0x60/0x7c LR is at sysfs_warn_dup+0x60/0x7c pc : lr : pstate: 60000145 Call trace: sysfs_warn_dup+0x60/0x7c sysfs_create_dir_ns+0x98/0xa0 kobject_add_internal+0xa0/0x294 kobject_init_and_add+0x90/0xb4 sysfs_slab_add+0x90/0x200 __kmem_cache_create+0x26c/0x438 kmem_cache_create+0x164/0x1f4 sg_pool_init+0x60/0x100 do_one_initcall+0x38/0x12c kernel_init_freeable+0x138/0x1d4 kernel_init+0x10/0xfc ret_from_fork+0x10/0x18 Link: http://lkml.kernel.org/r/1510365805-5155-1-git-send-email-miles.chen@mediatek.com Signed-off-by: Miles Chen Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- mm/slub.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/slub.c b/mm/slub.c index 1efbb812303748..8e1c027a30f4d0 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5704,6 +5704,10 @@ static int sysfs_slab_add(struct kmem_cache *s) return 0; } + if (!unmergeable && disable_higher_order_debug && + (slub_debug & DEBUG_METADATA_FLAGS)) + unmergeable = 1; + if (unmergeable) { /* * Slabcache can never be merged so we can use the name proper. From 191d96120f955f23474176da8272a26fd6d0b9bd Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 15 Nov 2017 16:55:54 +0800 Subject: [PATCH 0550/3326] sctp: do not free asoc when it is already dead in sctp_sendmsg [ Upstream commit ca3af4dd28cff4e7216e213ba3b671fbf9f84758 ] Now in sctp_sendmsg sctp_wait_for_sndbuf could schedule out without holding sock sk. It means the current asoc can be freed elsewhere, like when receiving an abort packet. If the asoc is just created in sctp_sendmsg and sctp_wait_for_sndbuf returns err, the asoc will be freed again due to new_asoc is not nil. An use-after-free issue would be triggered by this. This patch is to fix it by setting new_asoc with nil if the asoc is already dead when cpu schedules back, so that it will not be freed again in sctp_sendmsg. v1->v2: set new_asoc as nil in sctp_sendmsg instead of sctp_wait_for_sndbuf. Suggested-by: Neil Horman Reported-by: Dmitry Vyukov Signed-off-by: Xin Long Acked-by: Neil Horman Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 6f45d1713452df..e62251fc6f499f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1963,8 +1963,14 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); if (!sctp_wspace(asoc)) { err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); - if (err) + if (err) { + if (err == -ESRCH) { + /* asoc is already dead. 
*/ + new_asoc = NULL; + err = -EPIPE; + } goto out_free; + } } /* If an address is passed with the sendto/sendmsg call, it is used @@ -7839,10 +7845,11 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); + if (asoc->base.dead) + goto do_dead; if (!*timeo_p) goto do_nonblock; - if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || - asoc->base.dead) + if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING) goto do_error; if (signal_pending(current)) goto do_interrupted; @@ -7867,6 +7874,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, return err; +do_dead: + err = -ESRCH; + goto out; + do_error: err = -EPIPE; goto out; From 0c7e787bfc91cb374b60ba5fe7055178c94a7cb7 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 15 Nov 2017 16:57:26 +0800 Subject: [PATCH 0551/3326] sctp: use the right sk after waking up from wait_buf sleep [ Upstream commit cea0cc80a6777beb6eb643d4ad53690e1ad1d4ff ] Commit dfcb9f4f99f1 ("sctp: deny peeloff operation on asocs with threads sleeping on it") fixed the race between peeloff and wait sndbuf by checking waitqueue_active(&asoc->wait) in sctp_do_peeloff(). But it actually doesn't work, as even if waitqueue_active returns false the waiting sndbuf thread may still not yet hold sk lock. After asoc is peeled off, sk is not asoc->base.sk any more, then to hold the old sk lock couldn't make assoc safe to access. This patch is to fix this by changing to hold the new sk lock if sk is not asoc->base.sk, meanwhile, also set the sk in sctp_sendmsg with the new sk. With this fix, there is no more race between peeloff and waitbuf, the check 'waitqueue_active' in sctp_do_peeloff can be removed. Thanks Marcelo and Neil for making this clear. v1->v2: fix it by changing to lock the new sock instead of adding a flag in asoc. Suggested-by: Neil Horman Signed-off-by: Xin Long Acked-by: Neil Horman Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e62251fc6f499f..14c28fbfe6b8c7 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -83,8 +83,8 @@ /* Forward declarations for internal helper functions. */ static int sctp_writeable(struct sock *sk); static void sctp_wfree(struct sk_buff *skb); -static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, - size_t msg_len); +static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, + size_t msg_len, struct sock **orig_sk); static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); static int sctp_wait_for_accept(struct sock *sk, long timeo); @@ -1962,7 +1962,8 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); if (!sctp_wspace(asoc)) { - err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); + /* sk can be changed by peel off when waiting for buf. */ + err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk); if (err) { if (err == -ESRCH) { /* asoc is already dead. 
*/ @@ -4949,12 +4950,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) if (!asoc) return -EINVAL; - /* If there is a thread waiting on more sndbuf space for - * sending on this asoc, it cannot be peeled. - */ - if (waitqueue_active(&asoc->wait)) - return -EBUSY; - /* An association cannot be branched off from an already peeled-off * socket, nor is this supported for tcp style sockets. */ @@ -7828,7 +7823,7 @@ void sctp_sock_rfree(struct sk_buff *skb) /* Helper function to wait for space in the sndbuf. */ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, - size_t msg_len) + size_t msg_len, struct sock **orig_sk) { struct sock *sk = asoc->base.sk; int err = 0; @@ -7862,11 +7857,17 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, release_sock(sk); current_timeo = schedule_timeout(current_timeo); lock_sock(sk); + if (sk != asoc->base.sk) { + release_sock(sk); + sk = asoc->base.sk; + lock_sock(sk); + } *timeo_p = current_timeo; } out: + *orig_sk = sk; finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. */ From 6e9c2a05c368a0b53d5b6647798e67830f5508e0 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 14 Nov 2017 14:43:56 -0500 Subject: [PATCH 0552/3326] fcntl: don't leak fd reference when fixup_compat_flock fails [ Upstream commit 9280a601e6080c9ff658468c1c775ff6514099a6 ] Currently we just return err here, but we need to put the fd reference first. Fixes: 94073ad77fff (fs/locks: don't mess with the address limit in compat_fcntl64) Signed-off-by: Jeff Layton Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/fcntl.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/fcntl.c b/fs/fcntl.c index 8d78ffd7b399d4..6fd311367efc8e 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -632,9 +632,8 @@ COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, if (err) break; err = fixup_compat_flock(&flock); - if (err) - return err; - err = put_compat_flock(&flock, compat_ptr(arg)); + if (!err) + err = put_compat_flock(&flock, compat_ptr(arg)); break; case F_GETLK64: case F_OFD_GETLK: From 81a1c2d3f9eb5996714d47dc0fcab91cee0678d4 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 15 Nov 2017 09:43:09 +0800 Subject: [PATCH 0553/3326] geneve: fix fill_info when link down [ Upstream commit fd7eafd02121d6ef501ef1a4a891e6061366c952 ] geneve->sock4/6 were added with geneve_open and released with geneve_stop. So when geneve link down, we will not able to show remote address and checksum info after commit 11387fe4a98 ("geneve: fix fill_info when using collect_metadata"). Fix this by avoid passing *_REMOTE{,6} for COLLECT_METADATA since they are mutually exclusive, and always show UDP_ZERO_CSUM6_RX info. Fixes: 11387fe4a98 ("geneve: fix fill_info when using collect_metadata") Signed-off-by: Hangbin Liu Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/geneve.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index ed51018a813e7b..b9d8d71a6ecc48 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1503,6 +1503,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct ip_tunnel_info *info = &geneve->info; + bool metadata = geneve->collect_md; __u8 tmp_vni[3]; __u32 vni; @@ -1511,32 +1512,24 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) goto nla_put_failure; - if (rtnl_dereference(geneve->sock4)) { + if (!metadata && ip_tunnel_info_af(info) == AF_INET) { if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, info->key.u.ipv4.dst)) goto nla_put_failure; - if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM, !!(info->key.tun_flags & TUNNEL_CSUM))) goto nla_put_failure; - } - #if IS_ENABLED(CONFIG_IPV6) - if (rtnl_dereference(geneve->sock6)) { + } else if (!metadata) { if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, &info->key.u.ipv6.dst)) goto nla_put_failure; - if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, !(info->key.tun_flags & TUNNEL_CSUM))) goto nla_put_failure; - - if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, - !geneve->use_udp6_rx_checksums)) - goto nla_put_failure; - } #endif + } if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || @@ -1546,10 +1539,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst)) goto nla_put_failure; - if (geneve->collect_md) { - if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) + if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) goto nla_put_failure; - } + + if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, + !geneve->use_udp6_rx_checksums)) + goto nla_put_failure; + return 0; nla_put_failure: From b316280c81337971e5cbf239400186425de7bae9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 14 Nov 2017 17:15:50 -0800 Subject: [PATCH 0554/3326] bpf: fix lockdep splat [ Upstream commit 89ad2fa3f043a1e8daae193bcb5fe34d5f8caf28 ] pcpu_freelist_pop() needs the same lockdep awareness than pcpu_freelist_populate() to avoid a false positive. [ INFO: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected ] switchto-defaul/12508 [HC0[0]:SC0[6]:HE0:SE0] is trying to acquire: (&htab->buckets[i].lock){......}, at: [] __htab_percpu_map_update_elem+0x1cb/0x300 and this task is already holding: (dev_queue->dev->qdisc_class ?: &qdisc_tx_lock#2){+.-...}, at: [] __dev_queue_xmit+0 x868/0x1240 which would create a new lock dependency: (dev_queue->dev->qdisc_class ?: &qdisc_tx_lock#2){+.-...} -> (&htab->buckets[i].lock){......} but this new dependency connects a SOFTIRQ-irq-safe lock: (dev_queue->dev->qdisc_class ?: &qdisc_tx_lock#2){+.-...} ... 
which became SOFTIRQ-irq-safe at: [] __lock_acquire+0x42b/0x1f10 [] lock_acquire+0xbc/0x1b0 [] _raw_spin_lock+0x38/0x50 [] __dev_queue_xmit+0x868/0x1240 [] dev_queue_xmit+0x10/0x20 [] ip_finish_output2+0x439/0x590 [] ip_finish_output+0x150/0x2f0 [] ip_output+0x7d/0x260 [] ip_local_out+0x5e/0xe0 [] ip_queue_xmit+0x205/0x620 [] tcp_transmit_skb+0x5a8/0xcb0 [] tcp_write_xmit+0x242/0x1070 [] __tcp_push_pending_frames+0x3c/0xf0 [] tcp_rcv_established+0x312/0x700 [] tcp_v4_do_rcv+0x11c/0x200 [] tcp_v4_rcv+0xaa2/0xc30 [] ip_local_deliver_finish+0xa7/0x240 [] ip_local_deliver+0x66/0x200 [] ip_rcv_finish+0xdd/0x560 [] ip_rcv+0x295/0x510 [] __netif_receive_skb_core+0x988/0x1020 [] __netif_receive_skb+0x21/0x70 [] process_backlog+0x6f/0x230 [] net_rx_action+0x229/0x420 [] __do_softirq+0xd8/0x43d [] do_softirq_own_stack+0x1c/0x30 [] do_softirq+0x55/0x60 [] __local_bh_enable_ip+0xa8/0xb0 [] cpu_startup_entry+0x1c7/0x500 [] start_secondary+0x113/0x140 to a SOFTIRQ-irq-unsafe lock: (&head->lock){+.+...} ... which became SOFTIRQ-irq-unsafe at: ... [] __lock_acquire+0x82f/0x1f10 [] lock_acquire+0xbc/0x1b0 [] _raw_spin_lock+0x38/0x50 [] pcpu_freelist_pop+0x7a/0xb0 [] htab_map_alloc+0x50c/0x5f0 [] SyS_bpf+0x265/0x1200 [] entry_SYSCALL_64_fastpath+0x12/0x17 other info that might help us debug this: Chain exists of: dev_queue->dev->qdisc_class ?: &qdisc_tx_lock#2 --> &htab->buckets[i].lock --> &head->lock Possible interrupt unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&head->lock); local_irq_disable(); lock(dev_queue->dev->qdisc_class ?: &qdisc_tx_lock#2); lock(&htab->buckets[i].lock); lock(dev_queue->dev->qdisc_class ?: &qdisc_tx_lock#2); *** DEADLOCK *** Fixes: e19494edab82 ("bpf: introduce percpu_freelist") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/percpu_freelist.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c index 5c51d1985b5102..673fa6fe2d73cf 100644 --- a/kernel/bpf/percpu_freelist.c +++ b/kernel/bpf/percpu_freelist.c @@ -78,8 +78,10 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) { struct pcpu_freelist_head *head; struct pcpu_freelist_node *node; + unsigned long flags; int orig_cpu, cpu; + local_irq_save(flags); orig_cpu = cpu = raw_smp_processor_id(); while (1) { head = per_cpu_ptr(s->freelist, cpu); @@ -87,14 +89,16 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) node = head->first; if (node) { head->first = node->next; - raw_spin_unlock(&head->lock); + raw_spin_unlock_irqrestore(&head->lock, flags); return node; } raw_spin_unlock(&head->lock); cpu = cpumask_next(cpu, cpu_possible_mask); if (cpu >= nr_cpu_ids) cpu = 0; - if (cpu == orig_cpu) + if (cpu == orig_cpu) { + local_irq_restore(flags); return NULL; + } } } From 8474be654b2723426f37ad95a48bff1acdb022f7 Mon Sep 17 00:00:00 2001 From: Gabriel Fernandez Date: Wed, 11 Oct 2017 08:57:24 +0200 Subject: [PATCH 0555/3326] clk: stm32h7: fix test of clock config [ Upstream commit c1ea839c41d049604a3f64ef72712d1c7c6639d0 ] fix test of composite clock config (bad copy / past) Signed-off-by: Gabriel Fernandez Fixes: 3e4d618b0722 ("clk: stm32h7: Add stm32h743 clock driver") Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/clk-stm32h7.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c index 
a94c3f56c59096..61c3e40507d318 100644 --- a/drivers/clk/clk-stm32h7.c +++ b/drivers/clk/clk-stm32h7.c @@ -384,7 +384,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg, mux_ops = div_ops = gate_ops = NULL; mux_hw = div_hw = gate_hw = NULL; - if (gcfg->mux && gcfg->mux) { + if (gcfg->mux && cfg->mux) { mux = _get_cmux(base + cfg->mux->offset, cfg->mux->shift, cfg->mux->width, @@ -410,7 +410,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg, } } - if (gcfg->gate && gcfg->gate) { + if (gcfg->gate && cfg->gate) { gate = _get_cgate(base + cfg->gate->offset, cfg->gate->bit_idx, gcfg->gate->flags, lock); From de1090298880468eb6e29e5b1e046862f2301950 Mon Sep 17 00:00:00 2001 From: Mylene JOSSERAND Date: Sun, 5 Nov 2017 17:51:34 +0100 Subject: [PATCH 0556/3326] clk: sunxi-ng: a83t: Fix i2c buses bits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit cc54c0955d6f8618a38a999eecdc3d95306b90de ] i2c1 and i2c2 bits for CCU are not bit 0 but bit 1 and bit 2. Because of that, the i2c0 (bit 0) was not correctly configured. Fixed the correct bits for i2c1 and i2c2. Fixes: 05359be1176b ("clk: sunxi-ng: Add driver for A83T CCU") Signed-off-by: Mylène Josserand Acked-by: Maxime Ripard Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/sunxi-ng/ccu-sun8i-a83t.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c index e43acebdfbcdbe..f8203115a6bcea 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c @@ -354,9 +354,9 @@ static SUNXI_CCU_GATE(bus_tdm_clk, "bus-tdm", "apb1", static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", 0x06c, BIT(0), 0); static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", - 0x06c, BIT(0), 0); + 0x06c, BIT(1), 0); static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", - 0x06c, BIT(0), 0); + 0x06c, BIT(2), 0); static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", 0x06c, BIT(16), 0); static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", From cd11ce209d73cba69a5776818795446a7991755e Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sat, 11 Nov 2017 17:29:28 +0100 Subject: [PATCH 0557/3326] clk: qcom: common: fix legacy board-clock registration [ Upstream commit 43a51019cc8ff1b1cd2ba72e86563beb40d356fc ] Make sure to search only the child nodes of "/clocks", rather than the whole device-tree depth-first starting at "/clocks" when determining whether to register a fixed clock in the legacy board-clock registration helper. 
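The qcom fix above swaps a depth-first search for a direct-child lookup. A toy tree shows why that matters: a node with the requested name somewhere deeper in the tree must not satisfy a lookup that is meant to check only the children of /clocks. The real OF helpers walk the actual device tree (and of_find_node_by_name keeps walking past the subtree); the sketch below only models the distinction, and the node names are made up:

#include <stdio.h>
#include <string.h>

struct node {
        const char *name;
        struct node *child;     /* first child  */
        struct node *sibling;   /* next sibling */
};

/* Depth-first over the whole subtree (roughly what of_find_node_by_name did here). */
static struct node *find_anywhere(struct node *n, const char *name)
{
        if (!n)
                return NULL;
        if (!strcmp(n->name, name))
                return n;
        struct node *hit = find_anywhere(n->child, name);
        return hit ? hit : find_anywhere(n->sibling, name);
}

/* Direct children only (roughly what of_get_child_by_name does). */
static struct node *find_child(struct node *parent, const char *name)
{
        for (struct node *c = parent->child; c; c = c->sibling)
                if (!strcmp(c->name, name))
                        return c;
        return NULL;
}

int main(void)
{
        /* /clocks has one child "a"; the wanted name only exists deeper, under "a". */
        struct node deep   = { "board-clk", NULL, NULL };
        struct node a      = { "a", &deep, NULL };
        struct node clocks = { "clocks", &a, NULL };

        printf("anywhere: %s\n", find_anywhere(&clocks, "board-clk") ? "found" : "absent");
        printf("child:    %s\n", find_child(&clocks, "board-clk") ? "found" : "absent");
        return 0;
}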
Fixes: ee15faffef11 ("clk: qcom: common: Add API to register board clocks backwards compatibly") Signed-off-by: Johan Hovold Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/qcom/common.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index d523991c945f9d..28ceaf1e99371a 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -143,8 +143,10 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path, int ret; clocks_node = of_find_node_by_path("/clocks"); - if (clocks_node) - node = of_find_node_by_name(clocks_node, path); + if (clocks_node) { + node = of_get_child_by_name(clocks_node, path); + of_node_put(clocks_node); + } if (!node) { fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL); From a967ab0f7338e84d379b7a764bf446221de21d3e Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 5 Oct 2017 11:32:59 +0900 Subject: [PATCH 0558/3326] clk: uniphier: fix DAPLL2 clock rate of Pro5 [ Upstream commit 67affb78a4e4feb837953e3434c8402a5c3b272f ] The parent of DAPLL2 should be DAPLL1. Fix the clock connection. Signed-off-by: Masahiro Yamada Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/uniphier/clk-uniphier-sys.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c index 07f3b91a7daf36..d244e724e19858 100644 --- a/drivers/clk/uniphier/clk-uniphier-sys.c +++ b/drivers/clk/uniphier/clk-uniphier-sys.c @@ -123,7 +123,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = { const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1), /* 2400 MHz */ UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1), /* 2560 MHz */ - UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */ + UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48), UNIPHIER_PRO5_SYS_CLK_NAND(2), From 8356c5754cb9854a25e0cebac0ee94a352b3debe Mon Sep 17 00:00:00 2001 From: Zhong Kaihua Date: Mon, 7 Aug 2017 22:51:56 +0800 Subject: [PATCH 0559/3326] clk: hi3660: fix incorrect uart3 clock freqency [ Upstream commit d33fb1b9f0fcb67f2b9f8b1891465a088a9480f8 ] UART3 clock rate is doubled in previous commit. This error is not detected until recently a mezzanine board which makes real use of uart3 port (through LS connector of 96boards) was setup and tested on hi3660-hikey960 board. This patch changes clock source rate of clk_factor_uart3 to 100000000. 
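The arithmetic behind the uart3 fix above, with the parent rate inferred from the stated 100 MHz target rather than taken from the patch itself:

#include <stdio.h>

int main(void)
{
        unsigned long parent = 1600000000UL;    /* implied iomcu_peri0 rate */

        printf("mult=1 div=8  -> %lu Hz (doubled, wrong)\n", parent * 1 / 8);
        printf("mult=1 div=16 -> %lu Hz (intended)\n",       parent * 1 / 16);
        return 0;
}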
Signed-off-by: Zhong Kaihua Signed-off-by: Guodong Xu Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/hisilicon/clk-hi3660.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c index a18258eb89cb1b..f404199596563e 100644 --- a/drivers/clk/hisilicon/clk-hi3660.c +++ b/drivers/clk/hisilicon/clk-hi3660.c @@ -34,7 +34,7 @@ static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = { /* crgctrl */ static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = { - { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 8, 0, }, + { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 16, 0, }, { HI3660_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", 1, 6, 0, }, { HI3660_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_iomcu", 1, 4, 0, }, { HI3660_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_iomcu", 1, 4, 0, }, From e5aa0e86f957709fc72d02ed3d5d0d775a2592f6 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 28 Sep 2017 11:18:53 +0100 Subject: [PATCH 0560/3326] mailbox: mailbox-test: don't rely on rx_buffer content to signal data ready [ Upstream commit e339c80af95e14de3712d69ddea09a3868fa14cd ] Currently we rely on the first byte of the Rx buffer to check if there's any data available to be read. If the first byte of the received buffer is zero (i.e. null character), then we fail to signal that data is available even when it's available. Instead introduce a boolean variable to track the data availability and update it in the channel receive callback as ready and clear it when the data is read. Signed-off-by: Sudeep Holla Signed-off-by: Jassi Brar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mailbox/mailbox-test.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 97fb956bb6e04a..93f3d4d61fa7a6 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -30,6 +30,7 @@ #define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \ (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE)) +static bool mbox_data_ready; static struct dentry *root_debugfs_dir; struct mbox_test_device { @@ -152,16 +153,14 @@ static ssize_t mbox_test_message_write(struct file *filp, static bool mbox_test_message_data_ready(struct mbox_test_device *tdev) { - unsigned char data; + bool data_ready; unsigned long flags; spin_lock_irqsave(&tdev->lock, flags); - data = tdev->rx_buffer[0]; + data_ready = mbox_data_ready; spin_unlock_irqrestore(&tdev->lock, flags); - if (data != '\0') - return true; - return false; + return data_ready; } static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf, @@ -223,6 +222,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf, *(touser + l) = '\0'; memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN); + mbox_data_ready = false; spin_unlock_irqrestore(&tdev->lock, flags); @@ -292,6 +292,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message) message, MBOX_MAX_MSG_LEN); memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN); } + mbox_data_ready = true; spin_unlock_irqrestore(&tdev->lock, flags); wake_up_interruptible(&tdev->waitq); From 8928998d1ed8c4bf76d0ce75570665982266825c Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 30 Sep 2017 10:10:09 +0900 Subject: [PATCH 0561/3326] kbuild: rpm-pkg: fix jobserver unavailable warning 
[ Upstream commit 606625be47bc87b6fab0af10cd57aaa675cb9e42 ] If "make rpm-pkg" or "make binrpm-pkg" is run with -j[jobs] option, the following warning message is displayed. warning: jobserver unavailable: using -j1. Add '+' to parent make rule. Follow the suggestion. Signed-off-by: Masahiro Yamada Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- scripts/package/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/package/Makefile b/scripts/package/Makefile index 2b4aa4c19b214c..34de8b953ecfc4 100644 --- a/scripts/package/Makefile +++ b/scripts/package/Makefile @@ -49,7 +49,7 @@ rpm-pkg rpm: FORCE $(MAKE) clean $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec $(call cmd,src_tar,$(KERNELPATH),kernel.spec) - rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz + +rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz rm $(KERNELPATH).tar.gz kernel.spec # binrpm-pkg @@ -57,7 +57,7 @@ rpm-pkg rpm: FORCE binrpm-pkg: FORCE $(MAKE) KBUILD_SRC= $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec - rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \ +rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \ $(UTS_MACHINE) -bb $(objtree)/binkernel.spec rm binkernel.spec From bbcedaeba78040aaa0f0d6a7f39add578f81f356 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 14 Nov 2017 13:42:38 +0530 Subject: [PATCH 0562/3326] atm: horizon: Fix irq release error [ Upstream commit bde533f2ea607cbbbe76ef8738b36243939a7bc2 ] atm_dev_register() can fail here, and free_irq() would then be handed a value that was never initialised: the initialization of 'dev->irq' happens after the 'goto out_free_irq'. So use 'irq' instead of 'dev->irq' in free_irq(). Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/atm/horizon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 7e76b35f422c62..e121b848573108 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2803,7 +2803,7 @@ static int hrz_probe(struct pci_dev *pci_dev, return err; out_free_irq: - free_irq(dev->irq, dev); + free_irq(irq, dev); out_free: kfree(dev); out_release: From 2de359062feea90c1e8baa2fa9771ba98d1fadfa Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Mon, 13 Nov 2017 16:48:47 -0500 Subject: [PATCH 0563/3326] jump_label: Invoke jump_label_test() via early_initcall() [ Upstream commit 92ee46efeb505ead3ab06d3c5ce695637ed5f152 ] Fengguang Wu reported that running the rcuperf test during boot can cause the jump_label_test() to hit a WARN_ON(). The issue is that the core jump label code relies on kernel_text_address() to detect when it can no longer update branches that may be contained in __init sections. The kernel_text_address() in turn assumes that if the system_state variable is greater than or equal to SYSTEM_RUNNING then __init sections are no longer valid (since the assumption is that they have been freed). However, when rcuperf is set up to run in early boot it can call kernel_power_off() which sets the system_state to SYSTEM_POWER_OFF. Since rcuperf initialization is invoked via a module_init(), we can make the dependency of jump_label_test() needing to complete before rcuperf explicit by calling it via early_initcall(). Reported-by: Fengguang Wu Signed-off-by: Jason Baron Acked-by: Paul E.
McKenney Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1510609727-2238-1-git-send-email-jbaron@akamai.com Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/jump_label.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 0bf2e8f5244ae7..7c3774ac1d51f9 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -769,7 +769,7 @@ static __init int jump_label_test(void) return 0; } -late_initcall(jump_label_test); +early_initcall(jump_label_test); #endif /* STATIC_KEYS_SELFTEST */ #endif /* HAVE_JUMP_LABEL */ From f0e1cd056e99c1ac7ec59f46ad152c11f1b69570 Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Mon, 13 Nov 2017 10:22:44 +0200 Subject: [PATCH 0564/3326] tls: Use kzalloc for aead_request allocation [ Upstream commit 61ef6da622aa7b66bf92991bd272490eea6c712e ] Use kzalloc for aead_request allocation as we don't set all the bits in the request. Fixes: 3c4d7559159b ('tls: kernel TLS support') Signed-off-by: Ilya Lesokhin Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/tls/tls_sw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 7d80040a37b6d5..f00383a3762205 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -219,7 +219,7 @@ static int tls_do_encryption(struct tls_context *tls_ctx, struct aead_request *aead_req; int rc; - aead_req = kmalloc(req_size, flags); + aead_req = kzalloc(req_size, flags); if (!aead_req) return -ENOMEM; From 6610b9cb80ad9a58f71940b9658e3116262663db Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 10 Nov 2017 14:14:06 +1100 Subject: [PATCH 0565/3326] xfrm: Copy policy family in clone_policy [ Upstream commit 0e74aa1d79a5bbc663e03a2804399cae418a0321 ] The syzbot found an ancient bug in the IPsec code. When we cloned a socket policy (for example, for a child TCP socket derived from a listening socket), we did not copy the family field. This results in a live policy with a zero family field. This triggers a BUG_ON check in the af_key code when the cloned policy is retrieved. This patch fixes it by copying the family field over. Reported-by: syzbot Signed-off-by: Herbert Xu Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_policy.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 6eb228a7013106..2a6093840e7e85 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1306,6 +1306,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) newp->xfrm_nr = old->xfrm_nr; newp->index = old->index; newp->type = old->type; + newp->family = old->family; memcpy(newp->xfrm_vec, old->xfrm_vec, newp->xfrm_nr*sizeof(struct xfrm_tmpl)); spin_lock_bh(&net->xfrm.xfrm_policy_lock); From a76d81af17ceadd8bae46da297d8e24dd8ea0838 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 13 Nov 2017 17:32:39 +0800 Subject: [PATCH 0566/3326] f2fs: fix to clear FI_NO_PREALLOC [ Upstream commit 28cfafb73853f0494b06649716687a3ea07681d5 ] We need to clear FI_NO_PREALLOC flag in error path of f2fs_file_write_iter, otherwise we will lose the chance to preallocate blocks in latter write() at one time. 
Fixes: dc91de78e5e1 ("f2fs: do not preallocate blocks which has wrong buffer") Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/f2fs/file.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 6ce4678723762a..b8372095ba0a6e 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -2697,6 +2697,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) err = f2fs_preallocate_blocks(iocb, from); if (err) { + clear_inode_flag(inode, FI_NO_PREALLOC); inode_unlock(inode); return err; } From 971110c72991c9c4662d0abf48bd6275b7e211f7 Mon Sep 17 00:00:00 2001 From: Sriharsha Basavapatna Date: Fri, 3 Nov 2017 02:39:04 +0530 Subject: [PATCH 0567/3326] bnxt_re: changing the ip address shouldn't affect new connections [ Upstream commit 063fb5bd1a01937094f40169a20e4aa5ca030db1 ] While adding a new gid, the driver currently does not return the context back to the stack. A subsequent del_gid() (e.g., when the ip address is changed) doesn't find the right context in the driver and it ends up dropping that request. This results in the HW caching a stale gid entry, and traffic fails because of that. Fix by returning the proper context in bnxt_re_add_gid(). Signed-off-by: Sriharsha Basavapatna Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 0d89621d9fe8ed..b210495ff33c04 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -394,6 +394,7 @@ int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num, ctx->idx = tbl_idx; ctx->refcnt = 1; ctx_tbl[tbl_idx] = ctx; + *context = ctx; return rc; } From 192c689319429672e15d73969fa0230e7379779a Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Thu, 2 Nov 2017 15:22:26 +0200 Subject: [PATCH 0568/3326] IB/mlx4: Increase maximal message size under UD QP [ Upstream commit 5f22a1d87c5315a98981ecf93cd8de226cffe6ca ] Maximal message should be used as a limit to the max message payload allowed, without the headers. The ConnectX-3 check, however, is done against this value including the headers. When the payload is 4K this will cause the NIC to drop packets. Increase the maximal message to 8K as a workaround; this shouldn't change current behaviour because we continue to set the MTU to 4k.
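As a rough worked illustration of what the one-bit bump in the hunk below buys (my own reading of the '| 12' -> '| 13' change; the low bits of mtu_msgmax appear to carry log2 of the maximum message size, so treat this as an assumption rather than a datasheet statement):

#include <stdio.h>

int main(void)
{
	/* Illustrative only: the exponents are taken from the hunk below. */
	unsigned int exp_old = 12, exp_new = 13;

	printf("limit with exponent 12: %u bytes\n", 1u << exp_old); /* 4096: no headroom above a 4K payload */
	printf("limit with exponent 13: %u bytes\n", 1u << exp_new); /* 8192: a 4K payload plus headers fits */
	return 0;
}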
To reproduce: set the MTU to 4296 on the corresponding interface, for example: ifconfig eth0 mtu 4296 (both server and client) On server: ib_send_bw -c UD -d mlx4_0 -s 4096 -n 1000000 -i1 -m 4096 On client: ib_send_bw -d mlx4_0 -c UD -s 4096 -n 1000000 -i 1 -m 4096 Fixes: 6e0d733d9215 ("IB/mlx4: Allow 4K messages for UD QPs") Signed-off-by: Mark Bloch Reviewed-by: Majd Dibbiny Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/mlx4/qp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index b6b33d99b0b410..17e44c86577a9b 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -2216,7 +2216,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, context->mtu_msgmax = (IB_MTU_4096 << 5) | ilog2(dev->dev->caps.max_gso_sz); else - context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; + context->mtu_msgmax = (IB_MTU_4096 << 5) | 13; } else if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { pr_err("path MTU (%u) is invalid\n", From d702be100eaf043b97687c050565c154a48d763d Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Mon, 30 Oct 2017 14:23:13 +0200 Subject: [PATCH 0569/3326] IB/mlx5: Assign send CQ and recv CQ of UMR QP [ Upstream commit 31fde034a8bd964a5c7c1a5663fc87a913158db2 ] The UMR's QP is created by calling mlx5_ib_create_qp directly, and therefore the send CQ and the recv CQ on the ibqp weren't assigned. Assign them right after calling the mlx5_ib_create_qp to assure that any access to those pointers will work as expected and won't crash the system as might happen as part of reset flow. Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Signed-off-by: Majd Dibbiny Reviewed-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/mlx5/main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 552f7bd4ecc38b..5aff1e33d98408 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3097,6 +3097,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev) qp->real_qp = qp; qp->uobject = NULL; qp->qp_type = MLX5_IB_QPT_REG_UMR; + qp->send_cq = init_attr->send_cq; + qp->recv_cq = init_attr->recv_cq; attr->qp_state = IB_QPS_INIT; attr->port_num = 1; From 49e186e3278d0d800021b59286dc97d748593fd6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 2 Nov 2017 15:27:51 +0000 Subject: [PATCH 0570/3326] afs: Fix total-length calculation for multiple-page send [ Upstream commit 1199db603511d7463d9d3840f96f61967affc766 ] Fix the total-length calculation in afs_make_call() when the operation being dispatched has data from a series of pages attached. Although the patched code looks like it should reduce mathematically to the current code, it doesn't, because the 32-bit unsigned arithmetic used to calculate the page-offset-difference doesn't correctly extend to a 64-bit value when the result is effectively negative. Without this, some FS.StoreData operations that span multiple pages fail, reporting too little or too much data.
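A minimal userspace sketch of the wraparound described above (not part of the patch; every name and value here is an illustrative assumption):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example: the send spans pages 0..1, starting at byte 3000
	 * of the first page and ending at byte 100 of the last page, so the
	 * true length is (4096 - 3000) + 100 = 1196 bytes.
	 */
	uint32_t first = 0, last = 1, first_offset = 3000, last_to = 100;
	uint64_t page_size = 4096;

	/* One-line style: the u32 subtraction last_to - first_offset wraps
	 * to ~4.29e9 and is then zero-extended, so the total is far too big.
	 */
	uint64_t bad = (uint64_t)(last_to - first_offset) +
		       (uint64_t)(last - first) * page_size;

	/* Patched style: every intermediate term stays non-negative. */
	uint64_t good = (page_size - first_offset) + last_to +
			(uint64_t)(last - first - 1) * page_size;

	printf("bad=%llu good=%llu\n", (unsigned long long)bad,
	       (unsigned long long)good);	/* bad=4294968492 good=1196 */
	return 0;
}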
Signed-off-by: David Howells Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/afs/rxrpc.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 0bf191f0dbafa7..9f715c3edcf967 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -377,8 +377,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, */ tx_total_len = call->request_size; if (call->send_pages) { - tx_total_len += call->last_to - call->first_offset; - tx_total_len += (call->last - call->first) * PAGE_SIZE; + if (call->last == call->first) { + tx_total_len += call->last_to - call->first_offset; + } else { + /* It looks mathematically like you should be able to + * combine the following lines with the ones above, but + * unsigned arithmetic is fun when it wraps... + */ + tx_total_len += PAGE_SIZE - call->first_offset; + tx_total_len += call->last_to; + tx_total_len += (call->last - call->first - 1) * PAGE_SIZE; + } } /* create a call */ From 80e642c066f9882007174be870c1465f69df8257 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 2 Nov 2017 15:27:48 +0000 Subject: [PATCH 0571/3326] afs: Connect up the CB.ProbeUuid [ Upstream commit f4b3526d83c40dd8bf5948b9d7a1b2c340f0dcc8 ] The handler for the CB.ProbeUuid operation in the cache manager is implemented, but isn't listed in the switch-statement of operation selection, so won't be used. Fix this by adding it. Signed-off-by: David Howells Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/afs/cmservice.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 782d4d05a53ba3..c7475867a52b35 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -127,6 +127,9 @@ bool afs_cm_incoming_call(struct afs_call *call) case CBProbe: call->type = &afs_SRXCBProbe; return true; + case CBProbeUuid: + call->type = &afs_SRXCBProbeUuid; + return true; case CBTellMeAboutYourself: call->type = &afs_SRXCBTellMeAboutYourself; return true; From 5fd159e1ee6a87a72626139813034f24f047d0e6 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 14 Dec 2017 09:53:15 +0100 Subject: [PATCH 0572/3326] Linux 4.14.6 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 43ac7bdb10adad..eabbd7748a2461 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 5 +SUBLEVEL = 6 EXTRAVERSION = NAME = Petit Gorille From fc038e59f316d2255f6d3c05d0d6b117d4739574 Mon Sep 17 00:00:00 2001 From: Sebastian Sjoholm Date: Mon, 20 Nov 2017 19:05:17 +0100 Subject: [PATCH 0573/3326] net: qmi_wwan: add Quectel BG96 2c7c:0296 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit f9409e7f086fa6c4623769b4b2f4f17a024d8143 ] Quectel BG96 is an Qualcomm MDM9206 based IoT modem, supporting both CAT-M and NB-IoT. Tested hardware is BG96 mounted on Quectel development board (EVB). The USB id is added to qmi_wwan.c to allow QMI communication with the BG96. Signed-off-by: Sebastian Sjoholm Acked-by: Bjørn Mork Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8d4a6f7cba610a..f4ed553929f0a8 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1239,6 +1239,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ + {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ From fb89f5b05ab239ea14f87c02fcac05c957818f90 Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Thu, 23 Nov 2017 22:34:31 +0300 Subject: [PATCH 0574/3326] net: thunderx: Fix TCP/UDP checksum offload for IPv6 pkts [ Upstream commit fa6d7cb5d76cf0467c61420fc9238045aedfd379 ] Don't offload IP header checksum to NIC. This fixes a previous patch which enabled checksum offloading for both IPv4 and IPv6 packets. So L3 checksum offload was getting enabled for IPv6 pkts. And HW is dropping these pkts as it assumes the pkt is IPv4 when IP csum offload is set in the SQ descriptor. Fixes: 3a9024f52c2e ("net: thunderx: Enable TSO and checksum offloads for ipv6") Signed-off-by: Sunil Goutham Signed-off-by: Aleksey Makarov Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d4496e9afcdf37..8b2c31e2a2b028 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1355,7 +1355,6 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, /* Offload checksum calculation to HW */ if (skb->ip_summed == CHECKSUM_PARTIAL) { - hdr->csum_l3 = 1; /* Enable IP csum calculation */ hdr->l3_offset = skb_network_offset(skb); hdr->l4_offset = skb_transport_offset(skb); From ec9a6722173d3dbbbbcc5eec2f5b0f2c58057bc9 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 6 Dec 2017 01:04:50 +0100 Subject: [PATCH 0575/3326] net: thunderx: Fix TCP/UDP checksum offload for IPv4 pkts [ Upstream commit 134059fd2775be79e26c2dff87d25cc2f6ea5626 ] Offload IP header checksum to NIC. This fixes a previous patch which disabled checksum offloading for both IPv4 and IPv6 packets. So L3 checksum offload was getting disabled for IPv4 pkts. And HW is dropping these pkts for some reason. Without this patch, IPv4 TSO appears to be broken: without it I get ~16kbyte/s, with the patch close to 2mbyte/s when copying files via scp from the test box to my home workstation. Looking at tcpdump on the sender, it looks like the hardware drops IPv4 TSO skbs. This patch restores performance for me; ipv6 looks good too. Fixes: fa6d7cb5d76c ("net: thunderx: Fix TCP/UDP checksum offload for IPv6 pkts") Cc: Sunil Goutham Cc: Aleksey Makarov Cc: Eric Dumazet Signed-off-by: Florian Westphal Signed-off-by: David S.
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 8b2c31e2a2b028..a3d12dbde95b6d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1355,6 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, /* Offload checksum calculation to HW */ if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (ip.v4->version == 4) + hdr->csum_l3 = 1; /* Enable IP csum calculation */ hdr->l3_offset = skb_network_offset(skb); hdr->l4_offset = skb_transport_offset(skb); From c7203f55d53f9237bda0c0b6cf02f22f18262a5b Mon Sep 17 00:00:00 2001 From: Tobias Jakobi Date: Tue, 21 Nov 2017 16:15:57 +0100 Subject: [PATCH 0576/3326] net: realtek: r8169: implement set_link_ksettings() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 9e77d7a5549dc4d4999a60676373ab3fd1dae4db ] Commit 6fa1ba61520576cf1346c4ff09a056f2950cb3bf partially implemented the new ethtool API, by replacing get_settings() with get_link_ksettings(). This breaks ethtool, since the userspace tool (according to the new API specs) never tries the legacy set() call, when the new get() call succeeds. All attempts to chance some setting from userspace result in: > Cannot set new settings: Operation not supported Implement the missing set() call. Signed-off-by: Tobias Jakobi Tested-by: Holger Hoffstätte Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/realtek/r8169.c | 38 ++++++++++++++++------------ 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a3c949ea7d1a24..9541465e43e975 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2025,21 +2025,6 @@ static int rtl8169_set_speed(struct net_device *dev, return ret; } -static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret; - - del_timer_sync(&tp->timer); - - rtl_lock_work(tp); - ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), - cmd->duplex, cmd->advertising); - rtl_unlock_work(tp); - - return ret; -} - static netdev_features_t rtl8169_fix_features(struct net_device *dev, netdev_features_t features) { @@ -2166,6 +2151,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev, return rc; } +static int rtl8169_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int rc; + u32 advertising; + + if (!ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising)) + return -EINVAL; + + del_timer_sync(&tp->timer); + + rtl_lock_work(tp); + rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed, + cmd->base.duplex, advertising); + rtl_unlock_work(tp); + + return rc; +} + static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { @@ -2367,7 +2373,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_drvinfo = rtl8169_get_drvinfo, .get_regs_len = rtl8169_get_regs_len, .get_link = ethtool_op_get_link, - .set_settings = rtl8169_set_settings, .get_msglevel = rtl8169_get_msglevel, .set_msglevel = 
rtl8169_set_msglevel, .get_regs = rtl8169_get_regs, @@ -2379,6 +2384,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .nway_reset = rtl8169_nway_reset, .get_link_ksettings = rtl8169_get_link_ksettings, + .set_link_ksettings = rtl8169_set_link_ksettings, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, From 6efcd7eada3e76096e574e27d4e56051ffd328ab Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 18 Oct 2017 17:40:17 +0200 Subject: [PATCH 0577/3326] s390/qeth: fix early exit from error path [ Upstream commit 83cf79a2fec3cf499eb6cb9eb608656fc2a82776 ] When the allocation of the addr buffer fails, we need to free our refcount on the inetdevice before returning. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_l3_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ab661a431f7c64..1d8c317553ba83 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1553,7 +1553,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); if (!addr) - return; + goto out; spin_lock_bh(&card->ip_lock); @@ -1567,6 +1567,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, spin_unlock_bh(&card->ip_lock); kfree(addr); +out: in_dev_put(in_dev); } @@ -1591,7 +1592,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); if (!addr) - return; + goto out; spin_lock_bh(&card->ip_lock); @@ -1606,6 +1607,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, spin_unlock_bh(&card->ip_lock); kfree(addr); +out: in6_dev_put(in6_dev); #endif /* CONFIG_QETH_IPV6 */ } From 1933fa485194e697b4d90853f338029ab19e4a72 Mon Sep 17 00:00:00 2001 From: Jon Maloy Date: Mon, 4 Dec 2017 22:00:20 +0100 Subject: [PATCH 0578/3326] tipc: fix memory leak in tipc_accept_from_sock() [ Upstream commit a7d5f107b4978e08eeab599ee7449af34d034053 ] When the function tipc_accept_from_sock() fails to create an instance of struct tipc_subscriber it omits to free the already created instance of struct tipc_conn instance before it returns. We fix that with this commit. Reported-by: David S. Miller Signed-off-by: Jon Maloy Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/tipc/server.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/tipc/server.c b/net/tipc/server.c index 3cd6402e812cb0..f4c1b18c5fb07c 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -313,6 +313,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con) newcon->usr_data = s->tipc_conn_new(newcon->conid); if (!newcon->usr_data) { sock_release(newsock); + conn_put(newcon); return -ENOMEM; } From dee5b428c3b71d7201d49afd38a110fccb352922 Mon Sep 17 00:00:00 2001 From: Wei Xu Date: Fri, 1 Dec 2017 05:10:36 -0500 Subject: [PATCH 0579/3326] vhost: fix skb leak in handle_rx() [ Upstream commit 6e474083f3daf3a3546737f5d7d502ad12eb257c ] Matthew found a roughly 40% tcp throughput regression with commit c67df11f(vhost_net: try batch dequing from skb array) as discussed in the following thread: https://www.mail-archive.com/netdev@vger.kernel.org/msg187936.html Eventually we figured out that it was a skb leak in handle_rx() when sending packets to the VM. 
This usually happens when a guest can not drain out vq as fast as vhost fills in, afterwards it sets off the traffic jam and leaks skb(s) which occurs as no headcount to send on the vq from vhost side. This can be avoided by making sure we have got enough headcount before actually consuming a skb from the batched rx array while transmitting, which is simply done by moving checking the zero headcount a bit ahead. Signed-off-by: Wei Xu Reported-by: Matthew Rosato Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/vhost/net.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 58585ec8699e8d..bd15309ac5f167 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -782,16 +782,6 @@ static void handle_rx(struct vhost_net *net) /* On error, stop handling until the next kick. */ if (unlikely(headcount < 0)) goto out; - if (nvq->rx_array) - msg.msg_control = vhost_net_buf_consume(&nvq->rxq); - /* On overrun, truncate and discard */ - if (unlikely(headcount > UIO_MAXIOV)) { - iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); - err = sock->ops->recvmsg(sock, &msg, - 1, MSG_DONTWAIT | MSG_TRUNC); - pr_debug("Discarded rx packet: len %zd\n", sock_len); - continue; - } /* OK, now we need to know about added descriptors. */ if (!headcount) { if (unlikely(vhost_enable_notify(&net->dev, vq))) { @@ -804,6 +794,16 @@ static void handle_rx(struct vhost_net *net) * they refilled. */ goto out; } + if (nvq->rx_array) + msg.msg_control = vhost_net_buf_consume(&nvq->rxq); + /* On overrun, truncate and discard */ + if (unlikely(headcount > UIO_MAXIOV)) { + iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); + err = sock->ops->recvmsg(sock, &msg, + 1, MSG_DONTWAIT | MSG_TRUNC); + pr_debug("Discarded rx packet: len %zd\n", sock_len); + continue; + } /* We don't need to be notified again. */ iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); fixup = msg.msg_iter; From 9d9a63d74b2b6ed7c30c7d1584d87c16ae8d5862 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20Bugge?= Date: Wed, 6 Dec 2017 17:18:28 +0100 Subject: [PATCH 0580/3326] rds: Fix NULL pointer dereference in __rds_rdma_map MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit f3069c6d33f6ae63a1668737bc78aaaa51bff7ca ] This is a fix for syzkaller719569, where memory registration was attempted without any underlying transport being loaded. Analysis of the case reveals that it is the setsockopt() RDS_GET_MR (2) and RDS_GET_MR_FOR_DEST (7) that are vulnerable. 
Here is an example stack trace when the bug is hit: BUG: unable to handle kernel NULL pointer dereference at 00000000000000c0 IP: __rds_rdma_map+0x36/0x440 [rds] PGD 2f93d03067 P4D 2f93d03067 PUD 2f93d02067 PMD 0 Oops: 0000 [#1] SMP Modules linked in: bridge stp llc tun rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache rds binfmt_misc sb_edac intel_powerclamp coretemp kvm_intel kvm irqbypass crct10dif_pclmul c rc32_pclmul ghash_clmulni_intel pcbc aesni_intel crypto_simd glue_helper cryptd iTCO_wdt mei_me sg iTCO_vendor_support ipmi_si mei ipmi_devintf nfsd shpchp pcspkr i2c_i801 ioatd ma ipmi_msghandler wmi lpc_ich mfd_core auth_rpcgss nfs_acl lockd grace sunrpc ip_tables ext4 mbcache jbd2 mgag200 i2c_algo_bit drm_kms_helper ixgbe syscopyarea ahci sysfillrect sysimgblt libahci mdio fb_sys_fops ttm ptp libata sd_mod mlx4_core drm crc32c_intel pps_core megaraid_sas i2c_core dca dm_mirror dm_region_hash dm_log dm_mod CPU: 48 PID: 45787 Comm: repro_set2 Not tainted 4.14.2-3.el7uek.x86_64 #2 Hardware name: Oracle Corporation ORACLE SERVER X5-2L/ASM,MOBO TRAY,2U, BIOS 31110000 03/03/2017 task: ffff882f9190db00 task.stack: ffffc9002b994000 RIP: 0010:__rds_rdma_map+0x36/0x440 [rds] RSP: 0018:ffffc9002b997df0 EFLAGS: 00010202 RAX: 0000000000000000 RBX: ffff882fa2182580 RCX: 0000000000000000 RDX: 0000000000000000 RSI: ffffc9002b997e40 RDI: ffff882fa2182580 RBP: ffffc9002b997e30 R08: 0000000000000000 R09: 0000000000000002 R10: ffff885fb29e3838 R11: 0000000000000000 R12: ffff882fa2182580 R13: ffff882fa2182580 R14: 0000000000000002 R15: 0000000020000ffc FS: 00007fbffa20b700(0000) GS:ffff882fbfb80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000000c0 CR3: 0000002f98a66006 CR4: 00000000001606e0 Call Trace: rds_get_mr+0x56/0x80 [rds] rds_setsockopt+0x172/0x340 [rds] ? __fget_light+0x25/0x60 ? __fdget+0x13/0x20 SyS_setsockopt+0x80/0xe0 do_syscall_64+0x67/0x1b0 entry_SYSCALL64_slow_path+0x25/0x25 RIP: 0033:0x7fbff9b117f9 RSP: 002b:00007fbffa20aed8 EFLAGS: 00000293 ORIG_RAX: 0000000000000036 RAX: ffffffffffffffda RBX: 00000000000c84a4 RCX: 00007fbff9b117f9 RDX: 0000000000000002 RSI: 0000400000000114 RDI: 000000000000109b RBP: 00007fbffa20af10 R08: 0000000000000020 R09: 00007fbff9dd7860 R10: 0000000020000ffc R11: 0000000000000293 R12: 0000000000000000 R13: 00007fbffa20b9c0 R14: 00007fbffa20b700 R15: 0000000000000021 Code: 41 56 41 55 49 89 fd 41 54 53 48 83 ec 18 8b 87 f0 02 00 00 48 89 55 d0 48 89 4d c8 85 c0 0f 84 2d 03 00 00 48 8b 87 00 03 00 00 <48> 83 b8 c0 00 00 00 00 0f 84 25 03 00 0 0 48 8b 06 48 8b 56 08 The fix is to check the existence of an underlying transport in __rds_rdma_map(). Signed-off-by: Håkon Bugge Reported-by: syzbot Acked-by: Santosh Shilimkar Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/rds/rdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 8886f15abe90e1..bc2f1e0977d657 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -183,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, long i; int ret; - if (rs->rs_bound_addr == 0) { + if (rs->rs_bound_addr == 0 || !rs->rs_transport) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } From dacf127383fc2dabd2befbead91cbbf22c3dc2ab Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Thu, 30 Nov 2017 10:41:14 +0800 Subject: [PATCH 0581/3326] sit: update frag_off info [ Upstream commit f859b4af1c52493ec21173ccc73d0b60029b5b88 ] After parsing the sit netlink change info, we forget to update frag_off in ipip6_tunnel_update(). Fix it by assigning frag_off with new value. Reported-by: Jianlin Shi Signed-off-by: Hangbin Liu Acked-by: Nicolas Dichtel Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/sit.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index ac912bb217471c..e79854cc579081 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1087,6 +1087,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p, ipip6_tunnel_link(sitn, t); t->parms.iph.ttl = p->iph.ttl; t->parms.iph.tos = p->iph.tos; + t->parms.iph.frag_off = p->iph.frag_off; if (t->parms.link != p->link || t->fwmark != fwmark) { t->parms.link = p->link; t->fwmark = fwmark; From b604eb8deaed5be43c0402008d0a76cedaacf940 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 3 Dec 2017 09:32:59 -0800 Subject: [PATCH 0582/3326] tcp: add tcp_v4_fill_cb()/tcp_v4_restore_cb() [ Upstream commit eeea10b83a139451130df1594f26710c8fa390c8 ] James Morris reported kernel stack corruption bug [1] while running the SELinux testsuite, and bisected to a recent commit bffa72cf7f9d ("net: sk_buff rbnode reorg") We believe this commit is fine, but exposes an older bug. SELinux code runs from tcp_filter() and might send an ICMP, expecting IP options to be found in skb->cb[] using regular IPCB placement. We need to defer TCP mangling of skb->cb[] after tcp_filter() calls. This patch adds tcp_v4_fill_cb()/tcp_v4_restore_cb() in a very similar way we added them for IPv6. [1] [ 339.806024] SELinux: failure in selinux_parse_skb(), unable to parse packet [ 339.822505] Kernel panic - not syncing: stack-protector: Kernel stack is corrupted in: ffffffff81745af5 [ 339.822505] [ 339.852250] CPU: 4 PID: 3642 Comm: client Not tainted 4.15.0-rc1-test #15 [ 339.868498] Hardware name: LENOVO 10FGS0VA1L/30BC, BIOS FWKT68A 01/19/2017 [ 339.885060] Call Trace: [ 339.896875] [ 339.908103] dump_stack+0x63/0x87 [ 339.920645] panic+0xe8/0x248 [ 339.932668] ? ip_push_pending_frames+0x33/0x40 [ 339.946328] ? icmp_send+0x525/0x530 [ 339.958861] ? kfree_skbmem+0x60/0x70 [ 339.971431] __stack_chk_fail+0x1b/0x20 [ 339.984049] icmp_send+0x525/0x530 [ 339.996205] ? netlbl_skbuff_err+0x36/0x40 [ 340.008997] ? selinux_netlbl_err+0x11/0x20 [ 340.021816] ? selinux_socket_sock_rcv_skb+0x211/0x230 [ 340.035529] ? security_sock_rcv_skb+0x3b/0x50 [ 340.048471] ? sk_filter_trim_cap+0x44/0x1c0 [ 340.061246] ? tcp_v4_inbound_md5_hash+0x69/0x1b0 [ 340.074562] ? tcp_filter+0x2c/0x40 [ 340.086400] ? tcp_v4_rcv+0x820/0xa20 [ 340.098329] ? ip_local_deliver_finish+0x71/0x1a0 [ 340.111279] ? ip_local_deliver+0x6f/0xe0 [ 340.123535] ? ip_rcv_finish+0x3a0/0x3a0 [ 340.135523] ? 
ip_rcv_finish+0xdb/0x3a0 [ 340.147442] ? ip_rcv+0x27c/0x3c0 [ 340.158668] ? inet_del_offload+0x40/0x40 [ 340.170580] ? __netif_receive_skb_core+0x4ac/0x900 [ 340.183285] ? rcu_accelerate_cbs+0x5b/0x80 [ 340.195282] ? __netif_receive_skb+0x18/0x60 [ 340.207288] ? process_backlog+0x95/0x140 [ 340.218948] ? net_rx_action+0x26c/0x3b0 [ 340.230416] ? __do_softirq+0xc9/0x26a [ 340.241625] ? do_softirq_own_stack+0x2a/0x40 [ 340.253368] [ 340.262673] ? do_softirq+0x50/0x60 [ 340.273450] ? __local_bh_enable_ip+0x57/0x60 [ 340.285045] ? ip_finish_output2+0x175/0x350 [ 340.296403] ? ip_finish_output+0x127/0x1d0 [ 340.307665] ? nf_hook_slow+0x3c/0xb0 [ 340.318230] ? ip_output+0x72/0xe0 [ 340.328524] ? ip_fragment.constprop.54+0x80/0x80 [ 340.340070] ? ip_local_out+0x35/0x40 [ 340.350497] ? ip_queue_xmit+0x15c/0x3f0 [ 340.361060] ? __kmalloc_reserve.isra.40+0x31/0x90 [ 340.372484] ? __skb_clone+0x2e/0x130 [ 340.382633] ? tcp_transmit_skb+0x558/0xa10 [ 340.393262] ? tcp_connect+0x938/0xad0 [ 340.403370] ? ktime_get_with_offset+0x4c/0xb0 [ 340.414206] ? tcp_v4_connect+0x457/0x4e0 [ 340.424471] ? __inet_stream_connect+0xb3/0x300 [ 340.435195] ? inet_stream_connect+0x3b/0x60 [ 340.445607] ? SYSC_connect+0xd9/0x110 [ 340.455455] ? __audit_syscall_entry+0xaf/0x100 [ 340.466112] ? syscall_trace_enter+0x1d0/0x2b0 [ 340.476636] ? __audit_syscall_exit+0x209/0x290 [ 340.487151] ? SyS_connect+0xe/0x10 [ 340.496453] ? do_syscall_64+0x67/0x1b0 [ 340.506078] ? entry_SYSCALL64_slow_path+0x25/0x25 Fixes: 971f10eca186 ("tcp: better TCP_SKB_CB layout to reduce cache line misses") Signed-off-by: Eric Dumazet Reported-by: James Morris Tested-by: James Morris Tested-by: Casey Schaufler Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_ipv4.c | 59 ++++++++++++++++++++++++++++++--------------- net/ipv6/tcp_ipv6.c | 10 +++++--- 2 files changed, 46 insertions(+), 23 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5b027c69cbc540..5a5ed4f1467839 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1587,6 +1587,34 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(tcp_filter); +static void tcp_v4_restore_cb(struct sk_buff *skb) +{ + memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4, + sizeof(struct inet_skb_parm)); +} + +static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph, + const struct tcphdr *th) +{ + /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() + * barrier() makes sure compiler wont play fool^Waliasing games. + */ + memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), + sizeof(struct inet_skb_parm)); + barrier(); + + TCP_SKB_CB(skb)->seq = ntohl(th->seq); + TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + + skb->len - th->doff * 4); + TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); + TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); + TCP_SKB_CB(skb)->tcp_tw_isn = 0; + TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); + TCP_SKB_CB(skb)->sacked = 0; + TCP_SKB_CB(skb)->has_rxtstamp = + skb->tstamp || skb_hwtstamps(skb)->hwtstamp; +} + /* * From tcp_input.c */ @@ -1627,24 +1655,6 @@ int tcp_v4_rcv(struct sk_buff *skb) th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); - /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() - * barrier() makes sure compiler wont play fool^Waliasing games. 
- */ - memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), - sizeof(struct inet_skb_parm)); - barrier(); - - TCP_SKB_CB(skb)->seq = ntohl(th->seq); - TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + - skb->len - th->doff * 4); - TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); - TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); - TCP_SKB_CB(skb)->tcp_tw_isn = 0; - TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); - TCP_SKB_CB(skb)->sacked = 0; - TCP_SKB_CB(skb)->has_rxtstamp = - skb->tstamp || skb_hwtstamps(skb)->hwtstamp; - lookup: sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, th->dest, sdif, &refcounted); @@ -1675,14 +1685,19 @@ int tcp_v4_rcv(struct sk_buff *skb) sock_hold(sk); refcounted = true; nsk = NULL; - if (!tcp_filter(sk, skb)) + if (!tcp_filter(sk, skb)) { + th = (const struct tcphdr *)skb->data; + iph = ip_hdr(skb); + tcp_v4_fill_cb(skb, iph, th); nsk = tcp_check_req(sk, skb, req, false); + } if (!nsk) { reqsk_put(req); goto discard_and_relse; } if (nsk == sk) { reqsk_put(req); + tcp_v4_restore_cb(skb); } else if (tcp_child_process(sk, nsk, skb)) { tcp_v4_send_reset(nsk, skb); goto discard_and_relse; @@ -1708,6 +1723,7 @@ int tcp_v4_rcv(struct sk_buff *skb) goto discard_and_relse; th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); + tcp_v4_fill_cb(skb, iph, th); skb->dev = NULL; @@ -1738,6 +1754,8 @@ int tcp_v4_rcv(struct sk_buff *skb) if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; + tcp_v4_fill_cb(skb, iph, th); + if (tcp_checksum_complete(skb)) { csum_error: __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); @@ -1764,6 +1782,8 @@ int tcp_v4_rcv(struct sk_buff *skb) goto discard_it; } + tcp_v4_fill_cb(skb, iph, th); + if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; @@ -1780,6 +1800,7 @@ int tcp_v4_rcv(struct sk_buff *skb) if (sk2) { inet_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; + tcp_v4_restore_cb(skb); refcounted = false; goto process; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 64d94afa427f81..067350ac843a11 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1448,7 +1448,6 @@ static int tcp_v6_rcv(struct sk_buff *skb) struct sock *nsk; sk = req->rsk_listener; - tcp_v6_fill_cb(skb, hdr, th); if (tcp_v6_inbound_md5_hash(sk, skb)) { sk_drops_add(sk, skb); reqsk_put(req); @@ -1461,8 +1460,12 @@ static int tcp_v6_rcv(struct sk_buff *skb) sock_hold(sk); refcounted = true; nsk = NULL; - if (!tcp_filter(sk, skb)) + if (!tcp_filter(sk, skb)) { + th = (const struct tcphdr *)skb->data; + hdr = ipv6_hdr(skb); + tcp_v6_fill_cb(skb, hdr, th); nsk = tcp_check_req(sk, skb, req, false); + } if (!nsk) { reqsk_put(req); goto discard_and_relse; @@ -1486,8 +1489,6 @@ static int tcp_v6_rcv(struct sk_buff *skb) if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; - tcp_v6_fill_cb(skb, hdr, th); - if (tcp_v6_inbound_md5_hash(sk, skb)) goto discard_and_relse; @@ -1495,6 +1496,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) goto discard_and_relse; th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); + tcp_v6_fill_cb(skb, hdr, th); skb->dev = NULL; From 7263f11b567193235a676584dfefd0b46c8b08a1 Mon Sep 17 00:00:00 2001 From: Mike Maloney Date: Tue, 28 Nov 2017 10:44:29 -0500 Subject: [PATCH 0583/3326] packet: fix crash in fanout_demux_rollover() syzkaller found a race condition fanout_demux_rollover() while removing a packet socket from a fanout group. 
po->rollover is read and operated on during packet_rcv_fanout(), via fanout_demux_rollover(), but the pointer is currently cleared before the synchronization in packet_release(). It is safer to delay the cleanup until after synchronize_net() has been called, ensuring all calls to packet_rcv_fanout() for this socket have finished. To further simplify synchronization around the rollover structure, set po->rollover in fanout_add() only if there are no errors. This removes the need for rcu in the struct and in the call to packet_getsockopt(..., PACKET_ROLLOVER_STATS, ...). Crashing stack trace: fanout_demux_rollover+0xb6/0x4d0 net/packet/af_packet.c:1392 packet_rcv_fanout+0x649/0x7c8 net/packet/af_packet.c:1487 dev_queue_xmit_nit+0x835/0xc10 net/core/dev.c:1953 xmit_one net/core/dev.c:2975 [inline] dev_hard_start_xmit+0x16b/0xac0 net/core/dev.c:2995 __dev_queue_xmit+0x17a4/0x2050 net/core/dev.c:3476 dev_queue_xmit+0x17/0x20 net/core/dev.c:3509 neigh_connected_output+0x489/0x720 net/core/neighbour.c:1379 neigh_output include/net/neighbour.h:482 [inline] ip6_finish_output2+0xad1/0x22a0 net/ipv6/ip6_output.c:120 ip6_finish_output+0x2f9/0x920 net/ipv6/ip6_output.c:146 NF_HOOK_COND include/linux/netfilter.h:239 [inline] ip6_output+0x1f4/0x850 net/ipv6/ip6_output.c:163 dst_output include/net/dst.h:459 [inline] NF_HOOK.constprop.35+0xff/0x630 include/linux/netfilter.h:250 mld_sendpack+0x6a8/0xcc0 net/ipv6/mcast.c:1660 mld_send_initial_cr.part.24+0x103/0x150 net/ipv6/mcast.c:2072 mld_send_initial_cr net/ipv6/mcast.c:2056 [inline] ipv6_mc_dad_complete+0x99/0x130 net/ipv6/mcast.c:2079 addrconf_dad_completed+0x595/0x970 net/ipv6/addrconf.c:4039 addrconf_dad_work+0xac9/0x1160 net/ipv6/addrconf.c:3971 process_one_work+0xbf0/0x1bc0 kernel/workqueue.c:2113 worker_thread+0x223/0x1990 kernel/workqueue.c:2247 kthread+0x35e/0x430 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:432 Fixes: 0648ab70afe6 ("packet: rollover prepare: per-socket state") Fixes: 509c7a1ecc860 ("packet: avoid panic in packet_getsockopt()") Reported-by: syzbot Signed-off-by: Mike Maloney Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/packet/af_packet.c | 32 ++++++++++---------------------- net/packet/internal.h | 1 - 2 files changed, 10 insertions(+), 23 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2986941164b195..2eb4f1e5b8613d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1697,7 +1697,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) atomic_long_set(&rollover->num, 0); atomic_long_set(&rollover->num_huge, 0); atomic_long_set(&rollover->num_failed, 0); - po->rollover = rollover; } if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { @@ -1755,6 +1754,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { __dev_remove_pack(&po->prot_hook); po->fanout = match; + po->rollover = rollover; + rollover = NULL; refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); __fanout_link(sk, po); err = 0; @@ -1768,10 +1769,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) } out: - if (err && rollover) { - kfree_rcu(rollover, rcu); - po->rollover = NULL; - } + kfree(rollover); mutex_unlock(&fanout_mutex); return err; } @@ -1795,11 +1793,6 @@ static struct packet_fanout *fanout_release(struct sock *sk) list_del(&f->list); else f = NULL; - - if (po->rollover) { - kfree_rcu(po->rollover, rcu); - po->rollover = NULL; - } } mutex_unlock(&fanout_mutex); @@ -3039,6 +3032,7 @@ static int packet_release(struct socket *sock) synchronize_net(); if (f) { + kfree(po->rollover); fanout_release_data(f); kfree(f); } @@ -3853,7 +3847,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, void *data = &val; union tpacket_stats_u st; struct tpacket_rollover_stats rstats; - struct packet_rollover *rollover; if (level != SOL_PACKET) return -ENOPROTOOPT; @@ -3932,18 +3925,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, 0); break; case PACKET_ROLLOVER_STATS: - rcu_read_lock(); - rollover = rcu_dereference(po->rollover); - if (rollover) { - rstats.tp_all = atomic_long_read(&rollover->num); - rstats.tp_huge = atomic_long_read(&rollover->num_huge); - rstats.tp_failed = atomic_long_read(&rollover->num_failed); - data = &rstats; - lv = sizeof(rstats); - } - rcu_read_unlock(); - if (!rollover) + if (!po->rollover) return -EINVAL; + rstats.tp_all = atomic_long_read(&po->rollover->num); + rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); + rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); + data = &rstats; + lv = sizeof(rstats); break; case PACKET_TX_HAS_OFF: val = po->tp_tx_has_off; diff --git a/net/packet/internal.h b/net/packet/internal.h index 562fbc15500637..a1d2b2319ae990 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -95,7 +95,6 @@ struct packet_fanout { struct packet_rollover { int sock; - struct rcu_head rcu; atomic_long_t num; atomic_long_t num_huge; atomic_long_t num_failed; From 589983eb9986ea9c851c8906a81781f317207313 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 28 Nov 2017 08:03:30 -0800 Subject: [PATCH 0584/3326] net/packet: fix a race in packet_bind() and packet_notifier() [ Upstream commit 15fe076edea787807a7cdc168df832544b58eba6 ] syzbot reported crashes [1] and provided a C repro easing bug hunting. When/if packet_do_bind() calls __unregister_prot_hook() and releases po->bind_lock, another thread can run packet_notifier() and process an NETDEV_UP event. 
This calls register_prot_hook() and hooks again the socket right before first thread is able to grab again po->bind_lock. Fixes this issue by temporarily setting po->num to 0, as suggested by David Miller. [1] dev_remove_pack: ffff8801bf16fa80 not found ------------[ cut here ]------------ kernel BUG at net/core/dev.c:7945! ( BUG_ON(!list_empty(&dev->ptype_all)); ) invalid opcode: 0000 [#1] SMP KASAN Dumping ftrace buffer: (ftrace buffer empty) Modules linked in: device syz0 entered promiscuous mode CPU: 0 PID: 3161 Comm: syzkaller404108 Not tainted 4.14.0+ #190 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 task: ffff8801cc57a500 task.stack: ffff8801cc588000 RIP: 0010:netdev_run_todo+0x772/0xae0 net/core/dev.c:7945 RSP: 0018:ffff8801cc58f598 EFLAGS: 00010293 RAX: ffff8801cc57a500 RBX: dffffc0000000000 RCX: ffffffff841f75b2 RDX: 0000000000000000 RSI: 1ffff100398b1ede RDI: ffff8801bf1f8810 device syz0 entered promiscuous mode RBP: ffff8801cc58f898 R08: 0000000000000001 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffff8801bf1f8cd8 R13: ffff8801cc58f870 R14: ffff8801bf1f8780 R15: ffff8801cc58f7f0 FS: 0000000001716880(0000) GS:ffff8801db400000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000020b13000 CR3: 0000000005e25000 CR4: 00000000001406f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: rtnl_unlock+0xe/0x10 net/core/rtnetlink.c:106 tun_detach drivers/net/tun.c:670 [inline] tun_chr_close+0x49/0x60 drivers/net/tun.c:2845 __fput+0x333/0x7f0 fs/file_table.c:210 ____fput+0x15/0x20 fs/file_table.c:244 task_work_run+0x199/0x270 kernel/task_work.c:113 exit_task_work include/linux/task_work.h:22 [inline] do_exit+0x9bb/0x1ae0 kernel/exit.c:865 do_group_exit+0x149/0x400 kernel/exit.c:968 SYSC_exit_group kernel/exit.c:979 [inline] SyS_exit_group+0x1d/0x20 kernel/exit.c:977 entry_SYSCALL_64_fastpath+0x1f/0x96 RIP: 0033:0x44ad19 Fixes: 30f7ea1c2b5f ("packet: race condition in packet_bind") Signed-off-by: Eric Dumazet Reported-by: syzbot Cc: Francesco Ruggeri Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/packet/af_packet.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2eb4f1e5b8613d..f4a0587b7d5ec8 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3101,6 +3101,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, if (need_rehook) { if (po->running) { rcu_read_unlock(); + /* prevents packet_notifier() from calling + * register_prot_hook() + */ + po->num = 0; __unregister_prot_hook(sk, true); rcu_read_lock(); dev_curr = po->prot_hook.dev; @@ -3109,6 +3113,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, dev->ifindex); } + BUG_ON(po->running); po->num = proto; po->prot_hook.type = proto; From f8e5ef4ea8f1037f3f7cfd12291f55baa82e5251 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 29 Nov 2017 17:43:57 -0800 Subject: [PATCH 0585/3326] tcp: remove buggy call to tcp_v6_restore_cb() [ Upstream commit 3016dad75b48279e579117ee3ed566ba90a3b023 ] tcp_v6_send_reset() expects to receive an skb with skb->cb[] layout as used in TCP stack. MD5 lookup uses tcp_v6_iif() and tcp_v6_sdif() and thus TCP_SKB_CB(skb)->header.h6 This patch probably fixes RST packets sent on behalf of a timewait md5 ipv6 socket. 
Before Florian patch, tcp_v6_restore_cb() was needed before jumping to no_tcp_socket label. Fixes: 271c3b9b7bda ("tcp: honour SO_BINDTODEVICE for TW_RST case too") Signed-off-by: Eric Dumazet Cc: Florian Westphal Acked-by: Florian Westphal Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/tcp_ipv6.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 067350ac843a11..32ded300633d86 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1585,7 +1585,6 @@ static int tcp_v6_rcv(struct sk_buff *skb) tcp_v6_timewait_ack(sk, skb); break; case TCP_TW_RST: - tcp_v6_restore_cb(skb); tcp_v6_send_reset(sk, skb); inet_twsk_deschedule_put(inet_twsk(sk)); goto discard_it; From bea712a8a5aa9c58e06e3bb894dc25633898c450 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Wed, 6 Dec 2017 20:21:24 +0100 Subject: [PATCH 0586/3326] usbnet: fix alignment for frames with no ethernet header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a4abd7a80addb4a9547f7dfc7812566b60ec505c ] The qmi_wwan minidriver support a 'raw-ip' mode where frames are received without any ethernet header. This causes alignment issues because the skbs allocated by usbnet are "IP aligned". Fix by allowing minidrivers to disable the additional alignment offset. This is implemented using a per-device flag, since the same minidriver also supports 'ethernet' mode. Fixes: 32f7adf633b9 ("net: qmi_wwan: support "raw IP" mode") Reported-and-tested-by: Jay Foster Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/qmi_wwan.c | 2 ++ drivers/net/usb/usbnet.c | 5 ++++- include/linux/usb/usbnet.h | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index f4ed553929f0a8..81394a4b280346 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net) net->hard_header_len = 0; net->addr_len = 0; net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + set_bit(EVENT_NO_IP_ALIGN, &dev->flags); netdev_dbg(net, "mode: raw IP\n"); } else if (!net->header_ops) { /* don't bother if already set */ ether_setup(net); + clear_bit(EVENT_NO_IP_ALIGN, &dev->flags); netdev_dbg(net, "mode: Ethernet\n"); } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 6510e5cc1817cf..42baad125a7d0b 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -484,7 +484,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) return -ENOLINK; } - skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); + if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags)) + skb = __netdev_alloc_skb(dev->net, size, flags); + else + skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); if (!skb) { netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); usbnet_defer_kevent (dev, EVENT_RX_MEMORY); diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 97116379db5ff6..e87a805cbfeff7 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -81,6 +81,7 @@ struct usbnet { # define EVENT_RX_KILL 10 # define EVENT_LINK_CHANGE 11 # define EVENT_SET_RX_MODE 12 +# define EVENT_NO_IP_ALIGN 13 }; static inline struct usb_driver *driver_of(struct usb_interface *intf) From 6878a06356a36d69b18916d2f0c483f21e0885d5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 5 
Dec 2017 12:45:56 -0800 Subject: [PATCH 0587/3326] net: remove hlist_nulls_add_tail_rcu() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit d7efc6c11b277d9d80b99b1334a78bfe7d7edf10 ] Alexander Potapenko reported use of uninitialized memory [1] This happens when inserting a request socket into TCP ehash, in __sk_nulls_add_node_rcu(), since sk_reuseport is not initialized. Bug was added by commit d894ba18d4e4 ("soreuseport: fix ordering for mixed v4/v6 sockets") Note that d296ba60d8e2 ("soreuseport: Resolve merge conflict for v4/v6 ordering fix") missed the opportunity to get rid of hlist_nulls_add_tail_rcu() : Both UDP sockets and TCP/DCCP listeners no longer use __sk_nulls_add_node_rcu() for their hash insertion. Since all other sockets have unique 4-tuple, the reuseport status has no special meaning, so we can always use hlist_nulls_add_head_rcu() for them and save few cycles/instructions. [1] ================================================================== BUG: KMSAN: use of uninitialized memory in inet_ehash_insert+0xd40/0x1050 CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.13.0+ #3288 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace:    __dump_stack lib/dump_stack.c:16  dump_stack+0x185/0x1d0 lib/dump_stack.c:52  kmsan_report+0x13f/0x1c0 mm/kmsan/kmsan.c:1016  __msan_warning_32+0x69/0xb0 mm/kmsan/kmsan_instr.c:766  __sk_nulls_add_node_rcu ./include/net/sock.h:684  inet_ehash_insert+0xd40/0x1050 net/ipv4/inet_hashtables.c:413  reqsk_queue_hash_req net/ipv4/inet_connection_sock.c:754  inet_csk_reqsk_queue_hash_add+0x1cc/0x300 net/ipv4/inet_connection_sock.c:765  tcp_conn_request+0x31e7/0x36f0 net/ipv4/tcp_input.c:6414  tcp_v4_conn_request+0x16d/0x220 net/ipv4/tcp_ipv4.c:1314  tcp_rcv_state_process+0x42a/0x7210 net/ipv4/tcp_input.c:5917  tcp_v4_do_rcv+0xa6a/0xcd0 net/ipv4/tcp_ipv4.c:1483  tcp_v4_rcv+0x3de0/0x4ab0 net/ipv4/tcp_ipv4.c:1763  ip_local_deliver_finish+0x6bb/0xcb0 net/ipv4/ip_input.c:216  NF_HOOK ./include/linux/netfilter.h:248  ip_local_deliver+0x3fa/0x480 net/ipv4/ip_input.c:257  dst_input ./include/net/dst.h:477  ip_rcv_finish+0x6fb/0x1540 net/ipv4/ip_input.c:397  NF_HOOK ./include/linux/netfilter.h:248  ip_rcv+0x10f6/0x15c0 net/ipv4/ip_input.c:488  __netif_receive_skb_core+0x36f6/0x3f60 net/core/dev.c:4298  __netif_receive_skb net/core/dev.c:4336  netif_receive_skb_internal+0x63c/0x19c0 net/core/dev.c:4497  napi_skb_finish net/core/dev.c:4858  napi_gro_receive+0x629/0xa50 net/core/dev.c:4889  e1000_receive_skb drivers/net/ethernet/intel/e1000/e1000_main.c:4018  e1000_clean_rx_irq+0x1492/0x1d30 drivers/net/ethernet/intel/e1000/e1000_main.c:4474  e1000_clean+0x43aa/0x5970 drivers/net/ethernet/intel/e1000/e1000_main.c:3819  napi_poll net/core/dev.c:5500  net_rx_action+0x73c/0x1820 net/core/dev.c:5566  __do_softirq+0x4b4/0x8dd kernel/softirq.c:284  invoke_softirq kernel/softirq.c:364  irq_exit+0x203/0x240 kernel/softirq.c:405  exiting_irq+0xe/0x10 ./arch/x86/include/asm/apic.h:638  do_IRQ+0x15e/0x1a0 arch/x86/kernel/irq.c:263  common_interrupt+0x86/0x86 Fixes: d894ba18d4e4 ("soreuseport: fix ordering for mixed v4/v6 sockets") Fixes: d296ba60d8e2 ("soreuseport: Resolve merge conflict for v4/v6 ordering fix") Signed-off-by: Eric Dumazet Reported-by: Alexander Potapenko Acked-by: Craig Gallek Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/linux/rculist_nulls.h | 38 ----------------------------------- include/net/sock.h | 6 +----- 2 files changed, 1 insertion(+), 43 deletions(-) diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index a328e8181e49f3..e4b257ff881bfe 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -100,44 +100,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, first->pprev = &n->next; } -/** - * hlist_nulls_add_tail_rcu - * @n: the element to add to the hash list. - * @h: the list to add to. - * - * Description: - * Adds the specified element to the end of the specified hlist_nulls, - * while permitting racing traversals. NOTE: tail insertion requires - * list traversal. - * - * The caller must take whatever precautions are necessary - * (such as holding appropriate locks) to avoid racing - * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() - * or hlist_nulls_del_rcu(), running on this same list. - * However, it is perfectly legal to run concurrently with - * the _rcu list-traversal primitives, such as - * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency - * problems on Alpha CPUs. Regardless of the type of CPU, the - * list-traversal primitive must be guarded by rcu_read_lock(). - */ -static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, - struct hlist_nulls_head *h) -{ - struct hlist_nulls_node *i, *last = NULL; - - for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i); - i = hlist_nulls_next_rcu(i)) - last = i; - - if (last) { - n->next = last->next; - n->pprev = &last->next; - rcu_assign_pointer(hlist_nulls_next_rcu(last), n); - } else { - hlist_nulls_add_head_rcu(n, h); - } -} - /** * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type * @tpos: the type * to use as a loop cursor. diff --git a/include/net/sock.h b/include/net/sock.h index a6b9a8d1a6df3f..006580155a87e3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -683,11 +683,7 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) { - if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && - sk->sk_family == AF_INET6) - hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list); - else - hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); } static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) From 8b01623c73946edf7946945400305dbc5f1c85a6 Mon Sep 17 00:00:00 2001 From: Lars Persson Date: Fri, 1 Dec 2017 11:12:44 +0100 Subject: [PATCH 0588/3326] stmmac: reset last TSO segment size after device open [ Upstream commit 45ab4b13e46325d00f4acdb365d406e941a15f81 ] The mss variable tracks the last max segment size sent to the TSO engine. We do not update the hardware as long as we receive skb:s with the same value in gso_size. During a network device down/up cycle (mapped to stmmac_release() and stmmac_open() callbacks) we issue a reset to the hardware and it forgets the setting for mss. However we did not zero out our mss variable so the next transmission of a gso packet happens with an undefined hardware setting. This triggers a hang in the TSO engine and eventuelly the netdev watchdog will bark. Fixes: f748be531d70 ("stmmac: support new GMAC4") Signed-off-by: Lars Persson Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 16bd509290844b..28c4d6fa096c95 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2564,6 +2564,7 @@ static int stmmac_open(struct net_device *dev) priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); priv->rx_copybreak = STMMAC_RX_COPYBREAK; + priv->mss = 0; ret = alloc_dma_desc_resources(priv); if (ret < 0) { From 5fa411e8555fd3e1f7fc77290b7bd9977203fa52 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Dec 2017 10:06:56 -0800 Subject: [PATCH 0589/3326] tcp/dccp: block bh before arming time_wait timer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit cfac7f836a715b91f08c851df915d401a4d52783 ] Maciej Żenczykowski reported some panics in tcp_twsk_destructor() that might be caused by the following bug. timewait timer is pinned to the cpu, because we want to transition timwewait refcount from 0 to 4 in one go, once everything has been initialized. At the time commit ed2e92394589 ("tcp/dccp: fix timewait races in timer handling") was merged, TCP was always running from BH habdler. After commit 5413d1babe8f ("net: do not block BH while processing socket backlog") we definitely can run tcp_time_wait() from process context. We need to block BH in the critical section so that the pinned timer has still its purpose. This bug is more likely to happen under stress and when very small RTO are used in datacenter flows. Fixes: 5413d1babe8f ("net: do not block BH while processing socket backlog") Signed-off-by: Eric Dumazet Reported-by: Maciej Żenczykowski Acked-by: Maciej Żenczykowski Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/dccp/minisocks.c | 6 ++++++ net/ipv4/tcp_minisocks.c | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index abd07a44321985..178bb9833311f8 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) if (state == DCCP_TIME_WAIT) timeo = DCCP_TIMEWAIT_LEN; + /* tw_timer is pinned, so we need to make sure BH are disabled + * in following section, otherwise timer handler could run before + * we complete the initialization. + */ + local_bh_disable(); inet_twsk_schedule(tw, timeo); /* Linkage updates. */ __inet_twsk_hashdance(tw, sk, &dccp_hashinfo); inet_twsk_put(tw); + local_bh_enable(); } else { /* Sorry, if we're out of memory, just CLOSE this * socket up. We've got bigger problems than diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 188a6f31356db0..420fecbb98fe7e 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -312,10 +312,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) if (state == TCP_TIME_WAIT) timeo = TCP_TIMEWAIT_LEN; + /* tw_timer is pinned, so we need to make sure BH are disabled + * in following section, otherwise timer handler could run before + * we complete the initialization. + */ + local_bh_disable(); inet_twsk_schedule(tw, timeo); /* Linkage updates. */ __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); inet_twsk_put(tw); + local_bh_enable(); } else { /* Sorry, if we're out of memory, just CLOSE this * socket up. 
We've got bigger problems than From cfa19a2edf31f8c4d90d6e936a65ba2072d67bb9 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Fri, 1 Dec 2017 10:14:51 +0100 Subject: [PATCH 0590/3326] s390/qeth: build max size GSO skbs on L2 devices [ Upstream commit 0cbff6d4546613330a1c5f139f5c368e4ce33ca1 ] The current GSO skb size limit was copy&pasted over from the L3 path, where it is needed due to a TSO limitation. As L2 devices don't offer TSO support (and thus all GSO skbs are segmented before they reach the driver), there's no reason to restrict the stack in how large it may build the GSO skbs. Fixes: d52aec97e5bc ("qeth: enable scatter/gather in layer 2 mode") Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_l2_main.c | 2 -- drivers/s390/net/qeth_l3_main.c | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 760b023eae9562..1c977ab10aa7e0 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1027,8 +1027,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->info.broadcast_capable = 1; qeth_l2_request_initial_mac(card); - card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * - PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); netif_carrier_off(card->dev); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 1d8c317553ba83..75005bbd80b9fb 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2989,8 +2989,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netif_keep_dst(card->dev); - card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * - PAGE_SIZE; + netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * + PAGE_SIZE); SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); From 79651202a2228b53973e73da1f6b676579df951e Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Fri, 1 Dec 2017 10:14:49 +0100 Subject: [PATCH 0591/3326] s390/qeth: fix thinko in IPv4 multicast address tracking [ Upsteam commit bc3ab70584696cb798b9e1e0ac8e6ced5fd4c3b8 ] Commit 5f78e29ceebf ("qeth: optimize IP handling in rx_mode callback") reworked how secondary addresses are managed for qeth devices. Instead of dropping & subsequently re-adding all addresses on every ndo_set_rx_mode() call, qeth now keeps track of the addresses that are currently registered with the HW. On a ndo_set_rx_mode(), we thus only need to do (de-)registration requests for the addresses that have actually changed. On L3 devices, the lookup for IPv4 Multicast addresses checks the wrong hashtable - and thus never finds a match. As a result, we first delete *all* such addresses, and then re-add them again. So each set_rx_mode() causes a short period where the IPv4 Multicast addresses are not registered, and the card stops forwarding inbound traffic for them. Fix this by setting the ->is_multicast flag on the lookup object, thus enabling qeth_l3_ip_from_hash() to search the correct hashtable and find a match there. Fixes: 5f78e29ceebf ("qeth: optimize IP handling in rx_mode callback") Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_l3_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 75005bbd80b9fb..11f65b87932275 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1376,6 +1376,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); memcpy(tmp->mac, buf, sizeof(tmp->mac)); + tmp->is_multicast = 1; ipm = qeth_l3_ip_from_hash(card, tmp); if (ipm) { From effea096caa1732d2eff941c1beabc9c496b087e Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Fri, 1 Dec 2017 10:14:50 +0100 Subject: [PATCH 0592/3326] s390/qeth: fix GSO throughput regression [ Upstream commit 6d69b1f1eb7a2edf8a3547f361c61f2538e054bb ] Using GSO with small MTUs currently results in a substantial throughput regression - which is caused by how qeth needs to map non-linear skbs into its IO buffer elements: compared to a linear skb, each GSO-segmented skb effectively consumes twice as many buffer elements (ie two instead of one) due to the additional header-only part. This causes the Output Queue to be congested with low-utilized IO buffers. Fix this as follows: If the MSS is low enough so that a non-SG GSO segmentation produces order-0 skbs (currently ~3500 byte), opt out from NETIF_F_SG. This is where we anticipate the biggest savings, since an SG-enabled GSO segmentation produces skbs that always consume at least two buffer elements. Larger MSS values continue to get a SG-enabled GSO segmentation, since 1) the relative overhead of the additional header-only buffer element becomes less noticeable, and 2) the linearization overhead increases. With the throughput regression fixed, re-enable NETIF_F_SG by default to reap the significant CPU savings of GSO. Fixes: 5722963a8e83 ("qeth: do not turn on SG per default") Reported-by: Nils Hoppmann Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_core.h | 3 +++ drivers/s390/net/qeth_core_main.c | 31 +++++++++++++++++++++++++++++++ drivers/s390/net/qeth_l2_main.c | 2 ++ drivers/s390/net/qeth_l3_main.c | 2 ++ 4 files changed, 38 insertions(+) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 47a13c5723c6a7..5340efc673a924 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -985,6 +985,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, int qeth_set_features(struct net_device *, netdev_features_t); int qeth_recover_features(struct net_device *); netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); +netdev_features_t qeth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); int qeth_vm_request_mac(struct qeth_card *card); int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index bae7440abc01e3..330e5d3dadf38a 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -19,6 +19,11 @@ #include #include #include +#include +#include +#include +#include + #include #include @@ -6505,6 +6510,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev, } EXPORT_SYMBOL_GPL(qeth_fix_features); +netdev_features_t qeth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + /* GSO segmentation builds skbs with + * a (small) linear part for the headers, and + * page frags for the data. + * Compared to a linear skb, the header-only part consumes an + * additional buffer element. This reduces buffer utilization, and + * hurts throughput. So compress small segments into one element. 
+ */ + if (netif_needs_gso(skb, features)) { + /* match skb_segment(): */ + unsigned int doffset = skb->data - skb_mac_header(skb); + unsigned int hsize = skb_shinfo(skb)->gso_size; + unsigned int hroom = skb_headroom(skb); + + /* linearize only if resulting skb allocations are order-0: */ + if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) + features &= ~NETIF_F_SG; + } + + return vlan_features_check(skb, features); +} +EXPORT_SYMBOL_GPL(qeth_features_check); + static int __init qeth_core_init(void) { int rc; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 1c977ab10aa7e0..5a973ebcb13c6f 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -963,6 +963,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_stop = qeth_l2_stop, .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l2_hard_start_xmit, + .ndo_features_check = qeth_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l2_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -1009,6 +1010,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { card->dev->hw_features = NETIF_F_SG; card->dev->vlan_features = NETIF_F_SG; + card->dev->features |= NETIF_F_SG; /* OSA 3S and earlier has no RX/TX support */ if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) { card->dev->hw_features |= NETIF_F_IP_CSUM; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 11f65b87932275..27185ab38136fc 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2923,6 +2923,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_stop = qeth_l3_stop, .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l3_hard_start_xmit, + .ndo_features_check = qeth_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_multicast_list, .ndo_do_ioctl = qeth_do_ioctl, @@ -2963,6 +2964,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->vlan_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO; + card->dev->features |= NETIF_F_SG; } } } else if (card->info.type == QETH_CARD_TYPE_IQD) { From efb7dc5a9658de725de8d0b2016dbe198406d93e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sun, 3 Dec 2017 09:33:00 -0800 Subject: [PATCH 0593/3326] tcp: use IPCB instead of TCP_SKB_CB in inet_exact_dif_match() [ Usptream commit b4d1605a8ea608fd7dc45b926a05d75d340bde4b ] After this fix : ("tcp: add tcp_v4_fill_cb()/tcp_v4_restore_cb()"), socket lookups happen while skb->cb[] has not been mangled yet by TCP. Fixes: a04a480d4392 ("net: Require exact match for TCP socket lookups if dif is l3mdev") Signed-off-by: David Ahern Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/net/tcp.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index e6d0002a1b0bc5..5dfc92ff9ffde9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -874,12 +874,11 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) } #endif -/* TCP_SKB_CB reference means this can not be used from early demux */ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) if (!net->ipv4.sysctl_tcp_l3mdev_accept && - skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) + skb && ipv4_l3mdev_skb(IPCB(skb)->flags)) return true; #endif return false; From de514e0609ea1fcf1f70faf427ec9b693d007f8b Mon Sep 17 00:00:00 2001 From: Tommi Rantala Date: Wed, 29 Nov 2017 12:48:42 +0200 Subject: [PATCH 0594/3326] tipc: call tipc_rcv() only if bearer is up in tipc_udp_recv() [ Upstream commit c7799c067c2ae33e348508c8afec354f3257ff25 ] Remove the second tipc_rcv() call in tipc_udp_recv(). We have just checked that the bearer is not up, and calling tipc_rcv() with a bearer that is not up leads to a TIPC div-by-zero crash in tipc_node_calculate_timer(). The crash is rare in practice, but can happen like this: We're enabling a bearer, but it's not yet up and fully initialized. At the same time we receive a discovery packet, and in tipc_udp_recv() we end up calling tipc_rcv() with the not-yet-initialized bearer, causing later the div-by-zero crash in tipc_node_calculate_timer(). Jon Maloy explains the impact of removing the second tipc_rcv() call: "link setup in the worst case will be delayed until the next arriving discovery messages, 1 sec later, and this is an acceptable delay." As the tipc_rcv() call is removed, just leave the function via the rcu_out label, so that we will kfree_skb(). [ 12.590450] Own node address <1.1.1>, network identity 1 [ 12.668088] divide error: 0000 [#1] SMP [ 12.676952] CPU: 2 PID: 0 Comm: swapper/2 Not tainted 4.14.2-dirty #1 [ 12.679225] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-2.fc27 04/01/2014 [ 12.682095] task: ffff8c2a761edb80 task.stack: ffffa41cc0cac000 [ 12.684087] RIP: 0010:tipc_node_calculate_timer.isra.12+0x45/0x60 [tipc] [ 12.686486] RSP: 0018:ffff8c2a7fc838a0 EFLAGS: 00010246 [ 12.688451] RAX: 0000000000000000 RBX: ffff8c2a5b382600 RCX: 0000000000000000 [ 12.691197] RDX: 0000000000000000 RSI: ffff8c2a5b382600 RDI: ffff8c2a5b382600 [ 12.693945] RBP: ffff8c2a7fc838b0 R08: 0000000000000001 R09: 0000000000000001 [ 12.696632] R10: 0000000000000000 R11: 0000000000000000 R12: ffff8c2a5d8949d8 [ 12.699491] R13: ffffffff95ede400 R14: 0000000000000000 R15: ffff8c2a5d894800 [ 12.702338] FS: 0000000000000000(0000) GS:ffff8c2a7fc80000(0000) knlGS:0000000000000000 [ 12.705099] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 12.706776] CR2: 0000000001bb9440 CR3: 00000000bd009001 CR4: 00000000003606e0 [ 12.708847] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 12.711016] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 12.712627] Call Trace: [ 12.713390] [ 12.714011] tipc_node_check_dest+0x2e8/0x350 [tipc] [ 12.715286] tipc_disc_rcv+0x14d/0x1d0 [tipc] [ 12.716370] tipc_rcv+0x8b0/0xd40 [tipc] [ 12.717396] ? minmax_running_min+0x2f/0x60 [ 12.718248] ? dst_alloc+0x4c/0xa0 [ 12.718964] ? tcp_ack+0xaf1/0x10b0 [ 12.719658] ? tipc_udp_is_known_peer+0xa0/0xa0 [tipc] [ 12.720634] tipc_udp_recv+0x71/0x1d0 [tipc] [ 12.721459] ? 
dst_alloc+0x4c/0xa0 [ 12.722130] udp_queue_rcv_skb+0x264/0x490 [ 12.722924] __udp4_lib_rcv+0x21e/0x990 [ 12.723670] ? ip_route_input_rcu+0x2dd/0xbf0 [ 12.724442] ? tcp_v4_rcv+0x958/0xa40 [ 12.725039] udp_rcv+0x1a/0x20 [ 12.725587] ip_local_deliver_finish+0x97/0x1d0 [ 12.726323] ip_local_deliver+0xaf/0xc0 [ 12.726959] ? ip_route_input_noref+0x19/0x20 [ 12.727689] ip_rcv_finish+0xdd/0x3b0 [ 12.728307] ip_rcv+0x2ac/0x360 [ 12.728839] __netif_receive_skb_core+0x6fb/0xa90 [ 12.729580] ? udp4_gro_receive+0x1a7/0x2c0 [ 12.730274] __netif_receive_skb+0x1d/0x60 [ 12.730953] ? __netif_receive_skb+0x1d/0x60 [ 12.731637] netif_receive_skb_internal+0x37/0xd0 [ 12.732371] napi_gro_receive+0xc7/0xf0 [ 12.732920] receive_buf+0x3c3/0xd40 [ 12.733441] virtnet_poll+0xb1/0x250 [ 12.733944] net_rx_action+0x23e/0x370 [ 12.734476] __do_softirq+0xc5/0x2f8 [ 12.734922] irq_exit+0xfa/0x100 [ 12.735315] do_IRQ+0x4f/0xd0 [ 12.735680] common_interrupt+0xa2/0xa2 [ 12.736126] [ 12.736416] RIP: 0010:native_safe_halt+0x6/0x10 [ 12.736925] RSP: 0018:ffffa41cc0cafe90 EFLAGS: 00000246 ORIG_RAX: ffffffffffffff4d [ 12.737756] RAX: 0000000000000000 RBX: ffff8c2a761edb80 RCX: 0000000000000000 [ 12.738504] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 [ 12.739258] RBP: ffffa41cc0cafe90 R08: 0000014b5b9795e5 R09: ffffa41cc12c7e88 [ 12.740118] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000002 [ 12.740964] R13: ffff8c2a761edb80 R14: 0000000000000000 R15: 0000000000000000 [ 12.741831] default_idle+0x2a/0x100 [ 12.742323] arch_cpu_idle+0xf/0x20 [ 12.742796] default_idle_call+0x28/0x40 [ 12.743312] do_idle+0x179/0x1f0 [ 12.743761] cpu_startup_entry+0x1d/0x20 [ 12.744291] start_secondary+0x112/0x120 [ 12.744816] secondary_startup_64+0xa5/0xa5 [ 12.745367] Code: b9 f4 01 00 00 48 89 c2 48 c1 ea 02 48 3d d3 07 00 00 48 0f 47 d1 49 8b 0c 24 48 39 d1 76 07 49 89 14 24 48 89 d1 31 d2 48 89 df <48> f7 f1 89 c6 e8 81 6e ff ff 5b 41 5c 5d c3 66 90 66 2e 0f 1f [ 12.747527] RIP: tipc_node_calculate_timer.isra.12+0x45/0x60 [tipc] RSP: ffff8c2a7fc838a0 [ 12.748555] ---[ end trace 1399ab83390650fd ]--- [ 12.749296] Kernel panic - not syncing: Fatal exception in interrupt [ 12.750123] Kernel Offset: 0x13200000 from 0xffffffff82000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff) [ 12.751215] Rebooting in 60 seconds.. Fixes: c9b64d492b1f ("tipc: add replicast peer discovery") Signed-off-by: Tommi Rantala Cc: Jon Maloy Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/tipc/udp_media.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index ecca64fc6a6f22..3deabcab488216 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -371,10 +371,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb) goto rcu_out; } - tipc_rcv(sock_net(sk), skb, b); - rcu_read_unlock(); - return 0; - rcu_out: rcu_read_unlock(); out: From 91c5e6553af065f30390a76bf0556672d6cbb6ab Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 6 Dec 2017 11:08:19 -0800 Subject: [PATCH 0595/3326] tcp: use current time in tcp_rcv_space_adjust() [ Upstream commit 8632385022f2b05a6ca0b9e0f95575865de0e2ce ] When I switched rcv_rtt_est to high resolution timestamps, I forgot that tp->tcp_mstamp needed to be refreshed in tcp_rcv_space_adjust() Using an old timestamp leads to autotuning lags. 
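To illustrate the lag, a minimal userspace sketch of the interval check done by tcp_rcv_space_adjust() (made-up microsecond values, not kernel code; the kernel compares the elapsed time against rtt_us >> 3 before adjusting the receive buffer, as in the hunk below):

	/* Illustration only: a stale tp->tcp_mstamp under-measures the time
	 * elapsed since the last rcvq_space update, so the adjustment keeps
	 * being skipped even though more than an RTT has really passed.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long rcvq_space_time = 1000000; /* last adjustment      */
		unsigned long long stale_mstamp    = 1004000; /* old tp->tcp_mstamp   */
		unsigned long long refreshed_now   = 1100000; /* actual current time  */
		unsigned long long rtt_us          = 40000;   /* receiver RTT estimate */

		unsigned long long stale_time = stale_mstamp - rcvq_space_time;
		unsigned long long fresh_time = refreshed_now - rcvq_space_time;

		printf("stale:     %llu us elapsed -> %s\n", stale_time,
		       stale_time < (rtt_us >> 3) ? "adjustment skipped" : "adjustment runs");
		printf("refreshed: %llu us elapsed -> %s\n", fresh_time,
		       fresh_time < (rtt_us >> 3) ? "adjustment skipped" : "adjustment runs");
		return 0;
	}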
Fixes: 645f4c6f2ebd ("tcp: switch rcv_rtt_est and rcvq_space to high resolution timestamps") Signed-off-by: Eric Dumazet Cc: Wei Wang Cc: Neal Cardwell Cc: Yuchung Cheng Acked-by: Neal Cardwell Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_input.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b6bb3cdfad09ec..4b10e79210aa77 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -592,6 +592,7 @@ void tcp_rcv_space_adjust(struct sock *sk) int time; int copied; + tcp_mstamp_refresh(tp); time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) return; From 8067098c04be5a9058128164e9779e4c2e7c6dc1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 27 Nov 2017 18:37:21 +0100 Subject: [PATCH 0596/3326] net: sched: cbq: create block for q->link.block [ Upstream commit d51aae68b142f48232257e96ce317db25445418d ] q->link.block is not initialized, that leads to EINVAL when one tries to add filter there. So initialize it properly. This can be reproduced by: $ tc qdisc add dev eth0 root handle 1: cbq avpkt 1000 rate 1000Mbit bandwidth 1000Mbit $ tc filter add dev eth0 parent 1: protocol ip prio 100 u32 match ip protocol 0 0x00 flowid 1:1 Reported-by: Jaroslav Aster Reported-by: Ivan Vecera Fixes: 6529eaba33f0 ("net: sched: introduce tcf block infractructure") Signed-off-by: Jiri Pirko Acked-by: Eelco Chaudron Reviewed-by: Ivan Vecera Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sched/sch_cbq.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index dcef97fa804739..aeffa320429d86 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1157,9 +1157,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) return -EINVAL; + err = tcf_block_get(&q->link.block, &q->link.filter_list); + if (err) + goto put_rtab; + err = qdisc_class_hash_init(&q->clhash); if (err < 0) - goto put_rtab; + goto put_block; q->link.sibling = &q->link; q->link.common.classid = sch->handle; @@ -1193,6 +1197,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) cbq_addprio(q, &q->link); return 0; +put_block: + tcf_block_put(q->link.block); + put_rtab: qdisc_put_rtab(q->link.R_tab); return err; From 616bada6fd46b77351a4033392035de7875c48b6 Mon Sep 17 00:00:00 2001 From: Wei Xu Date: Fri, 1 Dec 2017 05:10:38 -0500 Subject: [PATCH 0597/3326] tap: free skb if flags error [ Upstream commit 61d78537843e676e7f56ac6db333db0c0529b892 ] tap_recvmsg() supports accepting skb by msg_control after commit 3b4ba04acca8 ("tap: support receiving skb from msg_control"), the skb if presented should be freed within the function, otherwise it would be leaked. Signed-off-by: Wei Xu Reported-by: Matthew Rosato Acked-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/tap.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 6c0c84c33e1fb6..e4c6c78fab3b34 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q, DEFINE_WAIT(wait); ssize_t ret = 0; - if (!iov_iter_count(to)) + if (!iov_iter_count(to)) { + if (skb) + kfree_skb(skb); return 0; + } if (skb) goto put; @@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct tap_queue *q = container_of(sock, struct tap_queue, sock); + struct sk_buff *skb = m->msg_control; int ret; - if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) + if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) { + if (skb) + kfree_skb(skb); return -EINVAL; - ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, - m->msg_control); + } + ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb); if (ret > total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; From 241eb29c019a0b4e2a3ff5ca4b4449374aeb5f87 Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Fri, 17 Nov 2017 21:06:14 -0500 Subject: [PATCH 0598/3326] tcp: when scheduling TLP, time of RTO should account for current ACK [ Upstream commit ed66dfaf236c04d414de1d218441296e57fb2bd2 ] Fix the TLP scheduling logic so that when scheduling a TLP probe, we ensure that the estimated time at which an RTO would fire accounts for the fact that ACKs indicating forward progress should push back RTO times. After the following fix: df92c8394e6e ("tcp: fix xmit timer to only be reset if data ACKed/SACKed") we had an unintentional behavior change in the following kind of scenario: suppose the RTT variance has been very low recently. Then suppose we send out a flight of N packets and our RTT is 100ms: t=0: send a flight of N packets t=100ms: receive an ACK for N-1 packets The response before df92c8394e6e that was: -> schedule a TLP for now + RTO_interval The response after df92c8394e6e is: -> schedule a TLP for t=0 + RTO_interval Since RTO_interval = srtt + RTT_variance, this means that we have scheduled a TLP timer at a point in the future that only accounts for RTT_variance. If the RTT_variance term is small, this means that the timer fires soon. Before df92c8394e6e this would not happen, because in that code, when we receive an ACK for a prefix of flight, we did: 1) Near the top of tcp_ack(), switch from TLP timer to RTO at write_queue_head->paket_tx_time + RTO_interval: if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); 2) In tcp_clean_rtx_queue(), update the RTO to now + RTO_interval: if (flag & FLAG_ACKED) { tcp_rearm_rto(sk); 3) In tcp_ack() after tcp_fastretrans_alert() switch from RTO to TLP at now + RTO_interval: if (icsk->icsk_pending == ICSK_TIME_RETRANS) tcp_schedule_loss_probe(sk); In df92c8394e6e we removed that 3-phase dance, and instead directly set the TLP timer once: we set the TLP timer in cases like this to write_queue_head->packet_tx_time + RTO_interval. So if the RTT variance is small, then this means that this is setting the TLP timer to fire quite soon. This means if the ACK for the tail of the flight takes longer than an RTT to arrive (often due to delayed ACKs), then the TLP timer fires too quickly. 
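To make the timeline concrete, a minimal userspace sketch of the arithmetic above (illustrative values only, not kernel code; RTO_interval is the simplified srtt + RTT_variance used in this description):

	/* Illustrative timeline arithmetic for the t=0 / t=100ms scenario. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int srtt_ms = 100;	/* t=0: flight of N packets sent, RTT 100ms */
		unsigned int rttvar_ms = 5;	/* RTT variance has been very low           */
		unsigned int rto_ms = srtt_ms + rttvar_ms;

		unsigned int head_tx_ms = 0;	/* write_queue_head packet tx time     */
		unsigned int now_ms = 100;	/* ACK for N-1 packets arrives         */

		/* After df92c8394e6e: TLP bound anchored at the head packet's tx time. */
		unsigned int tlp_old = head_tx_ms + rto_ms;
		/* With this fix, when the ACK advances the RTO: now + RTO_interval.    */
		unsigned int tlp_new = now_ms + rto_ms;

		printf("anchored at head tx time: fires %u ms after the ACK\n",
		       tlp_old - now_ms);	/* only the RTT variance: 5 ms   */
		printf("anchored at current ACK:  fires %u ms after the ACK\n",
		       tlp_new - now_ms);	/* a full RTO interval: 105 ms   */
		return 0;
	}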
Fixes: df92c8394e6e ("tcp: fix xmit timer to only be reset if data ACKed/SACKed") Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/net/tcp.h | 2 +- net/ipv4/tcp_input.c | 2 +- net/ipv4/tcp_output.c | 8 +++++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 5dfc92ff9ffde9..6ced69940f5c83 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -563,7 +563,7 @@ void tcp_push_one(struct sock *, unsigned int mss_now); void tcp_send_ack(struct sock *sk); void tcp_send_delayed_ack(struct sock *sk); void tcp_send_loss_probe(struct sock *sk); -bool tcp_schedule_loss_probe(struct sock *sk); +bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto); void tcp_skb_collapse_tstamp(struct sk_buff *skb, const struct sk_buff *next_skb); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 4b10e79210aa77..c5447b9f85175c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3021,7 +3021,7 @@ void tcp_rearm_rto(struct sock *sk) /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */ static void tcp_set_xmit_timer(struct sock *sk) { - if (!tcp_schedule_loss_probe(sk)) + if (!tcp_schedule_loss_probe(sk, true)) tcp_rearm_rto(sk); } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 478909f4694d00..cd3d60bb7cc8ac 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2337,7 +2337,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, /* Send one loss probe per tail loss episode. */ if (push_one != 2) - tcp_schedule_loss_probe(sk); + tcp_schedule_loss_probe(sk, false); is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); tcp_cwnd_validate(sk, is_cwnd_limited); return false; @@ -2345,7 +2345,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, return !tp->packets_out && tcp_send_head(sk); } -bool tcp_schedule_loss_probe(struct sock *sk) +bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -2384,7 +2384,9 @@ bool tcp_schedule_loss_probe(struct sock *sk) } /* If the RTO formula yields an earlier time, then use that time. */ - rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */ + rto_delta_us = advancing_rto ? + jiffies_to_usecs(inet_csk(sk)->icsk_rto) : + tcp_rto_delta_us(sk); /* How far in future is RTO? */ if (rto_delta_us > 0) timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); From 0536add671963d9875dc42f734d1608bef480dc7 Mon Sep 17 00:00:00 2001 From: Wei Xu Date: Fri, 1 Dec 2017 05:10:37 -0500 Subject: [PATCH 0599/3326] tun: free skb in early errors [ Upstream commit c33ee15b3820a03cf8229ba9415084197b827f8c ] tun_recvmsg() supports accepting skb by msg_control after commit ac77cfd4258f ("tun: support receiving skb through msg_control"), the skb if presented should be freed no matter how far it can go along, otherwise it would be leaked. This patch fixes several missed cases. Signed-off-by: Wei Xu Reported-by: Matthew Rosato Acked-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/tun.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 42bb820a56c92e..a5c47db94846cd 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1734,8 +1734,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, tun_debug(KERN_INFO, tun, "tun_do_read\n"); - if (!iov_iter_count(to)) + if (!iov_iter_count(to)) { + if (skb) + kfree_skb(skb); return 0; + } if (!skb) { /* Read frames from ring */ @@ -1851,22 +1854,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = __tun_get(tfile); + struct sk_buff *skb = m->msg_control; int ret; - if (!tun) - return -EBADFD; + if (!tun) { + ret = -EBADFD; + goto out_free_skb; + } if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { ret = -EINVAL; - goto out; + goto out_put_tun; } if (flags & MSG_ERRQUEUE) { ret = sock_recv_errqueue(sock->sk, m, total_len, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } - ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, - m->msg_control); + ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb); if (ret > (ssize_t)total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; @@ -1874,6 +1879,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, out: tun_put(tun); return ret; + +out_put_tun: + tun_put(tun); +out_free_skb: + if (skb) + kfree_skb(skb); + return ret; } static int tun_peek_len(struct socket *sock) From 7afe2e668ee2b1f465dcccb942da9d8f8d873f20 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 21 Nov 2017 07:08:57 -0800 Subject: [PATCH 0600/3326] net: ipv6: Fixup device for anycast routes during copy [ Upstream commit 98d11291d189cb5adf49694d0ad1b971c0212697 ] Florian reported a breakage with anycast routes due to commit 4832c30d5458 ("net: ipv6: put host and anycast routes on device with address"). Prior to this commit anycast routes were added against the loopback device causing repetitive route entries with no insight into why they existed. e.g.: $ ip -6 ro ls table local type anycast anycast 2001:db8:1:: dev lo proto kernel metric 0 pref medium anycast 2001:db8:2:: dev lo proto kernel metric 0 pref medium anycast fe80:: dev lo proto kernel metric 0 pref medium anycast fe80:: dev lo proto kernel metric 0 pref medium The point of commit 4832c30d5458 is to add the routes using the device with the address which is causing the route to be added. e.g.,: $ ip -6 ro ls table local type anycast anycast 2001:db8:1:: dev eth1 proto kernel metric 0 pref medium anycast 2001:db8:2:: dev eth2 proto kernel metric 0 pref medium anycast fe80:: dev eth2 proto kernel metric 0 pref medium anycast fe80:: dev eth1 proto kernel metric 0 pref medium For traffic to work as it did before, the dst device needs to be switched to the loopback when the copy is created similar to local routes. Fixes: 4832c30d5458 ("net: ipv6: put host and anycast routes on device with address") Signed-off-by: David Ahern Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a96d5b385d8fa2..598efa8cfe250c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -960,7 +960,7 @@ static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt) { struct net_device *dev = rt->dst.dev; - if (rt->rt6i_flags & RTF_LOCAL) { + if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) { /* for copies of local routes, dst->dev needs to be the * device if it is a master device, the master device if * device is enslaved, and the loopback as the default From 29d631e5941366ee5eb50b3b592ca99186115a63 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 19 Nov 2017 19:31:04 +0800 Subject: [PATCH 0601/3326] tun: fix rcu_read_lock imbalance in tun_build_skb [ Upstream commit 654d573845f35017dc397840fa03610fef3d08b0 ] rcu_read_lock in tun_build_skb is used to rcu_dereference tun->xdp_prog safely, rcu_read_unlock should be done in every return path. Now I could see one place missing it, where it returns NULL in switch-case XDP_REDIRECT, another palce using rcu_read_lock wrongly, where it returns NULL in if (xdp_xmit) chunk. So fix both in this patch. Fixes: 761876c857cb ("tap: XDP support") Signed-off-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/tun.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a5c47db94846cd..d06f88312e1e8f 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1326,6 +1326,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); if (err) goto err_redirect; + rcu_read_unlock(); return NULL; case XDP_TX: xdp_xmit = true; @@ -1358,7 +1359,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, if (xdp_xmit) { skb->dev = tun->dev; generic_xdp_tx(skb, xdp_prog); - rcu_read_lock(); + rcu_read_unlock(); return NULL; } From 60335608e2f1ad6e55acaa4f13bd0f3bcd156dbd Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Tue, 21 Nov 2017 10:22:25 -0500 Subject: [PATCH 0602/3326] net: accept UFO datagrams from tuntap and packet [ Upstream commit 0c19f846d582af919db66a5914a0189f9f92c936 ] Tuntap and similar devices can inject GSO packets. Accept type VIRTIO_NET_HDR_GSO_UDP, even though not generating UFO natively. Processes are expected to use feature negotiation such as TUNSETOFFLOAD to detect supported offload types and refrain from injecting other packets. This process breaks down with live migration: guest kernels do not renegotiate flags, so destination hosts need to expose all features that the source host does. Partially revert the UFO removal from 182e0b6b5846~1..d9d30adf5677. This patch introduces nearly(*) no new code to simplify verification. It brings back verbatim tuntap UFO negotiation, VIRTIO_NET_HDR_GSO_UDP insertion and software UFO segmentation. It does not reinstate protocol stack support, hardware offload (NETIF_F_UFO), SKB_GSO_UDP tunneling in SKB_GSO_SOFTWARE or reception of VIRTIO_NET_HDR_GSO_UDP packets in tuntap. To support SKB_GSO_UDP reappearing in the stack, also reinstate logic in act_csum and openvswitch. Achieve equivalence with v4.13 HEAD by squashing in commit 939912216fa8 ("net: skb_needs_check() removes CHECKSUM_UNNECESSARY check for tx.") and reverting commit 8d63bee643f1 ("net: avoid skb_warn_bad_offload false positives on UFO"). 
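The feature negotiation mentioned above can be probed from userspace along these lines (a minimal sketch, not part of the patch; the "tap0" name and the exact flag set are illustrative and error handling is abbreviated):

	/* Probe tuntap offload support via TUNSETOFFLOAD; a kernel with this
	 * patch accepts TUN_F_UFO, a kernel with UFO removed but without it
	 * returns -EINVAL, so callers retry without the flag.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/if.h>
	#include <linux/if_tun.h>

	int main(void)
	{
		struct ifreq ifr;
		unsigned int offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
					TUN_F_TSO_ECN | TUN_F_UFO;
		int fd = open("/dev/net/tun", O_RDWR);

		if (fd < 0)
			return 1;

		memset(&ifr, 0, sizeof(ifr));
		ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
		strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
		if (ioctl(fd, TUNSETIFF, &ifr) < 0)
			return 1;

		if (ioctl(fd, TUNSETOFFLOAD, offloads) < 0) {
			offloads &= ~TUN_F_UFO;
			if (ioctl(fd, TUNSETOFFLOAD, offloads) < 0)
				return 1;
			printf("UFO not offered by this kernel\n");
		} else {
			printf("UFO accepted\n");
		}
		close(fd);
		return 0;
	}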
(*) To avoid having to bring back skb_shinfo(skb)->ip6_frag_id, ipv6_proxy_select_ident is changed to return a __be32 and this is assigned directly to the frag_hdr. Also, SKB_GSO_UDP is inserted at the end of the enum to minimize code churn. Tested Booted a v4.13 guest kernel with QEMU. On a host kernel before this patch `ethtool -k eth0` shows UFO disabled. After the patch, it is enabled, same as on a v4.13 host kernel. A UFO packet sent from the guest appears on the tap device: host: nc -l -p -u 8000 & tcpdump -n -i tap0 guest: dd if=/dev/zero of=payload.txt bs=1 count=2000 nc -u 192.16.1.1 8000 < payload.txt Direct tap to tap transmission of VIRTIO_NET_HDR_GSO_UDP succeeds, packets arriving fragmented: ./with_tap_pair.sh ./tap_send_ufo tap0 tap1 (from https://github.com/wdebruij/kerneltools/tree/master/tests) Changes v1 -> v2 - simplified set_offload change (review comment) - documented test procedure Link: http://lkml.kernel.org/r/ Fixes: fb652fdfe837 ("macvlan/macvtap: Remove NETIF_F_UFO advertisement.") Reported-by: Michal Kubecek Signed-off-by: Willem de Bruijn Acked-by: Jason Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/tap.c | 2 +- drivers/net/tun.c | 2 + include/linux/netdev_features.h | 4 +- include/linux/netdevice.h | 1 + include/linux/skbuff.h | 2 + include/linux/virtio_net.h | 5 +- include/net/ipv6.h | 2 +- net/core/dev.c | 3 +- net/ipv4/af_inet.c | 12 ++++- net/ipv4/udp_offload.c | 49 +++++++++++++++++-- net/ipv6/output_core.c | 6 +-- net/ipv6/udp_offload.c | 85 +++++++++++++++++++++++++++++++-- net/openvswitch/datapath.c | 14 ++++++ net/openvswitch/flow.c | 6 ++- net/sched/act_csum.c | 6 +++ 15 files changed, 181 insertions(+), 18 deletions(-) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index e4c6c78fab3b34..bfd4ded0a53fb0 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1080,7 +1080,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN)) + TUN_F_TSO_ECN | TUN_F_UFO)) return -EINVAL; rtnl_lock(); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d06f88312e1e8f..c91b110f21699b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2157,6 +2157,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } + + arg &= ~TUN_F_UFO; } /* This gives the user a way to test for new features in future by diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index dc8b4896b77b09..b1b0ca7ccb2bac 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -54,8 +54,9 @@ enum { NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */ NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */ + NETIF_F_GSO_UDP_BIT, /* ... 
UFO, deprecated except tuntap */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_ESP_BIT, + NETIF_F_GSO_UDP_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ @@ -132,6 +133,7 @@ enum { #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP) #define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP) +#define NETIF_F_GSO_UDP __NETIF_F(GSO_UDP) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2eaac7d75af4f1..46bf7cc7d5d589 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4101,6 +4101,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d448a4804aeabb..051e0939ec190a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -569,6 +569,8 @@ enum { SKB_GSO_SCTP = 1 << 14, SKB_GSO_ESP = 1 << 15, + + SKB_GSO_UDP = 1 << 16, }; #if BITS_PER_LONG > 32 diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 210034c896e31e..f144216febc642 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -9,7 +9,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, const struct virtio_net_hdr *hdr, bool little_endian) { - unsigned short gso_type = 0; + unsigned int gso_type = 0; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -19,6 +19,9 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, case VIRTIO_NET_HDR_GSO_TCPV6: gso_type = SKB_GSO_TCPV6; break; + case VIRTIO_NET_HDR_GSO_UDP: + gso_type = SKB_GSO_UDP; + break; default: return -EINVAL; } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 6eac5cf8f1e6e0..35e9dd2d18baa4 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -727,7 +727,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add __be32 ipv6_select_ident(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr); -void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb); +__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb); int ip6_dst_hoplimit(struct dst_entry *dst); diff --git a/net/core/dev.c b/net/core/dev.c index 11596a302a2652..27357fc1730b93 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2735,7 +2735,8 @@ EXPORT_SYMBOL(skb_mac_gso_segment); static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) { if (tx_path) - return skb->ip_summed != CHECKSUM_PARTIAL; + return skb->ip_summed != CHECKSUM_PARTIAL && + skb->ip_summed != CHECKSUM_UNNECESSARY; return skb->ip_summed == CHECKSUM_NONE; } diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index e31108e5ef79c5..b9d9a2b8792c7a 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1221,9 +1221,10 @@ EXPORT_SYMBOL(inet_sk_rebuild_header); struct sk_buff *inet_gso_segment(struct sk_buff *skb, netdev_features_t features) { - bool fixedid = false, gso_partial, 
encap; + bool udpfrag = false, fixedid = false, gso_partial, encap; struct sk_buff *segs = ERR_PTR(-EINVAL); const struct net_offload *ops; + unsigned int offset = 0; struct iphdr *iph; int proto, tot_len; int nhoff; @@ -1258,6 +1259,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, segs = ERR_PTR(-EPROTONOSUPPORT); if (!skb->encapsulation || encap) { + udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID); /* fixed ID is invalid if DF bit is not set */ @@ -1277,7 +1279,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, skb = segs; do { iph = (struct iphdr *)(skb_mac_header(skb) + nhoff); - if (skb_is_gso(skb)) { + if (udpfrag) { + iph->frag_off = htons(offset >> 3); + if (skb->next) + iph->frag_off |= htons(IP_MF); + offset += skb->len - nhoff - ihl; + tot_len = skb->len - nhoff; + } else if (skb_is_gso(skb)) { if (!fixedid) { iph->id = htons(id); id += skb_shinfo(skb)->gso_segs; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index e360d55be5554d..01801b77bd0da4 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -187,16 +187,57 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, } EXPORT_SYMBOL(skb_udp_tunnel_segment); -static struct sk_buff *udp4_tunnel_segment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); + unsigned int mss; + __wsum csum; + struct udphdr *uh; + struct iphdr *iph; if (skb->encapsulation && (skb_shinfo(skb)->gso_type & - (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) + (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) { segs = skb_udp_tunnel_segment(skb, features, false); + goto out; + } + + if (!pskb_may_pull(skb, sizeof(struct udphdr))) + goto out; + + mss = skb_shinfo(skb)->gso_size; + if (unlikely(skb->len <= mss)) + goto out; + + /* Do software UFO. Complete and fill in the UDP checksum as + * HW cannot do checksum of UDP packets sent as multiple + * IP fragments. + */ + uh = udp_hdr(skb); + iph = ip_hdr(skb); + + uh->check = 0; + csum = skb_checksum(skb, 0, skb->len, 0); + uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* If there is no outer header we can fake a checksum offload + * due to the fact that we have already done the checksum in + * software prior to segmenting the frame. + */ + if (!skb->encap_hdr_csum) + features |= NETIF_F_HW_CSUM; + + /* Fragment the skb. IP headers of the fragments are updated in + * inet_gso_segment() + */ + segs = skb_segment(skb, features); +out: return segs; } @@ -330,7 +371,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff) static const struct net_offload udpv4_offload = { .callbacks = { - .gso_segment = udp4_tunnel_segment, + .gso_segment = udp4_ufo_fragment, .gro_receive = udp4_gro_receive, .gro_complete = udp4_gro_complete, }, diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index a338bbc33cf3cd..4fe7c90962ddae 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -39,7 +39,7 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd, * * The network header must be set before calling this. 
*/ -void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) +__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) { static u32 ip6_proxy_idents_hashrnd __read_mostly; struct in6_addr buf[2]; @@ -51,14 +51,14 @@ void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) offsetof(struct ipv6hdr, saddr), sizeof(buf), buf); if (!addrs) - return; + return 0; net_get_random_once(&ip6_proxy_idents_hashrnd, sizeof(ip6_proxy_idents_hashrnd)); id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd, &addrs[1], &addrs[0]); - skb_shinfo(skb)->ip6_frag_id = htonl(id); + return htonl(id); } EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 455fd4e3933323..a0f89ad76f9d22 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -17,15 +17,94 @@ #include #include "ip6_offload.h" -static struct sk_buff *udp6_tunnel_segment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); + unsigned int mss; + unsigned int unfrag_ip6hlen, unfrag_len; + struct frag_hdr *fptr; + u8 *packet_start, *prevhdr; + u8 nexthdr; + u8 frag_hdr_sz = sizeof(struct frag_hdr); + __wsum csum; + int tnl_hlen; + int err; + + mss = skb_shinfo(skb)->gso_size; + if (unlikely(skb->len <= mss)) + goto out; if (skb->encapsulation && skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)) segs = skb_udp_tunnel_segment(skb, features, true); + else { + const struct ipv6hdr *ipv6h; + struct udphdr *uh; + + if (!pskb_may_pull(skb, sizeof(struct udphdr))) + goto out; + + /* Do software UFO. Complete and fill in the UDP checksum as HW cannot + * do checksum of UDP packets sent as multiple IP fragments. + */ + + uh = udp_hdr(skb); + ipv6h = ipv6_hdr(skb); + + uh->check = 0; + csum = skb_checksum(skb, 0, skb->len, 0); + uh->check = udp_v6_check(skb->len, &ipv6h->saddr, + &ipv6h->daddr, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* If there is no outer header we can fake a checksum offload + * due to the fact that we have already done the checksum in + * software prior to segmenting the frame. + */ + if (!skb->encap_hdr_csum) + features |= NETIF_F_HW_CSUM; + + /* Check if there is enough headroom to insert fragment header. */ + tnl_hlen = skb_tnl_header_len(skb); + if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) { + if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz)) + goto out; + } + + /* Find the unfragmentable header and shift it left by frag_hdr_sz + * bytes to insert fragment header. + */ + err = ip6_find_1stfragopt(skb, &prevhdr); + if (err < 0) + return ERR_PTR(err); + unfrag_ip6hlen = err; + nexthdr = *prevhdr; + *prevhdr = NEXTHDR_FRAGMENT; + unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + + unfrag_ip6hlen + tnl_hlen; + packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset; + memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len); + + SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz; + skb->mac_header -= frag_hdr_sz; + skb->network_header -= frag_hdr_sz; + + fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); + fptr->nexthdr = nexthdr; + fptr->reserved = 0; + fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb); + + /* Fragment the skb. 
ipv6 header and the remaining fields of the + * fragment header are updated in ipv6_gso_segment() + */ + segs = skb_segment(skb, features); + } +out: return segs; } @@ -75,7 +154,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff) static const struct net_offload udpv6_offload = { .callbacks = { - .gso_segment = udp6_tunnel_segment, + .gso_segment = udp6_ufo_fragment, .gro_receive = udp6_gro_receive, .gro_complete = udp6_gro_complete, }, diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index c3aec6227c91b7..294444bb075c8b 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -335,6 +335,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info, uint32_t cutlen) { + unsigned short gso_type = skb_shinfo(skb)->gso_type; + struct sw_flow_key later_key; struct sk_buff *segs, *nskb; int err; @@ -345,9 +347,21 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, if (segs == NULL) return -EINVAL; + if (gso_type & SKB_GSO_UDP) { + /* The initial flow key extracted by ovs_flow_key_extract() + * in this case is for a first fragment, so we need to + * properly mark later fragments. + */ + later_key = *key; + later_key.ip.frag = OVS_FRAG_TYPE_LATER; + } + /* Queue all of the segments. */ skb = segs; do { + if (gso_type & SKB_GSO_UDP && skb != segs) + key = &later_key; + err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen); if (err) break; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 8c94cef25a72b6..cfb652a4e00773 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -584,7 +584,8 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) key->ip.frag = OVS_FRAG_TYPE_LATER; return 0; } - if (nh->frag_off & htons(IP_MF)) + if (nh->frag_off & htons(IP_MF) || + skb_shinfo(skb)->gso_type & SKB_GSO_UDP) key->ip.frag = OVS_FRAG_TYPE_FIRST; else key->ip.frag = OVS_FRAG_TYPE_NONE; @@ -700,6 +701,9 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) if (key->ip.frag == OVS_FRAG_TYPE_LATER) return 0; + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + key->ip.frag = OVS_FRAG_TYPE_FIRST; + /* Transport layer. */ if (key->ip.proto == NEXTHDR_TCP) { if (tcphdr_ok(skb)) { diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 1c40caadcff959..d836f998117b24 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -229,6 +229,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl, const struct iphdr *iph; u16 ul; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + return 1; + /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, @@ -282,6 +285,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl, const struct ipv6hdr *ip6h; u16 ul; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + return 1; + /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, From 87ff3fb30de11a64d25a4a22f23def1ce77cf840 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Sat, 25 Nov 2017 13:14:40 -0600 Subject: [PATCH 0603/3326] net: openvswitch: datapath: fix data type in queue_gso_packets [ Upstream commit 2734166e89639c973c6e125ac8bcfc2d9db72b70 ] gso_type is being used in binary AND operations together with SKB_GSO_UDP. 
The issue is that variable gso_type is of type unsigned short and SKB_GSO_UDP expands to more than 16 bits: SKB_GSO_UDP = 1 << 16 This makes any binary AND operation between gso_type and SKB_GSO_UDP always evaluate to zero, hence making some code unreachable and likely causing undesired behavior. Fix this by changing the data type of variable gso_type to unsigned int. Addresses-Coverity-ID: 1462223 Fixes: 0c19f846d582 ("net: accept UFO datagrams from tuntap and packet") Signed-off-by: Gustavo A. R. Silva Acked-by: Willem de Bruijn Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/openvswitch/datapath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 294444bb075c8b..363dd904733ddf 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -335,7 +335,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info, uint32_t cutlen) { - unsigned short gso_type = skb_shinfo(skb)->gso_type; + unsigned int gso_type = skb_shinfo(skb)->gso_type; struct sw_flow_key later_key; struct sk_buff *segs, *nskb; int err; From 627a5956119fd58bd4b94a26437d449830fc4736 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 27 Nov 2017 11:11:41 -0800 Subject: [PATCH 0604/3326] cls_bpf: don't decrement net's refcount when offload fails [ Upstream commit 25415cec502a1232b19fffc85465882b19a90415 ] When cls_bpf offload was added it seemed like a good idea to call cls_bpf_delete_prog() instead of extending the error handling path, since the software state is fully initialized at that point. This handling of errors without jumping to the end of the function is error prone, as proven by a later commit missing that extra call to __cls_bpf_delete_prog(). __cls_bpf_delete_prog() is now expected to be invoked with a reference on exts->net or the field zeroed out. The call on the offload's error path does not fulfil this requirement, leading to each error stealing a reference on the net namespace. Create a function undoing what cls_bpf_set_parms() did and use it from __cls_bpf_delete_prog() and the error path. Fixes: aae2c35ec892 ("cls_bpf: use tcf_exts_get_net() before call_rcu()") Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Acked-by: Daniel Borkmann Acked-by: Cong Wang Signed-off-by: David S.
Miller Signed-off-by: Greg Kroah-Hartman --- net/sched/cls_bpf.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 990eb4d91d5425..3a499530f3211c 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -246,11 +246,8 @@ static int cls_bpf_init(struct tcf_proto *tp) return 0; } -static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) +static void cls_bpf_free_parms(struct cls_bpf_prog *prog) { - tcf_exts_destroy(&prog->exts); - tcf_exts_put_net(&prog->exts); - if (cls_bpf_is_ebpf(prog)) bpf_prog_put(prog->filter); else @@ -258,6 +255,14 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) kfree(prog->bpf_name); kfree(prog->bpf_ops); +} + +static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) +{ + tcf_exts_destroy(&prog->exts); + tcf_exts_put_net(&prog->exts); + + cls_bpf_free_parms(prog); kfree(prog); } @@ -509,10 +514,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, goto errout; ret = cls_bpf_offload(tp, prog, oldprog); - if (ret) { - __cls_bpf_delete_prog(prog); - return ret; - } + if (ret) + goto errout_parms; if (!tc_in_hw(prog->gen_flags)) prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW; @@ -529,6 +532,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, *arg = prog; return 0; +errout_parms: + cls_bpf_free_parms(prog); errout: tcf_exts_destroy(&prog->exts); kfree(prog); From 0c882334e2b778c95858d38f2180819d887fa5c3 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 26 Nov 2017 20:56:07 +0800 Subject: [PATCH 0605/3326] sctp: use right member as the param of list_for_each_entry [ Upstream commit a8dd397903a6e57157f6265911f7d35681364427 ] Commit d04adf1b3551 ("sctp: reset owner sk for data chunks on out queues when migrating a sock") made a mistake that using 'list' as the param of list_for_each_entry to traverse the retransmit, sacked and abandoned queues, while chunks are using 'transmitted_list' to link into these queues. It could cause NULL dereference panic if there are chunks in any of these queues when peeling off one asoc. So use the chunk member 'transmitted_list' instead in this patch. Fixes: d04adf1b3551 ("sctp: reset owner sk for data chunks on out queues when migrating a sock") Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 14c28fbfe6b8c7..d6163f7aefb141 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -187,13 +187,13 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc, list_for_each_entry(chunk, &t->transmitted, transmitted_list) cb(chunk); - list_for_each_entry(chunk, &q->retransmit, list) + list_for_each_entry(chunk, &q->retransmit, transmitted_list) cb(chunk); - list_for_each_entry(chunk, &q->sacked, list) + list_for_each_entry(chunk, &q->sacked, transmitted_list) cb(chunk); - list_for_each_entry(chunk, &q->abandoned, list) + list_for_each_entry(chunk, &q->abandoned, transmitted_list) cb(chunk); list_for_each_entry(chunk, &q->out_chunk_list, list) From 7a33cd0ef9dda4692cb15fb37eb2e5ab0ef19609 Mon Sep 17 00:00:00 2001 From: Masamitsu Yamazaki Date: Wed, 15 Nov 2017 07:33:14 +0000 Subject: [PATCH 0606/3326] ipmi: Stop timers before cleaning up the module commit 4f7f5551a760eb0124267be65763008169db7087 upstream. 
System may crash after unloading ipmi_si.ko module because a timer may remain and fire after the module cleaned up resources. cleanup_one_si() contains the following processing. /* * Make sure that interrupts, the timer and the thread are * stopped and will not run again. */ if (to_clean->irq_cleanup) to_clean->irq_cleanup(to_clean); wait_for_timer_and_thread(to_clean); /* * Timeouts are stopped, now make sure the interrupts are off * in the BMC. Note that timers and CPU interrupts are off, * so no need for locks. */ while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { poll(to_clean); schedule_timeout_uninterruptible(1); } si_state changes as following in the while loop calling poll(to_clean). SI_GETTING_MESSAGES => SI_CHECKING_ENABLES => SI_SETTING_ENABLES => SI_GETTING_EVENTS => SI_NORMAL As written in the code comments above, timers are expected to stop before the polling loop and not to run again. But the timer is set again in the following process when si_state becomes SI_SETTING_ENABLES. => poll => smi_event_handler => handle_transaction_done // smi_info->si_state == SI_SETTING_ENABLES => start_getting_events => start_new_msg => smi_mod_timer => mod_timer As a result, before the timer set in start_new_msg() expires, the polling loop may see si_state becoming SI_NORMAL and the module clean-up finishes. For example, hard LOCKUP and panic occurred as following. smi_timeout was called after smi_event_handler, kcs_event and hangs at port_inb() trying to access I/O port after release. [exception RIP: port_inb+19] RIP: ffffffffc0473053 RSP: ffff88069fdc3d80 RFLAGS: 00000006 RAX: ffff8806800f8e00 RBX: ffff880682bd9400 RCX: 0000000000000000 RDX: 0000000000000ca3 RSI: 0000000000000ca3 RDI: ffff8806800f8e40 RBP: ffff88069fdc3d80 R8: ffffffff81d86dfc R9: ffffffff81e36426 R10: 00000000000509f0 R11: 0000000000100000 R12: 0000000000]:000000 R13: 0000000000000000 R14: 0000000000000246 R15: ffff8806800f8e00 ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0000 --- --- To fix the problem I defined a flag, timer_can_start, as member of struct smi_info. The flag is enabled immediately after initializing the timer and disabled immediately before waiting for timer deletion. Fixes: 0cfec916e86d ("ipmi: Start the timer and thread on internal msgs") Signed-off-by: Yamazaki Masamitsu [Some fairly major changes went into the IPMI driver in 4.15, so this required a backport as the code had changed and moved to a different file.] Signed-off-by: Corey Minyard Signed-off-by: Greg Kroah-Hartman --- drivers/char/ipmi/ipmi_si_intf.c | 44 +++++++++++++++++--------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index bc3984ffe86797..c04aa11f0e2145 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -242,6 +242,9 @@ struct smi_info { /* The timer for this si. 
*/ struct timer_list si_timer; + /* This flag is set, if the timer can be set */ + bool timer_can_start; + /* This flag is set, if the timer is running (timer_pending() isn't enough) */ bool timer_running; @@ -417,6 +420,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) { + if (!smi_info->timer_can_start) + return; smi_info->last_timeout_jiffies = jiffies; mod_timer(&smi_info->si_timer, new_val); smi_info->timer_running = true; @@ -436,21 +441,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); } -static void start_check_enables(struct smi_info *smi_info, bool start_timer) +static void start_check_enables(struct smi_info *smi_info) { unsigned char msg[2]; msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; - if (start_timer) - start_new_msg(smi_info, msg, 2); - else - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + start_new_msg(smi_info, msg, 2); smi_info->si_state = SI_CHECKING_ENABLES; } -static void start_clear_flags(struct smi_info *smi_info, bool start_timer) +static void start_clear_flags(struct smi_info *smi_info) { unsigned char msg[3]; @@ -459,10 +461,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer) msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; msg[2] = WDT_PRE_TIMEOUT_INT; - if (start_timer) - start_new_msg(smi_info, msg, 3); - else - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); + start_new_msg(smi_info, msg, 3); smi_info->si_state = SI_CLEARING_FLAGS; } @@ -497,11 +496,11 @@ static void start_getting_events(struct smi_info *smi_info) * Note that we cannot just use disable_irq(), since the interrupt may * be shared. */ -static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) +static inline bool disable_si_irq(struct smi_info *smi_info) { if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { smi_info->interrupt_disabled = true; - start_check_enables(smi_info, start_timer); + start_check_enables(smi_info); return true; } return false; @@ -511,7 +510,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info) { if ((smi_info->irq) && (smi_info->interrupt_disabled)) { smi_info->interrupt_disabled = false; - start_check_enables(smi_info, true); + start_check_enables(smi_info); return true; } return false; @@ -529,7 +528,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) msg = ipmi_alloc_smi_msg(); if (!msg) { - if (!disable_si_irq(smi_info, true)) + if (!disable_si_irq(smi_info)) smi_info->si_state = SI_NORMAL; } else if (enable_si_irq(smi_info)) { ipmi_free_smi_msg(msg); @@ -545,7 +544,7 @@ static void handle_flags(struct smi_info *smi_info) /* Watchdog pre-timeout */ smi_inc_stat(smi_info, watchdog_pretimeouts); - start_clear_flags(smi_info, true); + start_clear_flags(smi_info); smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; if (smi_info->intf) ipmi_smi_watchdog_pretimeout(smi_info->intf); @@ -928,7 +927,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, * disable and messages disabled. */ if (smi_info->supports_event_msg_buff || smi_info->irq) { - start_check_enables(smi_info, true); + start_check_enables(smi_info); } else { smi_info->curr_msg = alloc_msg_handle_irq(smi_info); if (!smi_info->curr_msg) @@ -1235,6 +1234,7 @@ static int smi_start_processing(void *send_info, /* Set up the timer that drives the interface. 
*/ setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); + new_smi->timer_can_start = true; smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); /* Try to claim any interrupts. */ @@ -3416,10 +3416,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info) check_set_rcv_irq(smi_info); } -static inline void wait_for_timer_and_thread(struct smi_info *smi_info) +static inline void stop_timer_and_thread(struct smi_info *smi_info) { if (smi_info->thread != NULL) kthread_stop(smi_info->thread); + + smi_info->timer_can_start = false; if (smi_info->timer_running) del_timer_sync(&smi_info->si_timer); } @@ -3605,7 +3607,7 @@ static int try_smi_init(struct smi_info *new_smi) * Start clearing the flags before we enable interrupts or the * timer to avoid racing with the timer. */ - start_clear_flags(new_smi, false); + start_clear_flags(new_smi); /* * IRQ is defined to be set when non-zero. req_events will @@ -3674,7 +3676,7 @@ static int try_smi_init(struct smi_info *new_smi) return 0; out_err_stop_timer: - wait_for_timer_and_thread(new_smi); + stop_timer_and_thread(new_smi); out_err: new_smi->interrupt_disabled = true; @@ -3866,7 +3868,7 @@ static void cleanup_one_si(struct smi_info *to_clean) */ if (to_clean->irq_cleanup) to_clean->irq_cleanup(to_clean); - wait_for_timer_and_thread(to_clean); + stop_timer_and_thread(to_clean); /* * Timeouts are stopped, now make sure the interrupts are off @@ -3878,7 +3880,7 @@ static void cleanup_one_si(struct smi_info *to_clean) schedule_timeout_uninterruptible(1); } if (to_clean->handlers) - disable_si_irq(to_clean, false); + disable_si_irq(to_clean); while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { poll(to_clean); schedule_timeout_uninterruptible(1); From 4b57542126669a5ad8bede61c8e9e07a5fc4a39b Mon Sep 17 00:00:00 2001 From: Vincent Pelletier Date: Sun, 26 Nov 2017 06:52:53 +0000 Subject: [PATCH 0607/3326] usb: gadget: ffs: Forbid usb_ep_alloc_request from sleeping commit 30bf90ccdec1da9c8198b161ecbff39ce4e5a9ba upstream. Found using DEBUG_ATOMIC_SLEEP while submitting an AIO read operation: [ 100.853642] BUG: sleeping function called from invalid context at mm/slab.h:421 [ 100.861148] in_atomic(): 1, irqs_disabled(): 1, pid: 1880, name: python [ 100.867954] 2 locks held by python/1880: [ 100.867961] #0: (&epfile->mutex){....}, at: [] ffs_mutex_lock+0x27/0x30 [usb_f_fs] [ 100.868020] #1: (&(&ffs->eps_lock)->rlock){....}, at: [] ffs_epfile_io.isra.17+0x24b/0x590 [usb_f_fs] [ 100.868076] CPU: 1 PID: 1880 Comm: python Not tainted 4.14.0-edison+ #118 [ 100.868085] Hardware name: Intel Corporation Merrifield/BODEGA BAY, BIOS 542 2015.01.21:18.19.48 [ 100.868093] Call Trace: [ 100.868122] dump_stack+0x47/0x62 [ 100.868156] ___might_sleep+0xfd/0x110 [ 100.868182] __might_sleep+0x68/0x70 [ 100.868217] kmem_cache_alloc_trace+0x4b/0x200 [ 100.868248] ? dwc3_gadget_ep_alloc_request+0x24/0xe0 [dwc3] [ 100.868302] dwc3_gadget_ep_alloc_request+0x24/0xe0 [dwc3] [ 100.868343] usb_ep_alloc_request+0x16/0xc0 [udc_core] [ 100.868386] ffs_epfile_io.isra.17+0x444/0x590 [usb_f_fs] [ 100.868424] ? _raw_spin_unlock_irqrestore+0x27/0x40 [ 100.868457] ? kiocb_set_cancel_fn+0x57/0x60 [ 100.868477] ? ffs_ep0_poll+0xc0/0xc0 [usb_f_fs] [ 100.868512] ffs_epfile_read_iter+0xfe/0x157 [usb_f_fs] [ 100.868551] ? security_file_permission+0x9c/0xd0 [ 100.868587] ? rw_verify_area+0xac/0x120 [ 100.868633] aio_read+0x9d/0x100 [ 100.868692] ? __fget+0xa2/0xd0 [ 100.868727] ? 
__might_sleep+0x68/0x70 [ 100.868763] SyS_io_submit+0x471/0x680 [ 100.868878] do_int80_syscall_32+0x4e/0xd0 [ 100.868921] entry_INT80_32+0x2a/0x2a [ 100.868932] EIP: 0xb7fbb676 [ 100.868941] EFLAGS: 00000292 CPU: 1 [ 100.868951] EAX: ffffffda EBX: b7aa2000 ECX: 00000002 EDX: b7af8368 [ 100.868961] ESI: b7fbb660 EDI: b7aab000 EBP: bfb6c658 ESP: bfb6c638 [ 100.868973] DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 007b Signed-off-by: Vincent Pelletier Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_fs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 0202e5132fa78f..876cdbec130743 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1016,7 +1016,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) else ret = ep->status; goto error_mutex; - } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) { + } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) { ret = -ENOMEM; } else { req->buf = data; From fc2f802193ccecf710ab608712b5b102dd9efc75 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 14 Nov 2017 14:42:57 -0500 Subject: [PATCH 0608/3326] fcntl: don't cap l_start and l_end values for F_GETLK64 in compat syscall commit 4d2dc2cc766c3b51929658cacbc6e34fc8e242fb upstream. Currently, we're capping the values too low in the F_GETLK64 case. The fields in that structure are 64-bit values, so we shouldn't need to do any sort of fixup there. Make sure we check that assumption at build time in the future however by ensuring that the sizes we're copying will fit. With this, we no longer need COMPAT_LOFF_T_MAX either, so remove it. Fixes: 94073ad77fff2 (fs/locks: don't mess with the address limit in compat_fcntl64) Reported-by: Vitaly Lipatov Signed-off-by: Jeff Layton Reviewed-by: David Howells Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/compat.h | 1 - arch/mips/include/asm/compat.h | 1 - arch/parisc/include/asm/compat.h | 1 - arch/powerpc/include/asm/compat.h | 1 - arch/s390/include/asm/compat.h | 1 - arch/sparc/include/asm/compat.h | 1 - arch/tile/include/asm/compat.h | 1 - arch/x86/include/asm/compat.h | 1 - fs/fcntl.c | 11 +++++------ 9 files changed, 5 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index e39d487bf72438..a3c7f271ad4c79 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -215,7 +215,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. This should not diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 8e2b5b55648868..49691331ada4c1 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -200,7 +200,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. 
This should not diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index 07f48827afdae8..acf8aa07cbe099 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -195,7 +195,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL struct compat_ipc64_perm { compat_key_t key; diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index a035b1e5dfa7c6..8a2aecfe9b0246 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -185,7 +185,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. This should not diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 1b60eb3676d556..5e6a63641a5f19 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -263,7 +263,6 @@ typedef struct compat_siginfo { #define si_overrun _sifields._timer._overrun #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. This should not diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 977c3f280ba190..fa38c78de0f001 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -209,7 +209,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. This should not diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h index c14e36f008c8f3..62a7b83025dd24 100644 --- a/arch/tile/include/asm/compat.h +++ b/arch/tile/include/asm/compat.h @@ -173,7 +173,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL struct compat_ipc64_perm { compat_key_t key; diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 9eef9cc64c6846..70bc1df580b2da 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -209,7 +209,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL struct compat_ipc64_perm { compat_key_t key; diff --git a/fs/fcntl.c b/fs/fcntl.c index 6fd311367efc8e..0345a46b885654 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -563,6 +563,9 @@ static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __u { struct compat_flock64 fl; + BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start)); + BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len)); + memset(&fl, 0, sizeof(struct compat_flock64)); copy_flock_fields(&fl, kfl); if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64))) @@ -641,12 +644,8 @@ COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, if (err) break; err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock); - if (err) - break; - err = fixup_compat_flock(&flock); - if (err) - return err; - err = put_compat_flock64(&flock, compat_ptr(arg)); + if (!err) + err = put_compat_flock64(&flock, compat_ptr(arg)); break; case F_SETLK: case F_SETLKW: From e587b76e655441b7f3239dfb999c444c8573820c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 5 Dec 2017 23:27:57 +0000 Subject: [PATCH 0609/3326] fix kcm_clone() commit 
a5739435b5a3b8c449f8844ecd71a3b1e89f0a33 upstream. 1) it's fput() or sock_release(), not both 2) don't do fd_install() until the last failure exit. 3) not a bug per se, but... don't attach socket to struct file until it's set up. Take reserving descriptor into the caller, move fd_install() to the caller, sanitize failure exits and calling conventions. Acked-by: Tom Herbert Signed-off-by: Al Viro Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/kcm/kcmsock.c | 71 ++++++++++++++++++----------------------------- 1 file changed, 27 insertions(+), 44 deletions(-) diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index af4e76ac88ff08..c5fa634e63ca2c 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1625,60 +1625,35 @@ static struct proto kcm_proto = { }; /* Clone a kcm socket. */ -static int kcm_clone(struct socket *osock, struct kcm_clone *info, - struct socket **newsockp) +static struct file *kcm_clone(struct socket *osock) { struct socket *newsock; struct sock *newsk; - struct file *newfile; - int err, newfd; + struct file *file; - err = -ENFILE; newsock = sock_alloc(); if (!newsock) - goto out; + return ERR_PTR(-ENFILE); newsock->type = osock->type; newsock->ops = osock->ops; __module_get(newsock->ops->owner); - newfd = get_unused_fd_flags(0); - if (unlikely(newfd < 0)) { - err = newfd; - goto out_fd_fail; - } - - newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name); - if (unlikely(IS_ERR(newfile))) { - err = PTR_ERR(newfile); - goto out_sock_alloc_fail; - } - newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, &kcm_proto, true); if (!newsk) { - err = -ENOMEM; - goto out_sk_alloc_fail; + sock_release(newsock); + return ERR_PTR(-ENOMEM); } - sock_init_data(newsock, newsk); init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux); - fd_install(newfd, newfile); - *newsockp = newsock; - info->fd = newfd; - - return 0; + file = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name); + if (IS_ERR(file)) + sock_release(newsock); -out_sk_alloc_fail: - fput(newfile); -out_sock_alloc_fail: - put_unused_fd(newfd); -out_fd_fail: - sock_release(newsock); -out: - return err; + return file; } static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) @@ -1708,17 +1683,25 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) } case SIOCKCMCLONE: { struct kcm_clone info; - struct socket *newsock = NULL; - - err = kcm_clone(sock, &info, &newsock); - if (!err) { - if (copy_to_user((void __user *)arg, &info, - sizeof(info))) { - err = -EFAULT; - sys_close(info.fd); - } - } + struct file *file; + + info.fd = get_unused_fd_flags(0); + if (unlikely(info.fd < 0)) + return info.fd; + file = kcm_clone(sock); + if (IS_ERR(file)) { + put_unused_fd(info.fd); + return PTR_ERR(file); + } + if (copy_to_user((void __user *)arg, &info, + sizeof(info))) { + put_unused_fd(info.fd); + fput(file); + return -EFAULT; + } + fd_install(info.fd, file); + err = 0; break; } default: From d7a71904e6027136bea9a24cb30e75777d8d9256 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 16 Nov 2017 17:58:17 +0000 Subject: [PATCH 0610/3326] KVM: arm/arm64: vgic-its: Preserve the revious read from the pending table commit 64afe6e9eb4841f35317da4393de21a047a883b3 upstream. The current pending table parsing code assumes that we keep the previous read of the pending bits, but keep that variable in the current block, making sure it is discarded on each loop. We end-up using whatever is on the stack. 
Who knows, it might just be the right thing... Fixes: 33d3bc9556a7d ("KVM: arm64: vgic-its: Read initial LPI pending table") Reported-by: AKASHI Takahiro Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- virt/kvm/arm/vgic/vgic-its.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 3108e07526af50..59ce2fb4982111 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -393,6 +393,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) int ret = 0; u32 *intids; int nr_irqs, i; + u8 pendmask; nr_irqs = vgic_copy_lpi_list(vcpu, &intids); if (nr_irqs < 0) @@ -400,7 +401,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) for (i = 0; i < nr_irqs; i++) { int byte_offset, bit_nr; - u8 pendmask; byte_offset = intids[i] / BITS_PER_BYTE; bit_nr = intids[i] % BITS_PER_BYTE; From b0c08c89ea1c0f47423d1da0bcdcd182611823c6 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 12 Oct 2017 18:22:25 +0900 Subject: [PATCH 0611/3326] kbuild: do not call cc-option before KBUILD_CFLAGS initialization [ Upstream commit 433dc2ebe7d17dd21cba7ad5c362d37323592236 ] Some $(call cc-option,...) are invoked very early, even before KBUILD_CFLAGS, etc. are initialized. The returned string from $(call cc-option,...) depends on KBUILD_CPPFLAGS, KBUILD_CFLAGS, and GCC_PLUGINS_CFLAGS. Since they are exported, they are not empty when the top Makefile is recursively invoked. The recursion occurs in several places. For example, the top Makefile invokes itself for silentoldconfig. "make tinyconfig", "make rpm-pkg" are the cases, too. In those cases, the second call of cc-option from the same line runs a different shell command due to non-pristine KBUILD_CFLAGS. To get the same result all the time, KBUILD_* and GCC_PLUGINS_CFLAGS must be initialized before any call of cc-option. This avoids garbage data in the .cache.mk file. Move all calls of cc-option below the config targets because target compiler flags are unnecessary for Kconfig. Signed-off-by: Masahiro Yamada Reviewed-by: Douglas Anderson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- Makefile | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index eabbd7748a2461..793183f9e3e519 100644 --- a/Makefile +++ b/Makefile @@ -373,9 +373,6 @@ LDFLAGS_MODULE = CFLAGS_KERNEL = AFLAGS_KERNEL = LDFLAGS_vmlinux = -CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) -CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) - # Use USERINCLUDE when you must reference the UAPI directories only. 
USERINCLUDE := \ @@ -394,21 +391,19 @@ LINUXINCLUDE := \ -I$(objtree)/include \ $(USERINCLUDE) -KBUILD_CPPFLAGS := -D__KERNEL__ - +KBUILD_AFLAGS := -D__ASSEMBLY__ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common -fshort-wchar \ -Werror-implicit-function-declaration \ -Wno-format-security \ - -std=gnu89 $(call cc-option,-fno-PIE) - - + -std=gnu89 +KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := -KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE) KBUILD_AFLAGS_MODULE := -DMODULE KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds +GCC_PLUGINS_CFLAGS := # Read KERNELRELEASE from include/config/kernel.release (if it exists) KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) @@ -421,7 +416,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL @@ -622,6 +617,12 @@ endif # Defaults to vmlinux, but the arch makefile usually adds further targets all: vmlinux +KBUILD_CFLAGS += $(call cc-option,-fno-PIE) +KBUILD_AFLAGS += $(call cc-option,-fno-PIE) +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) +CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) +export CFLAGS_GCOV CFLAGS_KCOV + # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default # values of the respective KBUILD_* variables ARCH_CPPFLAGS := From 854b27edf7b0672e5181e75274dc734863c89f2e Mon Sep 17 00:00:00 2001 From: Vaidyanathan Srinivasan Date: Thu, 24 Aug 2017 00:28:41 +0530 Subject: [PATCH 0612/3326] powerpc/powernv/idle: Round up latency and residency values [ Upstream commit 8d4e10e9ed9450e18fbbf6a8872be0eac9fd4999 ] On PowerNV platforms, firmware provides exit latency and target residency for each of the idle states in nano seconds. Cpuidle framework expects the values in micro seconds. Round up to nearest micro seconds to avoid errors in cases where the values are defined as fractional micro seconds. Default idle state of 'snooze' has exit latency of zero. If other states have fractional micro second exit latency, they would get rounded down to zero micro second and make cpuidle framework choose deeper idle state when snooze loop is the right choice. Reported-by: Anton Blanchard Signed-off-by: Vaidyanathan Srinivasan Reviewed-by: Gautham R. Shenoy Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/cpuidle/cpuidle-powernv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index ed6531f075c621..e06605b21841ac 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -384,9 +384,9 @@ static int powernv_add_idle_states(void) * Firmware passes residency and latency values in ns. * cpuidle expects it in us. 
*/ - exit_latency = latency_ns[i] / 1000; + exit_latency = DIV_ROUND_UP(latency_ns[i], 1000); if (!rc) - target_residency = residency_ns[i] / 1000; + target_residency = DIV_ROUND_UP(residency_ns[i], 1000); else target_residency = 0; From 7015ca81bc8041c17f2d45943703c5b887207d5d Mon Sep 17 00:00:00 2001 From: Keefe Liu Date: Thu, 9 Nov 2017 20:09:31 +0800 Subject: [PATCH 0613/3326] ipvlan: fix ipv6 outbound device [ Upstream commit ca29fd7cce5a6444d57fb86517589a1a31c759e1 ] When processing the outbound packet of ipv6, we should assign the master device as the output device rather than the input device. Signed-off-by: Keefe Liu Acked-by: Mahesh Bandewar Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ipvlan/ipvlan_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 1f3295e274d0f5..8feb84fd4ca713 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -409,7 +409,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) struct dst_entry *dst; int err, ret = NET_XMIT_DROP; struct flowi6 fl6 = { - .flowi6_iif = dev->ifindex, + .flowi6_oif = dev->ifindex, .daddr = ip6h->daddr, .saddr = ip6h->saddr, .flowi6_flags = FLOWI_FLAG_ANYSRC, From bd099ef951dd15883c010e044b3c9de4276dd5ae Mon Sep 17 00:00:00 2001 From: Hongxu Jia Date: Fri, 10 Nov 2017 15:59:17 +0800 Subject: [PATCH 0614/3326] ide: ide-atapi: fix compile error with defining macro DEBUG [ Upstream commit 8dc7a31fbce5e2dbbacd83d910da37105181b054 ] Compiling ide-atapi failed with the macro "DEBUG" defined ... |drivers/ide/ide-atapi.c:285:52: error: 'struct request' has no member named 'cmd'; did you mean 'csd'? | debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]); ... Since we split the scsi_request out of struct request, the same update was missed in the debug_log() calls. Fixes: 82ed4db499b8 ("block: split scsi_request out of struct request") Signed-off-by: Hongxu Jia Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/ide/ide-atapi.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 14d1e7d9a1d6f7..0e6bc631a1caf6 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -282,7 +282,7 @@ int ide_cd_expiry(ide_drive_t *drive) struct request *rq = drive->hwif->rq; unsigned long wait = 0; - debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]); + debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]); /* * Some commands are *slow* and normally take a long time to complete.
@@ -463,7 +463,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) return ide_do_reset(drive); } - debug_log("[cmd %x]: check condition\n", rq->cmd[0]); + debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]); /* Retry operation */ ide_retry_pc(drive); @@ -531,7 +531,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) ide_pad_transfer(drive, write, bcount); debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n", - rq->cmd[0], done, bcount, scsi_req(rq)->resid_len); + scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len); /* And set the interrupt handler again */ ide_set_handler(drive, ide_pc_intr, timeout); From a4000d951e3723d5aff6dd4ce069c5908365dc24 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 8 Nov 2017 10:23:45 -0800 Subject: [PATCH 0615/3326] blk-mq: Avoid that request queue removal can trigger list corruption [ Upstream commit aba7afc5671c23beade64d10caf86e24a9105dab ] Avoid that removal of a request queue sporadically triggers the following warning: list_del corruption. next->prev should be ffff8807d649b970, but was 6b6b6b6b6b6b6b6b WARNING: CPU: 3 PID: 342 at lib/list_debug.c:56 __list_del_entry_valid+0x92/0xa0 Call Trace: process_one_work+0x11b/0x660 worker_thread+0x3d/0x3b0 kthread+0x129/0x140 ret_from_fork+0x27/0x40 Signed-off-by: Bart Van Assche Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Johannes Thumshirn Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- block/blk-core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/block/blk-core.c b/block/blk-core.c index 516ce3174683a9..7b30bf10b1d4f4 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -339,6 +339,7 @@ void blk_sync_queue(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; + cancel_delayed_work_sync(&q->requeue_work); queue_for_each_hw_ctx(q, hctx, i) cancel_delayed_work_sync(&hctx->run_work); } else { From 7536280f9c949a97e06381a6137412c06838c3e7 Mon Sep 17 00:00:00 2001 From: Israel Rukshin Date: Sun, 5 Nov 2017 08:43:01 +0000 Subject: [PATCH 0616/3326] nvmet-rdma: update queue list during ib_device removal [ Upstream commit 43b92fd27aaef0f529c9321cfebbaec1d7b8f503 ] A NULL deref happens when nvmet_rdma_remove_one() is called more than once (e.g. while connected via 2 ports). The first call frees the queues related to the first ib_device but doesn't remove them from the queue list. While calling nvmet_rdma_remove_one() for the second ib_device it goes over the full queue list again and we get the NULL deref. 
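[Editorial aside, not the driver code: a minimal userspace analogue of the fix in the hunk below. The struct and function names here are hypothetical stand-ins, not nvmet types; the point is only that an entry must be unlinked before it is freed so that a later walk of the same list never touches freed memory, which is the property the list_del_init() plus list_for_each_entry_safe() change gives nvmet_rdma_remove_one().]

#include <stdlib.h>

struct queue {
    int dev;                 /* stand-in for "which ib_device owns this queue" */
    struct queue *next;
};

static struct queue *queue_list;

static void add_queue(int dev)
{
    struct queue *q = malloc(sizeof(*q));

    q->dev = dev;
    q->next = queue_list;
    queue_list = q;
}

/* Free every queue that belongs to 'dev'.  Each matching entry is unlinked
 * first and freed afterwards, so a second call for another device walks
 * only live entries. */
static void remove_one(int dev)
{
    struct queue **pp = &queue_list;

    while (*pp) {
        struct queue *q = *pp;

        if (q->dev == dev) {
            *pp = q->next;   /* unlink, then ... */
            free(q);         /* ... free */
        } else {
            pp = &q->next;
        }
    }
}

int main(void)
{
    add_queue(0);
    add_queue(1);
    add_queue(0);
    remove_one(0);   /* first device goes away */
    remove_one(1);   /* second removal sees only live queues */
    return 0;
}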
Fixes: f1d4ef7d ("nvmet-rdma: register ib_client to not deadlock in device removal") Signed-off-by: Israel Rukshin Reviewed-by: Max Gurtovoy Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/target/rdma.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 76d2bb793afe50..3333d417b248ba 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1512,15 +1512,17 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = { static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) { - struct nvmet_rdma_queue *queue; + struct nvmet_rdma_queue *queue, *tmp; /* Device is being removed, delete all queues using this device */ mutex_lock(&nvmet_rdma_queue_mutex); - list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { + list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, + queue_list) { if (queue->dev->device != ib_device) continue; pr_info("Removing queue %d\n", queue->idx); + list_del_init(&queue->queue_list); __nvmet_rdma_queue_disconnect(queue); } mutex_unlock(&nvmet_rdma_queue_mutex); From 4086f7cf0c3e2fe275a2a18dc25749df348c0cdb Mon Sep 17 00:00:00 2001 From: Steve Grubb Date: Tue, 17 Oct 2017 18:29:22 -0400 Subject: [PATCH 0617/3326] audit: Allow auditd to set pid to 0 to end auditing [ Upstream commit 33e8a907804428109ce1d12301c3365d619cc4df ] The API to end auditing has historically been for auditd to set the pid to 0. This patch restores that functionality. See: https://github.com/linux-audit/audit-kernel/issues/69 Reviewed-by: Richard Guy Briggs Signed-off-by: Steve Grubb Signed-off-by: Paul Moore Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/audit.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/kernel/audit.c b/kernel/audit.c index be1c28fd4d575d..d779326e53c03d 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1197,25 +1197,28 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) pid_t auditd_pid; struct pid *req_pid = task_tgid(current); - /* sanity check - PID values must match */ - if (new_pid != pid_vnr(req_pid)) + /* Sanity check - PID values must match. Setting + * pid to 0 is how auditd ends auditing. 
*/ + if (new_pid && (new_pid != pid_vnr(req_pid))) return -EINVAL; /* test the auditd connection */ audit_replace(req_pid); auditd_pid = auditd_pid_vnr(); - /* only the current auditd can unregister itself */ - if ((!new_pid) && (new_pid != auditd_pid)) { - audit_log_config_change("audit_pid", new_pid, - auditd_pid, 0); - return -EACCES; - } - /* replacing a healthy auditd is not allowed */ - if (auditd_pid && new_pid) { - audit_log_config_change("audit_pid", new_pid, - auditd_pid, 0); - return -EEXIST; + if (auditd_pid) { + /* replacing a healthy auditd is not allowed */ + if (new_pid) { + audit_log_config_change("audit_pid", + new_pid, auditd_pid, 0); + return -EEXIST; + } + /* only current auditd can unregister itself */ + if (pid_vnr(req_pid) != auditd_pid) { + audit_log_config_change("audit_pid", + new_pid, auditd_pid, 0); + return -EACCES; + } } if (new_pid) { From 0ad0bb60166d8e4fbacaaaaaeb10a24de5e99aff Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Fri, 1 Sep 2017 09:44:34 -0400 Subject: [PATCH 0618/3326] audit: ensure that 'audit=1' actually enables audit for PID 1 [ Upstream commit 173743dd99a49c956b124a74c8aacb0384739a4c ] Prior to this patch we enabled audit in audit_init(), which is too late for PID 1 as the standard initcalls are run after the PID 1 task is forked. This means that we never allocate an audit_context (see audit_alloc()) for PID 1 and therefore miss a lot of audit events generated by PID 1. This patch enables audit as early as possible to help ensure that when PID 1 is forked it can allocate an audit_context if required. Reviewed-by: Richard Guy Briggs Signed-off-by: Paul Moore Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/audit.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/audit.c b/kernel/audit.c index d779326e53c03d..5b34d3114af484 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -85,13 +85,13 @@ static int audit_initialized; #define AUDIT_OFF 0 #define AUDIT_ON 1 #define AUDIT_LOCKED 2 -u32 audit_enabled; -u32 audit_ever_enabled; +u32 audit_enabled = AUDIT_OFF; +u32 audit_ever_enabled = !!AUDIT_OFF; EXPORT_SYMBOL_GPL(audit_enabled); /* Default state when kernel boots without any parameters. */ -static u32 audit_default; +static u32 audit_default = AUDIT_OFF; /* If auditing cannot proceed, audit_failure selects what happens. */ static u32 audit_failure = AUDIT_FAIL_PRINTK; @@ -1552,8 +1552,6 @@ static int __init audit_init(void) register_pernet_subsys(&audit_net_ops); audit_initialized = AUDIT_INITIALIZED; - audit_enabled = audit_default; - audit_ever_enabled |= !!audit_default; kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); if (IS_ERR(kauditd_task)) { @@ -1575,6 +1573,8 @@ static int __init audit_enable(char *str) audit_default = !!simple_strtol(str, NULL, 0); if (!audit_default) audit_initialized = AUDIT_DISABLED; + audit_enabled = audit_default; + audit_ever_enabled = !!audit_enabled; pr_info("%s\n", audit_default ? 
"enabled (after initialization)" : "disabled (until reboot)"); From 2c727856d07198ca0c0d054480e608f0f759f6c5 Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Thu, 2 Nov 2017 19:58:28 +0100 Subject: [PATCH 0619/3326] dm raid: fix panic when attempting to force a raid to sync [ Upstream commit 233978449074ca7e45d9c959f9ec612d1b852893 ] Requesting a sync on an active raid device via a table reload (see 'sync' parameter in Documentation/device-mapper/dm-raid.txt) skips the super_load() call that defines the superblock size (rdev->sb_size) -- resulting in an oops if/when super_sync()->memset() is called. Fix by moving the initialization of the superblock start and size out of super_load() to the caller (analyse_superblocks). Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-raid.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 2245d06d204597..a25eebd989969d 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2143,13 +2143,6 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) struct dm_raid_superblock *refsb; uint64_t events_sb, events_refsb; - rdev->sb_start = 0; - rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); - if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { - DMERR("superblock size of a logical block is no longer valid"); - return -EINVAL; - } - r = read_disk_sb(rdev, rdev->sb_size, false); if (r) return r; @@ -2494,6 +2487,17 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (test_bit(Journal, &rdev->flags)) continue; + if (!rdev->meta_bdev) + continue; + + /* Set superblock offset/size for metadata device. */ + rdev->sb_start = 0; + rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); + if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) { + DMERR("superblock size of a logical block is no longer valid"); + return -EINVAL; + } + /* * Skipping super_load due to CTR_FLAG_SYNC will cause * the array to undergo initialization again as @@ -2506,9 +2510,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) continue; - if (!rdev->meta_bdev) - continue; - r = super_load(rdev, freshest); switch (r) { From 89a459e0d519b289c15e4da8f52ff5528c7b8a3c Mon Sep 17 00:00:00 2001 From: Zdenek Kabelac Date: Wed, 8 Nov 2017 13:44:56 +0100 Subject: [PATCH 0620/3326] md: free unused memory after bitmap resize [ Upstream commit 0868b99c214a3d55486c700de7c3f770b7243e7c ] When bitmap is resized, the old kalloced chunks just are not released once the resized bitmap starts to use new space. This fixes in particular kmemleak reports like this one: unreferenced object 0xffff8f4311e9c000 (size 4096): comm "lvm", pid 19333, jiffies 4295263268 (age 528.265s) hex dump (first 32 bytes): 02 80 02 80 02 80 02 80 02 80 02 80 02 80 02 80 ................ 02 80 02 80 02 80 02 80 02 80 02 80 02 80 02 80 ................ 
backtrace: [] kmemleak_alloc+0x4a/0xa0 [] kmem_cache_alloc_trace+0x14e/0x2e0 [] bitmap_checkpage+0x7c/0x110 [] bitmap_get_counter+0x45/0xd0 [] bitmap_set_memory_bits+0x43/0xe0 [] bitmap_init_from_disk+0x23c/0x530 [] bitmap_load+0xbe/0x160 [] raid_preresume+0x203/0x2f0 [dm_raid] [] dm_table_resume_targets+0x4f/0xe0 [] dm_resume+0x122/0x140 [] dev_suspend+0x18f/0x290 [] ctl_ioctl+0x287/0x560 [] dm_ctl_ioctl+0x13/0x20 [] do_vfs_ioctl+0xa6/0x750 [] SyS_ioctl+0x79/0x90 [] entry_SYSCALL_64_fastpath+0x1f/0xc2 Signed-off-by: Zdenek Kabelac Signed-off-by: Shaohua Li Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/bitmap.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index f425905c97fa93..0cabf31fb163a3 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -2158,6 +2158,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, for (k = 0; k < page; k++) { kfree(new_bp[k].map); } + kfree(new_bp); /* restore some fields from old_counts */ bitmap->counts.bp = old_counts.bp; @@ -2208,6 +2209,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, block += old_blocks; } + if (bitmap->counts.bp != old_counts.bp) { + unsigned long k; + for (k = 0; k < old_counts.pages; k++) + if (!old_counts.bp[k].hijacked) + kfree(old_counts.bp[k].map); + kfree(old_counts.bp); + } + if (!init) { int i; while (block < (chunks << chunkshift)) { From f0a7d7b4298ce318fe07266af899a9b7c4227ad6 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 25 Oct 2017 23:10:19 +0300 Subject: [PATCH 0621/3326] RDMA/cxgb4: Annotate r2 and stag as __be32 [ Upstream commit 7d7d065a5eec7e218174d5c64a9f53f99ffdb119 ] Chelsio cxgb4 HW is big-endian, hence there is need to properly annotate r2 and stag fields as __be32 and not __u32 to fix the following sparse warnings. 
drivers/infiniband/hw/cxgb4/qp.c:614:16: warning: incorrect type in assignment (different base types) expected unsigned int [unsigned] [usertype] r2 got restricted __be32 [usertype] drivers/infiniband/hw/cxgb4/qp.c:615:18: warning: incorrect type in assignment (different base types) expected unsigned int [unsigned] [usertype] stag got restricted __be32 [usertype] Cc: Steve Wise Signed-off-by: Leon Romanovsky Reviewed-by: Steve Wise Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index 010c709ba3bb53..58c531db4f4aa0 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr { __u16 wrid; __u8 r1[3]; __u8 len16; - __u32 r2; - __u32 stag; + __be32 r2; + __be32 stag; struct fw_ri_tpte tpte; __u64 pbl[2]; }; From 93e2d845f265f8ff006f6ac5134f038ad382614e Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Fri, 20 Oct 2017 02:16:58 -0700 Subject: [PATCH 0622/3326] x86/intel_rdt: Fix potential deadlock during resctrl unmount [ Upstream commit 36b6f9fcb8928c06b6638a4cf91bc9d69bb49aa2 ] Lockdep warns about a potential deadlock: [ 66.782842] ====================================================== [ 66.782888] WARNING: possible circular locking dependency detected [ 66.782937] 4.14.0-rc2-test-test+ #48 Not tainted [ 66.782983] ------------------------------------------------------ [ 66.783052] umount/336 is trying to acquire lock: [ 66.783117] (cpu_hotplug_lock.rw_sem){++++}, at: [] rdt_kill_sb+0x215/0x390 [ 66.783193] but task is already holding lock: [ 66.783244] (rdtgroup_mutex){+.+.}, at: [] rdt_kill_sb+0x36/0x390 [ 66.783305] which lock already depends on the new lock. 
[ 66.783364] the existing dependency chain (in reverse order) is: [ 66.783419] -> #3 (rdtgroup_mutex){+.+.}: [ 66.783467] __lock_acquire+0x1293/0x13f0 [ 66.783509] lock_acquire+0xaf/0x220 [ 66.783543] __mutex_lock+0x71/0x9b0 [ 66.783575] mutex_lock_nested+0x1b/0x20 [ 66.783610] intel_rdt_online_cpu+0x3b/0x430 [ 66.783649] cpuhp_invoke_callback+0xab/0x8e0 [ 66.783687] cpuhp_thread_fun+0x7a/0x150 [ 66.783722] smpboot_thread_fn+0x1cc/0x270 [ 66.783764] kthread+0x16e/0x190 [ 66.783794] ret_from_fork+0x27/0x40 [ 66.783825] -> #2 (cpuhp_state){+.+.}: [ 66.783870] __lock_acquire+0x1293/0x13f0 [ 66.783906] lock_acquire+0xaf/0x220 [ 66.783938] cpuhp_issue_call+0x102/0x170 [ 66.783974] __cpuhp_setup_state_cpuslocked+0x154/0x2a0 [ 66.784023] __cpuhp_setup_state+0xc7/0x170 [ 66.784061] page_writeback_init+0x43/0x67 [ 66.784097] pagecache_init+0x43/0x4a [ 66.784131] start_kernel+0x3ad/0x3f7 [ 66.784165] x86_64_start_reservations+0x2a/0x2c [ 66.784204] x86_64_start_kernel+0x72/0x75 [ 66.784241] verify_cpu+0x0/0xfb [ 66.784270] -> #1 (cpuhp_state_mutex){+.+.}: [ 66.784319] __lock_acquire+0x1293/0x13f0 [ 66.784355] lock_acquire+0xaf/0x220 [ 66.784387] __mutex_lock+0x71/0x9b0 [ 66.784419] mutex_lock_nested+0x1b/0x20 [ 66.784454] __cpuhp_setup_state_cpuslocked+0x52/0x2a0 [ 66.784497] __cpuhp_setup_state+0xc7/0x170 [ 66.784535] page_alloc_init+0x28/0x30 [ 66.784569] start_kernel+0x148/0x3f7 [ 66.784602] x86_64_start_reservations+0x2a/0x2c [ 66.784642] x86_64_start_kernel+0x72/0x75 [ 66.784678] verify_cpu+0x0/0xfb [ 66.784707] -> #0 (cpu_hotplug_lock.rw_sem){++++}: [ 66.784759] check_prev_add+0x32f/0x6e0 [ 66.784794] __lock_acquire+0x1293/0x13f0 [ 66.784830] lock_acquire+0xaf/0x220 [ 66.784863] cpus_read_lock+0x3d/0xb0 [ 66.784896] rdt_kill_sb+0x215/0x390 [ 66.784930] deactivate_locked_super+0x3e/0x70 [ 66.784968] deactivate_super+0x40/0x60 [ 66.785003] cleanup_mnt+0x3f/0x80 [ 66.785034] __cleanup_mnt+0x12/0x20 [ 66.785070] task_work_run+0x8b/0xc0 [ 66.785103] exit_to_usermode_loop+0x94/0xa0 [ 66.786804] syscall_return_slowpath+0xe8/0x150 [ 66.788502] entry_SYSCALL_64_fastpath+0xab/0xad [ 66.790194] other info that might help us debug this: [ 66.795139] Chain exists of: cpu_hotplug_lock.rw_sem --> cpuhp_state --> rdtgroup_mutex [ 66.800035] Possible unsafe locking scenario: [ 66.803267] CPU0 CPU1 [ 66.804867] ---- ---- [ 66.806443] lock(rdtgroup_mutex); [ 66.808002] lock(cpuhp_state); [ 66.809565] lock(rdtgroup_mutex); [ 66.811110] lock(cpu_hotplug_lock.rw_sem); [ 66.812608] *** DEADLOCK *** [ 66.816983] 2 locks held by umount/336: [ 66.818418] #0: (&type->s_umount_key#35){+.+.}, at: [] deactivate_super+0x38/0x60 [ 66.819922] #1: (rdtgroup_mutex){+.+.}, at: [] rdt_kill_sb+0x36/0x390 When the resctrl filesystem is unmounted the locks should be obtain in the locks in the same order as was done when the cpus came online: cpu_hotplug_lock before rdtgroup_mutex. This also requires to switch the static_branch_disable() calls to the _cpulocked variant because now cpu hotplug lock is held already. 
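[Editorial aside, not the resctrl code: the rule the patch below enforces is the usual one of taking nested locks in a single global order on every path. A minimal pthread sketch of that discipline follows; subsystem_op() and noop() are hypothetical names, and the two locks are plain pthread objects named only to mirror the changelog.]

#include <pthread.h>

static pthread_rwlock_t cpu_hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t rdtgroup_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Every path -- mount, CPU online, and now unmount -- takes the hotplug
 * lock (outer) before the subsystem mutex (inner), so no A/B vs. B/A
 * inversion can deadlock. */
static void subsystem_op(void (*op)(void))
{
    pthread_rwlock_rdlock(&cpu_hotplug_lock);   /* cpus_read_lock()   */
    pthread_mutex_lock(&rdtgroup_mutex);
    op();
    pthread_mutex_unlock(&rdtgroup_mutex);
    pthread_rwlock_unlock(&cpu_hotplug_lock);   /* cpus_read_unlock() */
}

static void noop(void)
{
}

int main(void)
{
    subsystem_op(noop);   /* build with -pthread */
    return 0;
}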
[ tglx: Switched to cpus_read_[un]lock ] Signed-off-by: Reinette Chatre Signed-off-by: Thomas Gleixner Tested-by: Sai Praneeth Prakhya Acked-by: Vikas Shivappa Acked-by: Fenghua Yu Acked-by: Tony Luck Link: https://lkml.kernel.org/r/cc292e76be073f7260604651711c47b09fd0dc81.1508490116.git.reinette.chatre@intel.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 3d433af856a53f..7be35b60029981 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -1297,9 +1297,7 @@ static void rmdir_all_sub(void) kfree(rdtgrp); } /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ - get_online_cpus(); update_closid_rmid(cpu_online_mask, &rdtgroup_default); - put_online_cpus(); kernfs_remove(kn_info); kernfs_remove(kn_mongrp); @@ -1310,6 +1308,7 @@ static void rdt_kill_sb(struct super_block *sb) { struct rdt_resource *r; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /*Put everything back to default values. */ @@ -1317,11 +1316,12 @@ static void rdt_kill_sb(struct super_block *sb) reset_all_ctrls(r); cdp_disable(); rmdir_all_sub(); - static_branch_disable(&rdt_alloc_enable_key); - static_branch_disable(&rdt_mon_enable_key); - static_branch_disable(&rdt_enable_key); + static_branch_disable_cpuslocked(&rdt_alloc_enable_key); + static_branch_disable_cpuslocked(&rdt_mon_enable_key); + static_branch_disable_cpuslocked(&rdt_enable_key); kernfs_kill_sb(sb); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); } static struct file_system_type rdt_fs_type = { From ce344631dc13717314184ebd47f5e8b0ff3cfcab Mon Sep 17 00:00:00 2001 From: Daniel Scheller Date: Sun, 29 Oct 2017 11:43:22 -0400 Subject: [PATCH 0623/3326] media: dvb-core: always call invoke_release() in fe_free() commit 62229de19ff2b7f3e0ebf4d48ad99061127d0281 upstream. Follow-up to: ead666000a5f ("media: dvb_frontend: only use kref after initialized") The aforementioned commit fixed refcount OOPSes when demod driver attaching succeeded but tuner driver didn't. However, the use count of the attached demod drivers don't go back to zero and thus couldn't be cleanly unloaded. Improve on this by calling dvb_frontend_invoke_release() in __dvb_frontend_free() regardless of fepriv being NULL, instead of returning when fepriv is NULL. This is safe to do since _invoke_release() will check for passed pointers being valid before calling the .release() function. 
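[Editorial aside, not the dvb-core code: a simplified stand-alone sketch of the shape of the change; the real hunk follows below. struct fe, fe_free() and dummy_release() are hypothetical stand-ins. The only point is that the early return no longer skips the release callback, so the attached driver's cleanup always runs, while the private data is still only touched when it exists.]

#include <stdlib.h>

struct fe {
    void (*release)(struct fe *fe);
    void *priv;
};

static void fe_free(struct fe *fe)
{
    void *priv = fe->priv;

    if (priv) {
        /* steps that need the private data stay guarded,
         * e.g. unregistering the device node */
    }

    /* The release hook now runs unconditionally, even when attach
     * failed before the private data was allocated. */
    if (fe->release)
        fe->release(fe);

    if (priv)
        free(priv);
}

static void dummy_release(struct fe *fe)
{
    (void)fe;
}

int main(void)
{
    struct fe fe = { .release = dummy_release, .priv = NULL };

    fe_free(&fe);   /* release still runs even though priv is NULL */
    return 0;
}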
[mchehab@s-opensource.com: changed the logic a little bit to reduce conflicts with another bug fix patch under review] Fixes: ead666000a5f ("media: dvb_frontend: only use kref after initialized") Signed-off-by: Daniel Scheller Signed-off-by: Mauro Carvalho Chehab Cc: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- drivers/media/dvb-core/dvb_frontend.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 9139d01ba7ed6c..0a823484669915 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -145,13 +145,14 @@ static void __dvb_frontend_free(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; - if (!fepriv) - return; - - dvb_free_device(fepriv->dvbdev); + if (fepriv) + dvb_free_device(fepriv->dvbdev); dvb_frontend_invoke_release(fe, fe->ops.release); + if (!fepriv) + return; + kfree(fepriv); fe->frontend_priv = NULL; } From 7bc8eb30f1e02b4dd6fd2869720c64d9bf39d765 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 7 Nov 2017 08:39:39 -0500 Subject: [PATCH 0624/3326] dvb_frontend: don't use-after-free the frontend struct commit b1cb7372fa822af6c06c8045963571d13ad6348b upstream. dvb_frontend_invoke_release() may free the frontend struct. So, the free logic can't update it anymore after calling it. That's OK, as __dvb_frontend_free() is called only when the krefs are zeroed, so nobody is using it anymore. That should fix the following KASAN error: The KASAN report looks like this (running on kernel 3e0cc09a3a2c40ec1ffb6b4e12da86e98feccb11 (4.14-rc5+)): ================================================================== BUG: KASAN: use-after-free in __dvb_frontend_free+0x113/0x120 Write of size 8 at addr ffff880067d45a00 by task kworker/0:1/24 CPU: 0 PID: 24 Comm: kworker/0:1 Not tainted 4.14.0-rc5-43687-g06ab8a23e0e6 #545 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event Call Trace: __dump_stack lib/dump_stack.c:16 dump_stack+0x292/0x395 lib/dump_stack.c:52 print_address_description+0x78/0x280 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 kasan_report+0x23d/0x350 mm/kasan/report.c:409 __asan_report_store8_noabort+0x1c/0x20 mm/kasan/report.c:435 __dvb_frontend_free+0x113/0x120 drivers/media/dvb-core/dvb_frontend.c:156 dvb_frontend_put+0x59/0x70 drivers/media/dvb-core/dvb_frontend.c:176 dvb_frontend_detach+0x120/0x150 drivers/media/dvb-core/dvb_frontend.c:2803 dvb_usb_adapter_frontend_exit+0xd6/0x160 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:340 dvb_usb_adapter_exit drivers/media/usb/dvb-usb/dvb-usb-init.c:116 dvb_usb_exit+0x9b/0x200 drivers/media/usb/dvb-usb/dvb-usb-init.c:132 dvb_usb_device_exit+0xa5/0xf0 drivers/media/usb/dvb-usb/dvb-usb-init.c:295 usb_unbind_interface+0x21c/0xa90 drivers/usb/core/driver.c:423 __device_release_driver drivers/base/dd.c:861 device_release_driver_internal+0x4f1/0x5c0 drivers/base/dd.c:893 device_release_driver+0x1e/0x30 drivers/base/dd.c:918 bus_remove_device+0x2f4/0x4b0 drivers/base/bus.c:565 device_del+0x5c4/0xab0 drivers/base/core.c:1985 usb_disable_device+0x1e9/0x680 drivers/usb/core/message.c:1170 usb_disconnect+0x260/0x7a0 drivers/usb/core/hub.c:2124 hub_port_connect drivers/usb/core/hub.c:4754 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x1318/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119 
worker_thread+0x221/0x1850 kernel/workqueue.c:2253 kthread+0x363/0x440 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 Allocated by task 24: save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59 save_stack+0x43/0xd0 mm/kasan/kasan.c:447 set_track mm/kasan/kasan.c:459 kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:551 kmem_cache_alloc_trace+0x11e/0x2d0 mm/slub.c:2772 kmalloc ./include/linux/slab.h:493 kzalloc ./include/linux/slab.h:666 dtt200u_fe_attach+0x4c/0x110 drivers/media/usb/dvb-usb/dtt200u-fe.c:212 dtt200u_frontend_attach+0x35/0x80 drivers/media/usb/dvb-usb/dtt200u.c:136 dvb_usb_adapter_frontend_init+0x32b/0x660 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:286 dvb_usb_adapter_init drivers/media/usb/dvb-usb/dvb-usb-init.c:86 dvb_usb_init drivers/media/usb/dvb-usb/dvb-usb-init.c:162 dvb_usb_device_init+0xf73/0x17f0 drivers/media/usb/dvb-usb/dvb-usb-init.c:277 dtt200u_usb_probe+0xa1/0xe0 drivers/media/usb/dvb-usb/dtt200u.c:155 usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26b/0x3c0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_set_configuration+0x104e/0x1870 drivers/usb/core/message.c:1932 generic_probe+0x73/0xe0 drivers/usb/core/generic.c:174 usb_probe_device+0xaf/0xe0 drivers/usb/core/driver.c:266 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26b/0x3c0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_new_device+0x7b8/0x1020 drivers/usb/core/hub.c:2457 hub_port_connect drivers/usb/core/hub.c:4903 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x194d/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119 worker_thread+0x221/0x1850 kernel/workqueue.c:2253 kthread+0x363/0x440 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 Freed by task 24: save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59 save_stack+0x43/0xd0 mm/kasan/kasan.c:447 set_track mm/kasan/kasan.c:459 kasan_slab_free+0x72/0xc0 mm/kasan/kasan.c:524 slab_free_hook mm/slub.c:1390 slab_free_freelist_hook mm/slub.c:1412 slab_free mm/slub.c:2988 kfree+0xf6/0x2f0 mm/slub.c:3919 dtt200u_fe_release+0x3c/0x50 drivers/media/usb/dvb-usb/dtt200u-fe.c:202 dvb_frontend_invoke_release.part.13+0x1c/0x30 drivers/media/dvb-core/dvb_frontend.c:2790 dvb_frontend_invoke_release drivers/media/dvb-core/dvb_frontend.c:2789 __dvb_frontend_free+0xad/0x120 drivers/media/dvb-core/dvb_frontend.c:153 dvb_frontend_put+0x59/0x70 drivers/media/dvb-core/dvb_frontend.c:176 dvb_frontend_detach+0x120/0x150 drivers/media/dvb-core/dvb_frontend.c:2803 dvb_usb_adapter_frontend_exit+0xd6/0x160 drivers/media/usb/dvb-usb/dvb-usb-dvb.c:340 dvb_usb_adapter_exit drivers/media/usb/dvb-usb/dvb-usb-init.c:116 dvb_usb_exit+0x9b/0x200 drivers/media/usb/dvb-usb/dvb-usb-init.c:132 dvb_usb_device_exit+0xa5/0xf0 drivers/media/usb/dvb-usb/dvb-usb-init.c:295 
usb_unbind_interface+0x21c/0xa90 drivers/usb/core/driver.c:423 __device_release_driver drivers/base/dd.c:861 device_release_driver_internal+0x4f1/0x5c0 drivers/base/dd.c:893 device_release_driver+0x1e/0x30 drivers/base/dd.c:918 bus_remove_device+0x2f4/0x4b0 drivers/base/bus.c:565 device_del+0x5c4/0xab0 drivers/base/core.c:1985 usb_disable_device+0x1e9/0x680 drivers/usb/core/message.c:1170 usb_disconnect+0x260/0x7a0 drivers/usb/core/hub.c:2124 hub_port_connect drivers/usb/core/hub.c:4754 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x1318/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc73/0x1d90 kernel/workqueue.c:2119 worker_thread+0x221/0x1850 kernel/workqueue.c:2253 kthread+0x363/0x440 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 The buggy address belongs to the object at ffff880067d45500 which belongs to the cache kmalloc-2048 of size 2048 The buggy address is located 1280 bytes inside of 2048-byte region [ffff880067d45500, ffff880067d45d00) The buggy address belongs to the page: page:ffffea00019f5000 count:1 mapcount:0 mapping: (null) index:0x0 compound_mapcount: 0 flags: 0x100000000008100(slab|head) raw: 0100000000008100 0000000000000000 0000000000000000 00000001000f000f raw: dead000000000100 dead000000000200 ffff88006c002d80 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff880067d45900: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff880067d45980: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff880067d45a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff880067d45a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff880067d45b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ================================================================== Fixes: ead666000a5f ("media: dvb_frontend: only use kref after initialized") Reported-by: Andrey Konovalov Suggested-by: Matthias Schwarzott Tested-by: Andrey Konovalov Signed-off-by: Mauro Carvalho Chehab Cc: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- drivers/media/dvb-core/dvb_frontend.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 0a823484669915..33d844fe2e703f 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -150,11 +150,8 @@ static void __dvb_frontend_free(struct dvb_frontend *fe) dvb_frontend_invoke_release(fe, fe->ops.release); - if (!fepriv) - return; - - kfree(fepriv); - fe->frontend_priv = NULL; + if (fepriv) + kfree(fepriv); } static void dvb_frontend_free(struct kref *ref) From 3afae8437c3cbc22966762e80e81818f5a90eb06 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sun, 17 Dec 2017 15:08:14 +0100 Subject: [PATCH 0625/3326] Linux 4.14.7 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 793183f9e3e519..39d7af0165a8fd 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 6 +SUBLEVEL = 7 EXTRAVERSION = NAME = Petit Gorille From 3d27b022022a88e189c0e9d63c4ac01af354735f Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Tue, 17 Oct 2017 22:53:08 +0200 Subject: [PATCH 0626/3326] mfd: fsl-imx25: Clean up irq settings during removal commit 18f77393796848e68909e65d692c1d1436f06e06 upstream. 
When fsl-imx25-tsadc is compiled as a module, loading, unloading and reloading the module will lead to a crash. Unable to handle kernel paging request at virtual address bf005430 [] (irq_find_matching_fwspec) from [] (of_irq_get+0x58/0x74) [] (of_irq_get) from [] (platform_get_irq+0x48/0xc8) [] (platform_get_irq) from [] (mx25_tsadc_probe+0x220/0x2f4 [fsl_imx25_tsadc]) irq_find_matching_fwspec() loops over all registered irq domains. The irq domain is still registered from last time the module was loaded but the pointer to its operations is invalid after the module was unloaded. Add a removal function which clears the irq handler and removes the irq domain. With this cleanup in place, it's possible to unload and reload the module. Signed-off-by: Martin Kaiser Reviewed-by: Lucas Stach Signed-off-by: Lee Jones Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/fsl-imx25-tsadc.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c index b3767c3141e58a..461b0990b56fcf 100644 --- a/drivers/mfd/fsl-imx25-tsadc.c +++ b/drivers/mfd/fsl-imx25-tsadc.c @@ -180,6 +180,19 @@ static int mx25_tsadc_probe(struct platform_device *pdev) return devm_of_platform_populate(dev); } +static int mx25_tsadc_remove(struct platform_device *pdev) +{ + struct mx25_tsadc *tsadc = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + + if (irq) { + irq_set_chained_handler_and_data(irq, NULL, NULL); + irq_domain_remove(tsadc->domain); + } + + return 0; +} + static const struct of_device_id mx25_tsadc_ids[] = { { .compatible = "fsl,imx25-tsadc" }, { /* Sentinel */ } @@ -192,6 +205,7 @@ static struct platform_driver mx25_tsadc_driver = { .of_match_table = of_match_ptr(mx25_tsadc_ids), }, .probe = mx25_tsadc_probe, + .remove = mx25_tsadc_remove, }; module_platform_driver(mx25_tsadc_driver); From 96c2dfaebe1a8eba95d43732a1413c777469128c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 27 Nov 2017 23:23:05 -0800 Subject: [PATCH 0627/3326] crypto: algif_aead - fix reference counting of null skcipher commit b32a7dc8aef1882fbf983eb354837488cc9d54dc upstream. In the AEAD interface for AF_ALG, the reference to the "null skcipher" held by each tfm was being dropped in the wrong place -- when each af_alg_ctx was freed instead of when the aead_tfm was freed. As discovered by syzkaller, a specially crafted program could use this to cause the null skcipher to be freed while it is still in use. Fix it by dropping the reference in the right place. 
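[ Editorial illustration, not part of the upstream patch: a minimal sketch of the intended pairing, condensed from the hunks below. The null-skcipher reference is taken per aead_tfm, so it must be dropped where that tfm is destroyed, not in the per-socket destructor. ]

static void aead_release(void *private)
{
	struct aead_tfm *tfm = private;

	crypto_free_aead(tfm->aead);
	/* drop the reference together with the tfm that acquired it */
	crypto_put_default_null_skcipher2();
	kfree(tfm);
}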
Fixes: 72548b093ee3 ("crypto: algif_aead - copy AAD from src to dst") Reported-by: syzbot Signed-off-by: Eric Biggers Reviewed-by: Stephan Mueller Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/algif_aead.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index d0b45145cb307b..3d793bc2aa82c0 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -503,6 +503,7 @@ static void aead_release(void *private) struct aead_tfm *tfm = private; crypto_free_aead(tfm->aead); + crypto_put_default_null_skcipher2(); kfree(tfm); } @@ -535,7 +536,6 @@ static void aead_sock_destruct(struct sock *sk) unsigned int ivlen = crypto_aead_ivsize(tfm); af_alg_pull_tsgl(sk, ctx->used, NULL, 0); - crypto_put_default_null_skcipher2(); sock_kzfree_s(sk, ctx->iv, ivlen); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); From 80dbdc5ae74d6167edf081f8865df06fb6615578 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 26 Nov 2017 23:16:49 -0800 Subject: [PATCH 0628/3326] crypto: rsa - fix buffer overread when stripping leading zeroes commit d2890c3778b164fde587bc16583f3a1c87233ec5 upstream. In rsa_get_n(), if the buffer contained all 0's and "FIPS mode" is enabled, we would read one byte past the end of the buffer while scanning the leading zeroes. Fix it by checking 'n_sz' before '!*ptr'. This bug was reachable by adding a specially crafted key of type "asymmetric" (requires CONFIG_RSA and CONFIG_X509_CERTIFICATE_PARSER). KASAN report: BUG: KASAN: slab-out-of-bounds in rsa_get_n+0x19e/0x1d0 crypto/rsa_helper.c:33 Read of size 1 at addr ffff88003501a708 by task keyctl/196 CPU: 1 PID: 196 Comm: keyctl Not tainted 4.14.0-09238-g1d3b78bbc6e9 #26 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.11.0-20171110_100015-anatol 04/01/2014 Call Trace: rsa_get_n+0x19e/0x1d0 crypto/rsa_helper.c:33 asn1_ber_decoder+0x82a/0x1fd0 lib/asn1_decoder.c:328 rsa_set_pub_key+0xd3/0x320 crypto/rsa.c:278 crypto_akcipher_set_pub_key ./include/crypto/akcipher.h:364 [inline] pkcs1pad_set_pub_key+0xae/0x200 crypto/rsa-pkcs1pad.c:117 crypto_akcipher_set_pub_key ./include/crypto/akcipher.h:364 [inline] public_key_verify_signature+0x270/0x9d0 crypto/asymmetric_keys/public_key.c:106 x509_check_for_self_signed+0x2ea/0x480 crypto/asymmetric_keys/x509_public_key.c:141 x509_cert_parse+0x46a/0x620 crypto/asymmetric_keys/x509_cert_parser.c:129 x509_key_preparse+0x61/0x750 crypto/asymmetric_keys/x509_public_key.c:174 asymmetric_key_preparse+0xa4/0x150 crypto/asymmetric_keys/asymmetric_type.c:388 key_create_or_update+0x4d4/0x10a0 security/keys/key.c:850 SYSC_add_key security/keys/keyctl.c:122 [inline] SyS_add_key+0xe8/0x290 security/keys/keyctl.c:62 entry_SYSCALL_64_fastpath+0x1f/0x96 Allocated by task 196: __do_kmalloc mm/slab.c:3711 [inline] __kmalloc_track_caller+0x118/0x2e0 mm/slab.c:3726 kmemdup+0x17/0x40 mm/util.c:118 kmemdup ./include/linux/string.h:414 [inline] x509_cert_parse+0x2cb/0x620 crypto/asymmetric_keys/x509_cert_parser.c:106 x509_key_preparse+0x61/0x750 crypto/asymmetric_keys/x509_public_key.c:174 asymmetric_key_preparse+0xa4/0x150 crypto/asymmetric_keys/asymmetric_type.c:388 key_create_or_update+0x4d4/0x10a0 security/keys/key.c:850 SYSC_add_key security/keys/keyctl.c:122 [inline] SyS_add_key+0xe8/0x290 security/keys/keyctl.c:62 entry_SYSCALL_64_fastpath+0x1f/0x96 Fixes: 5a7de97309f5 ("crypto: rsa - return raw integers for the ASN.1 parser") Cc: Tudor Ambarus Signed-off-by: Eric Biggers Reviewed-by: James Morris Reviewed-by: David 
Howells Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/rsa_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c index 0b66dc8246068a..cad395d70d78e1 100644 --- a/crypto/rsa_helper.c +++ b/crypto/rsa_helper.c @@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag, return -EINVAL; if (fips_enabled) { - while (!*ptr && n_sz) { + while (n_sz && !*ptr) { ptr++; n_sz--; } From 902ae89f841de0c8d2857919296923f6332e174f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 28 Nov 2017 18:01:38 -0800 Subject: [PATCH 0629/3326] crypto: hmac - require that the underlying hash algorithm is unkeyed commit af3ff8045bbf3e32f1a448542e73abb4c8ceb6f1 upstream. Because the HMAC template didn't check that its underlying hash algorithm is unkeyed, trying to use "hmac(hmac(sha3-512-generic))" through AF_ALG or through KEYCTL_DH_COMPUTE resulted in the inner HMAC being used without having been keyed, resulting in sha3_update() being called without sha3_init(), causing a stack buffer overflow. This is a very old bug, but it seems to have only started causing real problems when SHA-3 support was added (requires CONFIG_CRYPTO_SHA3) because the innermost hash's state is ->import()ed from a zeroed buffer, and it just so happens that other hash algorithms are fine with that, but SHA-3 is not. However, there could be arch or hardware-dependent hash algorithms also affected; I couldn't test everything. Fix the bug by introducing a function crypto_shash_alg_has_setkey() which tests whether a shash algorithm is keyed. Then update the HMAC template to require that its underlying hash algorithm is unkeyed. Here is a reproducer: #include #include int main() { int algfd; struct sockaddr_alg addr = { .salg_type = "hash", .salg_name = "hmac(hmac(sha3-512-generic))", }; char key[4096] = { 0 }; algfd = socket(AF_ALG, SOCK_SEQPACKET, 0); bind(algfd, (const struct sockaddr *)&addr, sizeof(addr)); setsockopt(algfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)); } Here was the KASAN report from syzbot: BUG: KASAN: stack-out-of-bounds in memcpy include/linux/string.h:341 [inline] BUG: KASAN: stack-out-of-bounds in sha3_update+0xdf/0x2e0 crypto/sha3_generic.c:161 Write of size 4096 at addr ffff8801cca07c40 by task syzkaller076574/3044 CPU: 1 PID: 3044 Comm: syzkaller076574 Not tainted 4.14.0-mm1+ #25 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x257 lib/dump_stack.c:53 print_address_description+0x73/0x250 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 [inline] kasan_report+0x25b/0x340 mm/kasan/report.c:409 check_memory_region_inline mm/kasan/kasan.c:260 [inline] check_memory_region+0x137/0x190 mm/kasan/kasan.c:267 memcpy+0x37/0x50 mm/kasan/kasan.c:303 memcpy include/linux/string.h:341 [inline] sha3_update+0xdf/0x2e0 crypto/sha3_generic.c:161 crypto_shash_update+0xcb/0x220 crypto/shash.c:109 shash_finup_unaligned+0x2a/0x60 crypto/shash.c:151 crypto_shash_finup+0xc4/0x120 crypto/shash.c:165 hmac_finup+0x182/0x330 crypto/hmac.c:152 crypto_shash_finup+0xc4/0x120 crypto/shash.c:165 shash_digest_unaligned+0x9e/0xd0 crypto/shash.c:172 crypto_shash_digest+0xc4/0x120 crypto/shash.c:186 hmac_setkey+0x36a/0x690 crypto/hmac.c:66 crypto_shash_setkey+0xad/0x190 crypto/shash.c:64 shash_async_setkey+0x47/0x60 crypto/shash.c:207 crypto_ahash_setkey+0xaf/0x180 crypto/ahash.c:200 hash_setkey+0x40/0x90 
crypto/algif_hash.c:446 alg_setkey crypto/af_alg.c:221 [inline] alg_setsockopt+0x2a1/0x350 crypto/af_alg.c:254 SYSC_setsockopt net/socket.c:1851 [inline] SyS_setsockopt+0x189/0x360 net/socket.c:1830 entry_SYSCALL_64_fastpath+0x1f/0x96 Reported-by: syzbot Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/hmac.c | 6 +++++- crypto/shash.c | 5 +++-- include/crypto/internal/hash.h | 8 ++++++++ 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/crypto/hmac.c b/crypto/hmac.c index 92871dc2a63ec6..e74730224f0a5f 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -195,11 +195,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) salg = shash_attr_alg(tb[1], 0, 0); if (IS_ERR(salg)) return PTR_ERR(salg); + alg = &salg->base; + /* The underlying hash algorithm must be unkeyed */ err = -EINVAL; + if (crypto_shash_alg_has_setkey(salg)) + goto out_put_alg; + ds = salg->digestsize; ss = salg->statesize; - alg = &salg->base; if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) goto out_put_alg; diff --git a/crypto/shash.c b/crypto/shash.c index 325a14da58278f..e849d3ee2e2728 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -25,11 +25,12 @@ static const struct crypto_type crypto_shash_type; -static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) { return -ENOSYS; } +EXPORT_SYMBOL_GPL(shash_no_setkey); static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index f0b44c16e88f24..c2bae8da642cba 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -82,6 +82,14 @@ int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst); void ahash_free_instance(struct crypto_instance *inst); +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + +static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) +{ + return alg->setkey != shash_no_setkey; +} + int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, struct hash_alg_common *alg, struct crypto_instance *inst); From c68b31521d5fb7216cb1113130399afe65437c6c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 28 Nov 2017 20:56:59 -0800 Subject: [PATCH 0630/3326] crypto: salsa20 - fix blkcipher_walk API usage commit ecaaab5649781c5a0effdaf298a925063020500e upstream. When asked to encrypt or decrypt 0 bytes, both the generic and x86 implementations of Salsa20 crash in blkcipher_walk_done(), either when doing 'kfree(walk->buffer)' or 'free_page((unsigned long)walk->page)', because walk->buffer and walk->page have not been initialized. The bug is that Salsa20 is calling blkcipher_walk_done() even when nothing is in 'walk.nbytes'. But blkcipher_walk_done() is only meant to be called when a nonzero number of bytes have been provided. The broken code is part of an optimization that tries to make only one call to salsa20_encrypt_bytes() to process inputs that are not evenly divisible by 64 bytes. To fix the bug, just remove this "optimization" and use the blkcipher_walk API the same way all the other users do. 
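[ Editorial illustration, not part of the upstream patch: a sketch of the conventional blkcipher_walk loop the commit refers to, reconstructed from the generic salsa20 code of that era; error handling is abbreviated. Note that blkcipher_walk_done() is only ever called while walk.nbytes is nonzero. ]

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 64);

	salsa20_ivsetup(ctx, walk.iv);

	while (walk.nbytes >= 64) {
		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
				      walk.dst.virt.addr,
				      walk.nbytes - (walk.nbytes % 64));
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
	}

	if (walk.nbytes) {
		/* final partial block */
		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
				      walk.dst.virt.addr, walk.nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;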
Reproducer: #include #include #include int main() { int algfd, reqfd; struct sockaddr_alg addr = { .salg_type = "skcipher", .salg_name = "salsa20", }; char key[16] = { 0 }; algfd = socket(AF_ALG, SOCK_SEQPACKET, 0); bind(algfd, (void *)&addr, sizeof(addr)); reqfd = accept(algfd, 0, 0); setsockopt(algfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)); read(reqfd, key, sizeof(key)); } Reported-by: syzbot Fixes: eb6f13eb9f81 ("[CRYPTO] salsa20_generic: Fix multi-page processing") Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/salsa20_glue.c | 7 ------- crypto/salsa20_generic.c | 7 ------- 2 files changed, 14 deletions(-) diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c index 399a29d067d636..cb91a64a99e7cd 100644 --- a/arch/x86/crypto/salsa20_glue.c +++ b/arch/x86/crypto/salsa20_glue.c @@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc, salsa20_ivsetup(ctx, walk.iv); - if (likely(walk.nbytes == nbytes)) - { - salsa20_encrypt_bytes(ctx, walk.src.virt.addr, - walk.dst.virt.addr, nbytes); - return blkcipher_walk_done(desc, &walk, 0); - } - while (walk.nbytes >= 64) { salsa20_encrypt_bytes(ctx, walk.src.virt.addr, walk.dst.virt.addr, diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index f550b5d9463074..d7da0eea5622af 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc, salsa20_ivsetup(ctx, walk.iv); - if (likely(walk.nbytes == nbytes)) - { - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, - walk.src.virt.addr, nbytes); - return blkcipher_walk_done(desc, &walk, 0); - } - while (walk.nbytes >= 64) { salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, walk.src.virt.addr, From cf1048e46d4f301261d946e73add464fb95fd5a4 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 28 Nov 2017 00:46:24 -0800 Subject: [PATCH 0631/3326] crypto: af_alg - fix NULL pointer dereference in commit 887207ed9e5812ed9239b6d07185a2d35dda91db upstream. af_alg_free_areq_sgls() If allocating the ->tsgl member of 'struct af_alg_async_req' failed, during cleanup we dereferenced the NULL ->tsgl pointer in af_alg_free_areq_sgls(), because ->tsgl_entries was nonzero. Fix it by only freeing the ->tsgl list if it is non-NULL. This affected both algif_skcipher and algif_aead. 
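[ Editorial illustration, not part of the upstream patch: condensed from the hunk below. Both the page walk and the sock_kfree_s() are gated on areq->tsgl being non-NULL, because tsgl_entries can be nonzero even when the tsgl allocation itself failed. ]

	tsgl = areq->tsgl;
	if (tsgl) {
		for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
			if (!sg_page(sg))
				continue;
			put_page(sg_page(sg));
		}

		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
	}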
Fixes: e870456d8e7c ("crypto: algif_skcipher - overhaul memory management") Fixes: d887c52d6ae4 ("crypto: algif_aead - overhaul memory management") Reported-by: syzbot Signed-off-by: Eric Biggers Reviewed-by: Stephan Mueller Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/af_alg.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index a72659f452a5bb..e181073ef64d24 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -699,14 +699,15 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq) } tsgl = areq->tsgl; - for_each_sg(tsgl, sg, areq->tsgl_entries, i) { - if (!sg_page(sg)) - continue; - put_page(sg_page(sg)); - } + if (tsgl) { + for_each_sg(tsgl, sg, areq->tsgl_entries, i) { + if (!sg_page(sg)) + continue; + put_page(sg_page(sg)); + } - if (areq->tsgl && areq->tsgl_entries) sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); + } } EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); From 6f3fc5f95975e101b198ecf223ad6f9f8994b191 Mon Sep 17 00:00:00 2001 From: Ronnie Sahlberg Date: Tue, 21 Nov 2017 09:36:33 +1100 Subject: [PATCH 0632/3326] cifs: fix NULL deref in SMB2_read commit a821df3f1af72aa6a0d573eea94a7dd2613e9f4e upstream. Signed-off-by: Ronnie Sahlberg Reviewed-by: Pavel Shilovsky Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/cifs/smb2pdu.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 5331631386a23b..01346b8b6edb38 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -2678,27 +2678,27 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, cifs_small_buf_release(req); rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; - shdr = get_sync_hdr(rsp); - if (shdr->Status == STATUS_END_OF_FILE) { + if (rc) { + if (rc != -ENODATA) { + cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); + cifs_dbg(VFS, "Send error in read = %d\n", rc); + } free_rsp_buf(resp_buftype, rsp_iov.iov_base); - return 0; + return rc == -ENODATA ? 0 : rc; } - if (rc) { - cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); - cifs_dbg(VFS, "Send error in read = %d\n", rc); - } else { - *nbytes = le32_to_cpu(rsp->DataLength); - if ((*nbytes > CIFS_MAX_MSGSIZE) || - (*nbytes > io_parms->length)) { - cifs_dbg(FYI, "bad length %d for count %d\n", - *nbytes, io_parms->length); - rc = -EIO; - *nbytes = 0; - } + *nbytes = le32_to_cpu(rsp->DataLength); + if ((*nbytes > CIFS_MAX_MSGSIZE) || + (*nbytes > io_parms->length)) { + cifs_dbg(FYI, "bad length %d for count %d\n", + *nbytes, io_parms->length); + rc = -EIO; + *nbytes = 0; } + shdr = get_sync_hdr(rsp); + if (*buf) { memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes); free_rsp_buf(resp_buftype, rsp_iov.iov_base); From c8def7a4185b8b1ec2b843af346977f00259414e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 14 Dec 2017 15:32:34 -0800 Subject: [PATCH 0633/3326] string.h: workaround for increased stack usage commit 146734b091430c80d80bb96b1139a96fb4bc830e upstream. 
The hardened strlen() function causes rather large stack usage in at least one file in the kernel, in particular when CONFIG_KASAN is enabled: drivers/media/usb/em28xx/em28xx-dvb.c: In function 'em28xx_dvb_init': drivers/media/usb/em28xx/em28xx-dvb.c:2062:1: error: the frame size of 3256 bytes is larger than 204 bytes [-Werror=frame-larger-than=] Analyzing this problem led to the discovery that gcc fails to merge the stack slots for the i2c_board_info[] structures after we strlcpy() into them, due to the 'noreturn' attribute on the source string length check. I reported this as a gcc bug, but it is unlikely to get fixed for gcc-8, since it is relatively easy to work around, and it gets triggered rarely. An earlier workaround I did added an empty inline assembly statement before the call to fortify_panic(), which works surprisingly well, but is really ugly and unintuitive. This is a new approach to the same problem, this time addressing it by not calling the 'extern __real_strnlen()' function for string constants where __builtin_strlen() is a compile-time constant and therefore known to be safe. We do this by checking if the last character in the string is a compile-time constant '\0'. If it is, we can assume that strlen() of the string is also constant. As a side-effect, this should also improve the object code output for any other call of strlen() on a string constant. [akpm@linux-foundation.org: add comment] Link: http://lkml.kernel.org/r/20171205215143.3085755-1-arnd@arndb.de Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365 Link: https://patchwork.kernel.org/patch/9980413/ Link: https://patchwork.kernel.org/patch/9974047/ Fixes: 6974f0c4555 ("include/linux/string.h: add the option of fortified string.h functions") Signed-off-by: Arnd Bergmann Cc: Kees Cook Cc: Mauro Carvalho Chehab Cc: Dmitry Vyukov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Daniel Micay Cc: Greg Kroah-Hartman Cc: Martin Wilck Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/string.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/linux/string.h b/include/linux/string.h index 410ecf17de3ce5..cfd83eb2f926c7 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) { __kernel_size_t ret; size_t p_size = __builtin_object_size(p, 0); - if (p_size == (size_t)-1) + + /* Work around gcc excess stack consumption issue */ + if (p_size == (size_t)-1 || + (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) return __builtin_strlen(p); ret = strnlen(p, p_size); if (p_size <= ret) From d3d2f01a6eaf6022167c94afba34b6d6722392c9 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 14 Dec 2017 15:32:38 -0800 Subject: [PATCH 0634/3326] autofs: fix careless error in recent commit commit 302ec300ef8a545a7fc7f667e5fd743b091c2eeb upstream. Commit ecc0c469f277 ("autofs: don't fail mount for transient error") was meant to replace an 'if' with a 'switch', but instead added the 'switch' leaving the case in place. 
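[ Editorial illustration, not part of the upstream patch: a sketch of the control flow before and after the one-line removal below; the individual error cases are elided. With the stray 'if' left in place, the packet could be written to the daemon's pipe twice, and the error-handling switch only ran when the first write had already failed. ]

	/* before the fix */
	if (autofs4_write(sbi, pipe, &pkt, pktsz))			/* 1st write */
		switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {	/* 2nd write */
		case 0:
			break;
		/* remaining cases elided */
		}

	/* after the fix: one write, its result always classified */
	switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
	case 0:
		break;
	/* remaining cases elided */
	}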
Link: http://lkml.kernel.org/r/87zi6wstmw.fsf@notabene.neil.brown.name Fixes: ecc0c469f277 ("autofs: don't fail mount for transient error") Reported-by: Ben Hutchings Signed-off-by: NeilBrown Cc: Ian Kent Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/autofs4/waitq.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 8fc41705c7cd50..961a12dc6dc81f 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -170,7 +170,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, mutex_unlock(&sbi->wq_mutex); - if (autofs4_write(sbi, pipe, &pkt, pktsz)) switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) { case 0: break; From c5d9b78d530a45f0c226317764a7865f7911ab46 Mon Sep 17 00:00:00 2001 From: Thiago Rafael Becker Date: Thu, 14 Dec 2017 15:33:12 -0800 Subject: [PATCH 0635/3326] kernel: make groups_sort calling a responsibility group_info allocators commit bdcf0a423ea1c40bbb40e7ee483b50fc8aa3d758 upstream. In testing, we found that nfsd threads may call set_groups in parallel for the same entry cached in auth.unix.gid, racing in the call of groups_sort, corrupting the groups for that entry and leading to permission denials for the client. This patch: - Make groups_sort globally visible. - Move the call to groups_sort to the modifiers of group_info - Remove the call to groups_sort from set_groups Link: http://lkml.kernel.org/r/20171211151420.18655-1-thiago.becker@gmail.com Signed-off-by: Thiago Rafael Becker Reviewed-by: Matthew Wilcox Reviewed-by: NeilBrown Acked-by: "J. Bruce Fields" Cc: Al Viro Cc: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/compat_linux.c | 1 + fs/nfsd/auth.c | 3 +++ include/linux/cred.h | 1 + kernel/groups.c | 5 +++-- kernel/uid16.c | 1 + net/sunrpc/auth_gss/gss_rpc_xdr.c | 1 + net/sunrpc/auth_gss/svcauth_gss.c | 1 + net/sunrpc/svcauth_unix.c | 2 ++ 8 files changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index f04db3779b3450..59eea9c65d3e9e 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis return retval; } + groups_sort(group_info); retval = set_current_groups(group_info); put_group_info(group_info); diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index 697f8ae7792d13..f650e475d8f0d8 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c @@ -60,6 +60,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) gi->gid[i] = exp->ex_anon_gid; else gi->gid[i] = rqgi->gid[i]; + + /* Each thread allocates its own gi, no race */ + groups_sort(gi); } } else { gi = get_group_info(rqgi); diff --git a/include/linux/cred.h b/include/linux/cred.h index 099058e1178b4d..631286535d0f12 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *); extern void set_groups(struct cred *, struct group_info *); extern int groups_search(const struct group_info *, kgid_t); extern bool may_setgroups(void); +extern void groups_sort(struct group_info *); /* * The security context of a task diff --git a/kernel/groups.c b/kernel/groups.c index e357bc80011104..daae2f2dc6d4f6 100644 --- a/kernel/groups.c +++ b/kernel/groups.c @@ -86,11 +86,12 @@ static int gid_cmp(const void *_a, const void *_b) return gid_gt(a, b) - gid_lt(a, b); } 
-static void groups_sort(struct group_info *group_info) +void groups_sort(struct group_info *group_info) { sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid), gid_cmp, NULL); } +EXPORT_SYMBOL(groups_sort); /* a simple bsearch */ int groups_search(const struct group_info *group_info, kgid_t grp) @@ -122,7 +123,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp) void set_groups(struct cred *new, struct group_info *group_info) { put_group_info(new->group_info); - groups_sort(group_info); get_group_info(group_info); new->group_info = group_info; } @@ -206,6 +206,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist) return retval; } + groups_sort(group_info); retval = set_current_groups(group_info); put_group_info(group_info); diff --git a/kernel/uid16.c b/kernel/uid16.c index ce74a4901d2b05..ef1da2a5f9bd00 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c @@ -192,6 +192,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist) return retval; } + groups_sort(group_info); retval = set_current_groups(group_info); put_group_info(group_info); diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c index c4778cae58ef12..444380f968f115 100644 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c @@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr, goto out_free_groups; creds->cr_group_info->gid[i] = kgid; } + groups_sort(creds->cr_group_info); return 0; out_free_groups: diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 7b1ee5a0b03cd1..f41ffb22652c76 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd, goto out; rsci.cred.cr_group_info->gid[i] = kgid; } + groups_sort(rsci.cred.cr_group_info); /* mech name */ len = qword_get(&mesg, buf, mlen); diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index f81eaa8e08888a..acb70d235e475a 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd, ug.gi->gid[i] = kgid; } + groups_sort(ug.gi); ugp = unix_gid_lookup(cd, uid); if (ugp) { struct cache_head *ch; @@ -819,6 +820,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv)); cred->cr_group_info->gid[i] = kgid; } + groups_sort(cred->cr_group_info); if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { *authp = rpc_autherr_badverf; return SVC_DENIED; From 55fe4698d80ecb553d015b5df96ccf9584cbc06b Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Thu, 14 Dec 2017 15:33:15 -0800 Subject: [PATCH 0636/3326] mm, oom_reaper: fix memory corruption commit 4837fe37adff1d159904f0c013471b1ecbcb455e upstream. David Rientjes has reported the following memory corruption while the oom reaper tries to unmap the victims address space BUG: Bad page map in process oom_reaper pte:6353826300000000 pmd:00000000 addr:00007f50cab1d000 vm_flags:08100073 anon_vma:ffff9eea335603f0 mapping: (null) index:7f50cab1d file: (null) fault: (null) mmap: (null) readpage: (null) CPU: 2 PID: 1001 Comm: oom_reaper Call Trace: unmap_page_range+0x1068/0x1130 __oom_reap_task_mm+0xd5/0x16b oom_reaper+0xff/0x14c kthread+0xc1/0xe0 Tetsuo Handa has noticed that the synchronization inside exit_mmap is insufficient. 
We only synchronize with the oom reaper if tsk_is_oom_victim which is not true if the final __mmput is called from a different context than the oom victim exit path. This can trivially happen from context of any task which has grabbed mm reference (e.g. to read /proc// file which requires mm etc.). The race would look like this oom_reaper oom_victim task mmget_not_zero do_exit mmput __oom_reap_task_mm mmput __mmput exit_mmap remove_vma unmap_page_range Fix this issue by providing a new mm_is_oom_victim() helper which operates on the mm struct rather than a task. Any context which operates on a remote mm struct should use this helper in place of tsk_is_oom_victim. The flag is set in mark_oom_victim and never cleared so it is stable in the exit_mmap path. Debugged by Tetsuo Handa. Link: http://lkml.kernel.org/r/20171210095130.17110-1-mhocko@kernel.org Fixes: 212925802454 ("mm: oom: let oom_reap_task and exit_mmap run concurrently") Signed-off-by: Michal Hocko Reported-by: David Rientjes Acked-by: David Rientjes Cc: Tetsuo Handa Cc: Andrea Argangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/oom.h | 9 +++++++++ include/linux/sched/coredump.h | 1 + mm/mmap.c | 10 +++++----- mm/oom_kill.c | 4 +++- 4 files changed, 18 insertions(+), 6 deletions(-) diff --git a/include/linux/oom.h b/include/linux/oom.h index 01c91d874a57f5..5bad038ac012e6 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -66,6 +66,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk) return tsk->signal->oom_mm; } +/* + * Use this helper if tsk->mm != mm and the victim mm needs a special + * handling. This is guaranteed to stay true after once set. + */ +static inline bool mm_is_oom_victim(struct mm_struct *mm) +{ + return test_bit(MMF_OOM_VICTIM, &mm->flags); +} + /* * Checks whether a page fault on the given mm is still reliable. * This is no longer true if the oom reaper started to reap the diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 9c8847395b5e15..ec912d01126f4b 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ +#define MMF_OOM_VICTIM 25 /* mm is the oom victim */ #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ diff --git a/mm/mmap.c b/mm/mmap.c index 476e810cf10032..0de87a376aaa18 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3004,20 +3004,20 @@ void exit_mmap(struct mm_struct *mm) /* Use -1 here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, vma, 0, -1); - set_bit(MMF_OOM_SKIP, &mm->flags); - if (unlikely(tsk_is_oom_victim(current))) { + if (unlikely(mm_is_oom_victim(mm))) { /* * Wait for oom_reap_task() to stop working on this * mm. Because MMF_OOM_SKIP is already set before * calling down_read(), oom_reap_task() will not run * on this "mm" post up_write(). 
* - * tsk_is_oom_victim() cannot be set from under us - * either because current->mm is already set to NULL + * mm_is_oom_victim() cannot be set from under us + * either because victim->mm is already set to NULL * under task_lock before calling mmput and oom_mm is - * set not NULL by the OOM killer only if current->mm + * set not NULL by the OOM killer only if victim->mm * is found not NULL while holding the task_lock. */ + set_bit(MMF_OOM_SKIP, &mm->flags); down_write(&mm->mmap_sem); up_write(&mm->mmap_sem); } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 18c5b356b5053c..10aed8d8c08077 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -673,8 +673,10 @@ static void mark_oom_victim(struct task_struct *tsk) return; /* oom_mm is bound to the signal struct life time. */ - if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) + if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) { mmgrab(tsk->signal->oom_mm); + set_bit(MMF_OOM_VICTIM, &mm->flags); + } /* * Make sure that the task is woken up from uninterruptible sleep From b8582c0f792fc1603a8998de60a156359f15dfd4 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Thu, 30 Nov 2017 11:39:43 +0800 Subject: [PATCH 0637/3326] tracing: Allocate mask_str buffer dynamically commit 90e406f96f630c07d631a021fd4af10aac913e77 upstream. The default NR_CPUS can be very large, but actual possible nr_cpu_ids usually is very small. For my x86 distribution, the NR_CPUS is 8192 and nr_cpu_ids is 4. About 2 pages are wasted. Most machines don't have so many CPUs, so define a array with NR_CPUS just wastes memory. So let's allocate the buffer dynamically when need. With this change, the mutext tracing_cpumask_update_lock also can be removed now, which was used to protect mask_str. Link: http://lkml.kernel.org/r/1512013183-19107-1-git-send-email-changbin.du@intel.com Fixes: 36dfe9252bd4c ("ftrace: make use of tracing_cpumask") Signed-off-by: Changbin Du Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 752e5daf0896fc..80de14973b42b8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4178,37 +4178,30 @@ static const struct file_operations show_traces_fops = { .llseek = seq_lseek, }; -/* - * The tracer itself will not take this lock, but still we want - * to provide a consistent cpumask to user-space: - */ -static DEFINE_MUTEX(tracing_cpumask_update_lock); - -/* - * Temporary storage for the character representation of the - * CPU bitmask (and one more byte for the newline): - */ -static char mask_str[NR_CPUS + 1]; - static ssize_t tracing_cpumask_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct trace_array *tr = file_inode(filp)->i_private; + char *mask_str; int len; - mutex_lock(&tracing_cpumask_update_lock); + len = snprintf(NULL, 0, "%*pb\n", + cpumask_pr_args(tr->tracing_cpumask)) + 1; + mask_str = kmalloc(len, GFP_KERNEL); + if (!mask_str) + return -ENOMEM; - len = snprintf(mask_str, count, "%*pb\n", + len = snprintf(mask_str, len, "%*pb\n", cpumask_pr_args(tr->tracing_cpumask)); if (len >= count) { count = -EINVAL; goto out_err; } - count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); + count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); out_err: - mutex_unlock(&tracing_cpumask_update_lock); + kfree(mask_str); return count; } @@ -4228,8 +4221,6 @@ tracing_cpumask_write(struct file *filp, const char __user 
*ubuf, if (err) goto err_unlock; - mutex_lock(&tracing_cpumask_update_lock); - local_irq_disable(); arch_spin_lock(&tr->max_lock); for_each_tracing_cpu(cpu) { @@ -4252,8 +4243,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, local_irq_enable(); cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); - - mutex_unlock(&tracing_cpumask_update_lock); free_cpumask_var(tracing_cpumask_new); return count; From 6391294874d505a5e8681c41439a74e7019b267f Mon Sep 17 00:00:00 2001 From: David Kozub Date: Tue, 5 Dec 2017 22:40:04 +0100 Subject: [PATCH 0638/3326] USB: uas and storage: Add US_FL_BROKEN_FUA for another JMicron JMS567 ID commit 62354454625741f0569c2cbe45b2d192f8fd258e upstream. There is another JMS567-based USB3 UAS enclosure (152d:0578) that fails with the following error: [sda] tag#0 FAILED Result: hostbyte=DID_OK driverbyte=DRIVER_SENSE [sda] tag#0 Sense Key : Illegal Request [current] [sda] tag#0 Add. Sense: Invalid field in cdb The issue occurs both with UAS (occasionally) and mass storage (immediately after mounting a FS on a disk in the enclosure). Enabling US_FL_BROKEN_FUA quirk solves this issue. This patch adds an UNUSUAL_DEV with US_FL_BROKEN_FUA for the enclosure for both UAS and mass storage. Signed-off-by: David Kozub Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/unusual_devs.h | 7 +++++++ drivers/usb/storage/unusual_uas.h | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index eb06d88b41d697..9af39644561f03 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2113,6 +2113,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA ), +/* Reported by David Kozub */ +UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, + "JMicron", + "JMS567", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA), + /* * Reported by Alexandre Oliva * JMicron responds to USN and several other SCSI ioctls with a diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index cde11535979300..9f356f7cf7d500 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -142,6 +142,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES), +/* Reported-by: David Kozub */ +UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, + "JMicron", + "JMS567", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA), + /* Reported-by: Hans de Goede */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", From 4c5ae6a301a5415d1334f6c655bebf91d475bd89 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 12 Dec 2017 14:25:13 -0500 Subject: [PATCH 0639/3326] USB: core: prevent malicious bNumInterfaces overflow commit 48a4ff1c7bb5a32d2e396b03132d20d552c0eca7 upstream. A malicious USB device with crafted descriptors can cause the kernel to access unallocated memory by setting the bNumInterfaces value too high in a configuration descriptor. Although the value is adjusted during parsing, this adjustment is skipped in one of the error return paths. This patch prevents the problem by setting bNumInterfaces to 0 initially. The existing code already sets it to the proper value after parsing is complete. 
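[ Editorial illustration, not part of the upstream patch: condensed from the hunk below. The device-supplied interface count is cached and the descriptor's field is published as 0 until parsing succeeds, so an early error return can no longer leave an oversized bNumInterfaces for later code to index with. ]

	memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);

	nintf = nintf_orig = config->desc.bNumInterfaces;
	config->desc.bNumInterfaces = 0;	/* adjusted after parsing */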
Signed-off-by: Alan Stern Reported-by: Andrey Konovalov Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/config.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index c42a3e63eb07cc..843ef46d2537eb 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, unsigned iad_num = 0; memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); + nintf = nintf_orig = config->desc.bNumInterfaces; + config->desc.bNumInterfaces = 0; // Adjusted later + if (config->desc.bDescriptorType != USB_DT_CONFIG || config->desc.bLength < USB_DT_CONFIG_SIZE || config->desc.bLength > size) { @@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, buffer += config->desc.bLength; size -= config->desc.bLength; - nintf = nintf_orig = config->desc.bNumInterfaces; if (nintf > USB_MAXINTERFACES) { dev_warn(ddev, "config %d has too many interfaces: %d, " "using maximum allowed: %d\n", From 066f40dc495d5ab2ec9a871cb8703d9591d636c5 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Mon, 27 Nov 2017 10:12:44 -0500 Subject: [PATCH 0640/3326] ovl: Pass ovl_get_nlink() parameters in right order commit 08d8f8a5b094b66b29936e8751b4a818b8db1207 upstream. Right now we seem to be passing index as "lowerdentry" and origin.dentry as "upperdentry". IIUC, we should pass these parameters in reversed order and this looks like a bug. Signed-off-by: Vivek Goyal Acked-by: Amir Goldstein Fixes: caf70cb2ba5d ("ovl: cleanup orphan index entries") Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/overlayfs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index bc6d5c5a344353..4bb7e4f53ea6d4 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -437,7 +437,7 @@ int ovl_verify_index(struct dentry *index, struct path *lowerstack, /* Check if index is orphan and don't warn before cleaning it */ if (d_inode(index)->i_nlink == 1 && - ovl_get_nlink(index, origin.dentry, 0) == 0) + ovl_get_nlink(origin.dentry, index, 0) == 0) err = -ENOENT; dput(origin.dentry); From cb6ee0f3c19980328529d5ed7a1650e41164ca49 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 29 Nov 2017 07:35:21 +0200 Subject: [PATCH 0641/3326] ovl: update ctx->pos on impure dir iteration commit b02a16e6413a2f782e542ef60bad9ff6bf212f8a upstream. This fixes a regression with readdir of impure dir in overlayfs that is shared to VM via 9p fs. 
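[ Editorial illustration, not part of the upstream patch: condensed from the hunk below. The temporary dir_context wraps the caller's, so its position must be copied back or the caller's f_pos never advances, which is what broke readdir when the overlay was re-exported over 9p. ]

	err = iterate_dir(od->realfile, &rdt.ctx);
	ctx->pos = rdt.ctx.pos;		/* propagate progress to the caller */

	return err;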
Reported-by: Miguel Bernal Marin Fixes: 4edb83bb1041 ("ovl: constant d_ino for non-merge dirs") Signed-off-by: Amir Goldstein Tested-by: Miguel Bernal Marin Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/overlayfs/readdir.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 698b74dd750ee6..b2c7f33e08fce1 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -645,7 +645,10 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx) return PTR_ERR(rdt.cache); } - return iterate_dir(od->realfile, &rdt.ctx); + err = iterate_dir(od->realfile, &rdt.ctx); + ctx->pos = rdt.ctx.pos; + + return err; } From 7120d742ad8d0f1fe37e4b73827e166fc1e01eea Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Thu, 7 Dec 2017 14:16:47 -0700 Subject: [PATCH 0642/3326] usbip: fix stub_rx: get_pipe() to validate endpoint number commit 635f545a7e8be7596b9b2b6a43cab6bbd5a88e43 upstream. get_pipe() routine doesn't validate the input endpoint number and uses to reference ep_in and ep_out arrays. Invalid endpoint number can trigger BUG(). Range check the epnum and returning error instead of calling BUG(). Change caller stub_recv_cmd_submit() to handle the get_pipe() error return. Reported-by: Secunia Research Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/stub_rx.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 191b176ffedfda..4f03c611c16d2c 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -342,15 +342,15 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir) struct usb_host_endpoint *ep; struct usb_endpoint_descriptor *epd = NULL; + if (epnum < 0 || epnum > 15) + goto err_ret; + if (dir == USBIP_DIR_IN) ep = udev->ep_in[epnum & 0x7f]; else ep = udev->ep_out[epnum & 0x7f]; - if (!ep) { - dev_err(&sdev->udev->dev, "no such endpoint?, %d\n", - epnum); - BUG(); - } + if (!ep) + goto err_ret; epd = &ep->desc; if (usb_endpoint_xfer_control(epd)) { @@ -381,9 +381,10 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir) return usb_rcvisocpipe(udev, epnum); } +err_ret: /* NOT REACHED */ - dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum); - return 0; + dev_err(&sdev->udev->dev, "get pipe() invalid epnum %d\n", epnum); + return -1; } static void masking_bogus_flags(struct urb *urb) @@ -449,6 +450,9 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, struct usb_device *udev = sdev->udev; int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); + if (pipe == -1) + return; + priv = stub_priv_alloc(sdev, pdu); if (!priv) return; From 1621db059603e781f61a9bf33cba639b42faf0bc Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Thu, 7 Dec 2017 14:16:48 -0700 Subject: [PATCH 0643/3326] usbip: fix stub_rx: harden CMD_SUBMIT path to handle malicious input commit c6688ef9f29762e65bce325ef4acd6c675806366 upstream. Harden CMD_SUBMIT path to handle malicious input that could trigger large memory allocations. Add checks to validate transfer_buffer_length and number_of_packets to protect against bad input requesting for unbounded memory allocations. Validate early in get_pipe() and return failure. 
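[ Editorial illustration, not part of the upstream patch: a condensed sketch of the bounds checks added in get_pipe(), taken from the hunks below with error reporting stripped. The transfer length and the isochronous packet count both come straight off the wire, so they are capped before any allocation is sized from them. ]

	if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX)
		return -1;

	if (usb_endpoint_xfer_isoc(epd)) {
		unsigned int maxp, packets, bytes;

		maxp = usb_endpoint_maxp(epd);
		maxp *= usb_endpoint_maxp_mult(epd);
		bytes = pdu->u.cmd_submit.transfer_buffer_length;
		packets = DIV_ROUND_UP(bytes, maxp);

		/* a buffer of 'bytes' can never need more than 'packets' */
		if (pdu->u.cmd_submit.number_of_packets < 0 ||
		    pdu->u.cmd_submit.number_of_packets > packets)
			return -1;
	}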
Reported-by: Secunia Research Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/stub_rx.c | 35 +++++++++++++++++++++++++++++++---- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 4f03c611c16d2c..283a9be77a229e 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -336,11 +336,13 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev, return priv; } -static int get_pipe(struct stub_device *sdev, int epnum, int dir) +static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) { struct usb_device *udev = sdev->udev; struct usb_host_endpoint *ep; struct usb_endpoint_descriptor *epd = NULL; + int epnum = pdu->base.ep; + int dir = pdu->base.direction; if (epnum < 0 || epnum > 15) goto err_ret; @@ -353,6 +355,15 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir) goto err_ret; epd = &ep->desc; + + /* validate transfer_buffer_length */ + if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) { + dev_err(&sdev->udev->dev, + "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n", + pdu->u.cmd_submit.transfer_buffer_length); + return -1; + } + if (usb_endpoint_xfer_control(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndctrlpipe(udev, epnum); @@ -375,6 +386,21 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir) } if (usb_endpoint_xfer_isoc(epd)) { + /* validate packet size and number of packets */ + unsigned int maxp, packets, bytes; + + maxp = usb_endpoint_maxp(epd); + maxp *= usb_endpoint_maxp_mult(epd); + bytes = pdu->u.cmd_submit.transfer_buffer_length; + packets = DIV_ROUND_UP(bytes, maxp); + + if (pdu->u.cmd_submit.number_of_packets < 0 || + pdu->u.cmd_submit.number_of_packets > packets) { + dev_err(&sdev->udev->dev, + "CMD_SUBMIT: isoc invalid num packets %d\n", + pdu->u.cmd_submit.number_of_packets); + return -1; + } if (dir == USBIP_DIR_OUT) return usb_sndisocpipe(udev, epnum); else @@ -383,7 +409,7 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir) err_ret: /* NOT REACHED */ - dev_err(&sdev->udev->dev, "get pipe() invalid epnum %d\n", epnum); + dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum); return -1; } @@ -448,7 +474,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, struct stub_priv *priv; struct usbip_device *ud = &sdev->ud; struct usb_device *udev = sdev->udev; - int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); + int pipe = get_pipe(sdev, pdu); if (pipe == -1) return; @@ -470,7 +496,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, } /* allocate urb transfer buffer, if needed */ - if (pdu->u.cmd_submit.transfer_buffer_length > 0) { + if (pdu->u.cmd_submit.transfer_buffer_length > 0 && + pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) { priv->urb->transfer_buffer = kzalloc(pdu->u.cmd_submit.transfer_buffer_length, GFP_KERNEL); From b6a2ad646c13bb9d1231bce5599cb3176ff33ca4 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Thu, 7 Dec 2017 14:16:49 -0700 Subject: [PATCH 0644/3326] usbip: prevent vhci_hcd driver from leaking a socket pointer address commit 2f2d0088eb93db5c649d2a5e34a3800a8a935fc5 upstream. When a client has a USB device attached over IP, the vhci_hcd driver is locally leaking a socket pointer address via the /sys/devices/platform/vhci_hcd/status file (world-readable) and in debug output when "usbip --debug port" is run. Fix it to not leak. 
The socket pointer address is not used at the moment and it was made visible as a convenient way to find IP address from socket pointer address by looking up /proc/net/{tcp,tcp6}. As this opens a security hole, the fix replaces socket pointer address with sockfd. Reported-by: Secunia Research Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/usbip_common.h | 1 + drivers/usb/usbip/vhci_sysfs.c | 25 ++++++++++++++++--------- tools/usb/usbip/libsrc/vhci_driver.c | 8 ++++---- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 3050fc99a417b5..33737b612b1fdc 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -270,6 +270,7 @@ struct usbip_device { /* lock for status */ spinlock_t lock; + int sockfd; struct socket *tcp_socket; struct task_struct *tcp_rx; diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index 1b9f60a22e0b41..84df63e3130d28 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -31,15 +31,20 @@ /* * output example: - * hub port sta spd dev socket local_busid - * hs 0000 004 000 00000000 c5a7bb80 1-2.3 + * hub port sta spd dev sockfd local_busid + * hs 0000 004 000 00000000 3 1-2.3 * ................................................ - * ss 0008 004 000 00000000 d8cee980 2-3.4 + * ss 0008 004 000 00000000 4 2-3.4 * ................................................ * - * IP address can be retrieved from a socket pointer address by looking - * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a - * port number and its peer IP address. + * Output includes socket fd instead of socket pointer address to avoid + * leaking kernel memory address in: + * /sys/devices/platform/vhci_hcd.0/status and in debug output. + * The socket pointer address is not used at the moment and it was made + * visible as a convenient way to find IP address from socket pointer + * address by looking up /proc/net/{tcp,tcp6}. As this opens a security + * hole, the change is made to use sockfd instead. + * */ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev) { @@ -53,8 +58,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd if (vdev->ud.status == VDEV_ST_USED) { *out += sprintf(*out, "%03u %08x ", vdev->speed, vdev->devid); - *out += sprintf(*out, "%16p %s", - vdev->ud.tcp_socket, + *out += sprintf(*out, "%u %s", + vdev->ud.sockfd, dev_name(&vdev->udev->dev)); } else { @@ -174,7 +179,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr, char *s = out; /* - * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2. + * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, + * thus the * 2. 
*/ out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers); return out - s; @@ -380,6 +386,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, vdev->devid = devid; vdev->speed = speed; + vdev->ud.sockfd = sockfd; vdev->ud.tcp_socket = socket; vdev->ud.status = VDEV_ST_NOTASSIGNED; diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c index 8a1cd1616de419..d1fc0f9f00fb8c 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.c +++ b/tools/usb/usbip/libsrc/vhci_driver.c @@ -50,14 +50,14 @@ static int parse_status(const char *value) while (*c != '\0') { int port, status, speed, devid; - unsigned long socket; + int sockfd; char lbusid[SYSFS_BUS_ID_SIZE]; struct usbip_imported_device *idev; char hub[3]; - ret = sscanf(c, "%2s %d %d %d %x %lx %31s\n", + ret = sscanf(c, "%2s %d %d %d %x %u %31s\n", hub, &port, &status, &speed, - &devid, &socket, lbusid); + &devid, &sockfd, lbusid); if (ret < 5) { dbg("sscanf failed: %d", ret); @@ -66,7 +66,7 @@ static int parse_status(const char *value) dbg("hub %s port %d status %d speed %d devid %x", hub, port, status, speed, devid); - dbg("socket %lx lbusid %s", socket, lbusid); + dbg("sockfd %u lbusid %s", sockfd, lbusid); /* if a device is connected, look at it */ idev = &vhci_driver->idev[port]; From d78a5506cf0ea112124c1ffa5c0aae09b579d96d Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Thu, 7 Dec 2017 14:16:50 -0700 Subject: [PATCH 0645/3326] usbip: fix stub_send_ret_submit() vulnerability to null transfer_buffer commit be6123df1ea8f01ee2f896a16c2b7be3e4557a5a upstream. stub_send_ret_submit() handles urb with a potential null transfer_buffer, when it replays a packet with potential malicious data that could contain a null buffer. Add a check for the condition when actual_length > 0 and transfer_buffer is null. Reported-by: Secunia Research Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/stub_tx.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index be50cef645d8aa..87ff94be4235eb 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c @@ -181,6 +181,13 @@ static int stub_send_ret_submit(struct stub_device *sdev) memset(&pdu_header, 0, sizeof(pdu_header)); memset(&msg, 0, sizeof(msg)); + if (urb->actual_length > 0 && !urb->transfer_buffer) { + dev_err(&sdev->udev->dev, + "urb: actual_length %d transfer_buffer null\n", + urb->actual_length); + return -1; + } + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) iovnum = 2 + urb->number_of_packets; else From 318cea7d8d801cfa70f5b3a3a991425cd502b574 Mon Sep 17 00:00:00 2001 From: Christoph Fritz Date: Sat, 9 Dec 2017 23:47:55 +0100 Subject: [PATCH 0646/3326] mmc: core: apply NO_CMD23 quirk to some specific cards commit 91516a2a4734614d62ee3ed921f8f88acc67c000 upstream. To get an usdhc Apacer and some ATP SD cards work reliable, CMD23 needs to be disabled. This has been tested on i.MX6 (sdhci-esdhc) and rk3288 (dw_mmc-rockchip). Without this patch on i.MX6 (sdhci-esdhc): $ dd if=/dev/urandom of=/mnt/test bs=1M count=10 conv=fsync | | mmc0: starting CMD25 arg 00a71f00 flags 000000b5 | mmc0: blksz 512 blocks 1024 flags 00000100 tsac 3000 ms nsac 0 | mmc0: CMD12 arg 00000000 flags 0000049d | sdhci [sdhci_irq()]: *** mmc0 got interrupt: 0x00000001 | mmc0: Timeout waiting for hardware interrupt. Without this patch on rk3288 (dw_mmc-rockchip): | mmc1: Card stuck in programming state! 
mmcblk1 card_busy_detect | dwmmc_rockchip ff0c0000.dwmmc: Busy; trying anyway | mmc_host mmc1: Bus speed (slot 0) = 400000Hz (slot req 400000Hz, | actual 400000HZ div = 0) | mmc1: card never left busy state | mmc1: tried to reset card, got error -110 | blk_update_request: I/O error, dev mmcblk1, sector 139778 | Buffer I/O error on dev mmcblk1p1, logical block 131586, lost async | page write Signed-off-by: Christoph Fritz Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/core/card.h | 2 ++ drivers/mmc/core/quirks.h | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index f06cd91964ce97..79a5b985ccf5ee 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h @@ -75,9 +75,11 @@ struct mmc_fixup { #define EXT_CSD_REV_ANY (-1u) #define CID_MANFID_SANDISK 0x2 +#define CID_MANFID_ATP 0x9 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 #define CID_MANFID_SAMSUNG 0x15 +#define CID_MANFID_APACER 0x27 #define CID_MANFID_KINGSTON 0x70 #define CID_MANFID_HYNIX 0x90 diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index f664e9cbc9f8b6..75d317623852dc 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -52,6 +52,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = { MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), + /* + * Some SD cards lockup while using CMD23 multiblock transfers. + */ + MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd, + MMC_QUIRK_BLK_NO_CMD23), + MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd, + MMC_QUIRK_BLK_NO_CMD23), + /* * Some MMC cards need longer data read timeout than indicated in CSD. */ From eae219a2fc5554d86b9ac4d31bef73c273e926c0 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 30 Nov 2017 11:59:22 +0800 Subject: [PATCH 0647/3326] ceph: drop negative child dentries before try pruning inode's alias commit 040d786032bf59002d374b86d75b04d97624005c upstream. Negative child dentry holds reference on inode's alias, it makes d_prune_aliases() do nothing. Signed-off-by: "Yan, Zheng" Reviewed-by: Jeff Layton Signed-off-by: Ilya Dryomov Signed-off-by: Greg Kroah-Hartman --- fs/ceph/mds_client.c | 42 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 0687ab3c32674d..bf378ddca4dba1 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1428,6 +1428,29 @@ static int __close_session(struct ceph_mds_client *mdsc, return request_close_session(mdsc, session); } +static bool drop_negative_children(struct dentry *dentry) +{ + struct dentry *child; + bool all_negative = true; + + if (!d_is_dir(dentry)) + goto out; + + spin_lock(&dentry->d_lock); + list_for_each_entry(child, &dentry->d_subdirs, d_child) { + if (d_really_is_positive(child)) { + all_negative = false; + break; + } + } + spin_unlock(&dentry->d_lock); + + if (all_negative) + shrink_dcache_parent(dentry); +out: + return all_negative; +} + /* * Trim old(er) caps. * @@ -1473,16 +1496,27 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) if ((used | wanted) & ~oissued & mine) goto out; /* we need these caps */ - session->s_trim_caps--; if (oissued) { /* we aren't the only cap.. 
just remove us */ __ceph_remove_cap(cap, true); + session->s_trim_caps--; } else { + struct dentry *dentry; /* try dropping referring dentries */ spin_unlock(&ci->i_ceph_lock); - d_prune_aliases(inode); - dout("trim_caps_cb %p cap %p pruned, count now %d\n", - inode, cap, atomic_read(&inode->i_count)); + dentry = d_find_any_alias(inode); + if (dentry && drop_negative_children(dentry)) { + int count; + dput(dentry); + d_prune_aliases(inode); + count = atomic_read(&inode->i_count); + if (count == 1) + session->s_trim_caps--; + dout("trim_caps_cb %p cap %p pruned, count now %d\n", + inode, cap, count); + } else { + dput(dentry); + } return 0; } From 912fe79116f2456a00cf4a9b2ad3ef207ba3e162 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 8 Dec 2017 18:10:06 +0200 Subject: [PATCH 0648/3326] usb: xhci: fix TDS for MTK xHCI1.1 commit 72b663a99c074a8d073e7ecdae446cfb024ef551 upstream. For MTK's xHCI 1.0 or latter, TD size is the number of max packet sized packets remaining in the TD, not including this TRB (following spec). For MTK's xHCI 0.96 and older, TD size is the number of max packet sized packets remaining in the TD, including this TRB (not following spec). Signed-off-by: Chunfeng Yun Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-ring.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 353520005c1358..6996235e34a953 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -3121,7 +3121,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, { u32 maxp, total_packet_count; - /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */ + /* MTK xHCI 0.96 contains some features from 1.0 */ if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) return ((td_total_len - transferred) >> 10); @@ -3130,8 +3130,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, trb_buff_len == td_total_len) return 0; - /* for MTK xHCI, TD size doesn't include this TRB */ - if (xhci->quirks & XHCI_MTK_HOST) + /* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */ + if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) trb_buff_len = 0; maxp = usb_endpoint_maxp(&urb->ep->desc); From 2b3ff282dff3a42a081bef0f8249a99f64d7669f Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Fri, 8 Dec 2017 18:10:05 +0200 Subject: [PATCH 0649/3326] xhci: Don't add a virt_dev to the devs array before it's fully allocated commit 5d9b70f7d52eb14bb37861c663bae44de9521c35 upstream. Avoid null pointer dereference if some function is walking through the devs array accessing members of a new virt_dev that is mid allocation. Add the virt_dev to xhci->devs[i] _after_ the virt_device and all its members are properly allocated. issue found by KASAN: null-ptr-deref in xhci_find_slot_id_by_port "Quick analysis suggests that xhci_alloc_virt_device() is not mutex protected. If so, there is a time frame where xhci->devs[slot_id] is set but not fully initialized. Specifically, xhci->devs[i]->udev can be NULL." 
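The shape of the fix is the usual publish-after-initialisation pattern: build the object completely in private, then make it visible with one final assignment. A condensed sketch of that ordering (init_virt_dev_members() and free_virt_dev_members() are hypothetical stand-ins for the in_ctx/out_ctx/ring/port setup actually done in xhci_alloc_virt_device(), not real helpers):

    static int alloc_virt_device(struct xhci_hcd *xhci, int slot_id, gfp_t flags)
    {
        struct xhci_virt_device *dev = kzalloc(sizeof(*dev), flags);

        if (!dev)
            return 0;

        if (init_virt_dev_members(xhci, dev, flags)) {
            free_virt_dev_members(xhci, dev);   /* undo any partial setup */
            kfree(dev);
            return 0;
        }

        xhci->devs[slot_id] = dev;  /* publish only once fully initialised */
        return 1;
    }

Anything walking xhci->devs[] concurrently then sees either NULL or a complete device, never a half-built one.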
Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-mem.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 97f30eb7dac0d0..ccdc971283d092 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -983,10 +983,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, return 0; } - xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); - if (!xhci->devs[slot_id]) + dev = kzalloc(sizeof(*dev), flags); + if (!dev) return 0; - dev = xhci->devs[slot_id]; /* Allocate the (output) device context that will be used in the HC. */ dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); @@ -1027,9 +1026,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, trace_xhci_alloc_virt_device(dev); + xhci->devs[slot_id] = dev; + return 1; fail: - xhci_free_virt_device(xhci, slot_id); + + if (dev->in_ctx) + xhci_free_container_ctx(xhci, dev->in_ctx); + if (dev->out_ctx) + xhci_free_container_ctx(xhci, dev->out_ctx); + kfree(dev); + return 0; } From 4995bfa99096410af00aa0b1b374005494c709f3 Mon Sep 17 00:00:00 2001 From: Daniel Jurgens Date: Tue, 5 Dec 2017 22:30:01 +0200 Subject: [PATCH 0650/3326] IB/core: Bound check alternate path port number commit 4cae8ff136782d77b108cb3a5ba53e60597ba3a6 upstream. The alternate port number is used as an array index in the IB security implementation, invalid values can result in a kernel panic. Fixes: d291f1a65232 ("IB/core: Enforce PKey security on QPs") Signed-off-by: Daniel Jurgens Reviewed-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/uverbs_cmd.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 52a2cf2d83aaf4..d8f540054392ab 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1982,6 +1982,12 @@ static int modify_qp(struct ib_uverbs_file *file, goto release_qp; } + if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && + !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) { + ret = -EINVAL; + goto release_qp; + } + attr->qp_state = cmd->base.qp_state; attr->cur_qp_state = cmd->base.cur_qp_state; attr->path_mtu = cmd->base.path_mtu; From 74b6274f8c5e81d7243ca2dea4f80c8585b3d0e0 Mon Sep 17 00:00:00 2001 From: Daniel Jurgens Date: Tue, 5 Dec 2017 22:30:02 +0200 Subject: [PATCH 0651/3326] IB/core: Don't enforce PKey security on SMI MADs commit 0fbe8f575b15585eec3326e43708fbbc024e8486 upstream. Per the infiniband spec an SMI MAD can have any PKey. Checking the pkey on SMI MADs is not necessary, and it seems that some older adapters using the mthca driver don't follow the convention of using the default PKey, resulting in false denials, or errors querying the PKey cache. SMI MAD security is still enforced, only agents allowed to manage the subnet are able to receive or send SMI MADs. 
Reported-by: Chris Blake Fixes: 47a2b338fe63 ("IB/core: Enforce security on management datagrams") Signed-off-by: Daniel Jurgens Reviewed-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/security.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index a337386652b073..feafdb961c485c 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -739,8 +739,11 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) if (!rdma_protocol_ib(map->agent.device, map->agent.port_num)) return 0; - if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) - return -EACCES; + if (map->agent.qp->qp_type == IB_QPT_SMI) { + if (!map->agent.smp_allowed) + return -EACCES; + return 0; + } return ib_security_pkey_access(map->agent.device, map->agent.port_num, From 268b2cc325061c60fa2dccfa4d61495e9306c4fa Mon Sep 17 00:00:00 2001 From: Scott Mayhew Date: Fri, 8 Dec 2017 16:00:12 -0500 Subject: [PATCH 0652/3326] nfs: don't wait on commit in nfs_commit_inode() if there were no commit requests commit dc4fd9ab01ab379ae5af522b3efd4187a7c30a31 upstream. If there were no commit requests, then nfs_commit_inode() should not wait on the commit or mark the inode dirty, otherwise the following BUG_ON can be triggered: [ 1917.130762] kernel BUG at fs/inode.c:578! [ 1917.130766] Oops: Exception in kernel mode, sig: 5 [#1] [ 1917.130768] SMP NR_CPUS=2048 NUMA pSeries [ 1917.130772] Modules linked in: iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi blocklayoutdriver rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace fscache sunrpc sg nx_crypto pseries_rng ip_tables xfs libcrc32c sd_mod crc_t10dif crct10dif_generic crct10dif_common ibmvscsi scsi_transport_srp ibmveth scsi_tgt dm_mirror dm_region_hash dm_log dm_mod [ 1917.130805] CPU: 2 PID: 14923 Comm: umount.nfs4 Tainted: G ------------ T 3.10.0-768.el7.ppc64 #1 [ 1917.130810] task: c0000005ecd88040 ti: c00000004cea0000 task.ti: c00000004cea0000 [ 1917.130813] NIP: c000000000354178 LR: c000000000354160 CTR: c00000000012db80 [ 1917.130816] REGS: c00000004cea3720 TRAP: 0700 Tainted: G ------------ T (3.10.0-768.el7.ppc64) [ 1917.130820] MSR: 8000000100029032 CR: 22002822 XER: 20000000 [ 1917.130828] CFAR: c00000000011f594 SOFTE: 1 GPR00: c000000000354160 c00000004cea39a0 c0000000014c4700 c0000000018cc750 GPR04: 000000000000c750 80c0000000000000 0600000000000000 04eeb76bea749a03 GPR08: 0000000000000034 c0000000018cc758 0000000000000001 d000000005e619e8 GPR12: c00000000012db80 c000000007b31200 0000000000000000 0000000000000000 GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR20: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR24: 0000000000000000 c000000000dfc3ec 0000000000000000 c0000005eefc02c0 GPR28: d0000000079dbd50 c0000005b94a02c0 c0000005b94a0250 c0000005b94a01c8 [ 1917.130867] NIP [c000000000354178] .evict+0x1c8/0x350 [ 1917.130871] LR [c000000000354160] .evict+0x1b0/0x350 [ 1917.130873] Call Trace: [ 1917.130876] [c00000004cea39a0] [c000000000354160] .evict+0x1b0/0x350 (unreliable) [ 1917.130880] [c00000004cea3a30] [c0000000003558cc] .evict_inodes+0x13c/0x270 [ 1917.130884] [c00000004cea3af0] [c000000000327d20] .kill_anon_super+0x70/0x1e0 [ 1917.130896] [c00000004cea3b80] [d000000005e43e30] .nfs_kill_super+0x20/0x60 [nfs] [ 1917.130900] [c00000004cea3c00] 
[c000000000328a20] .deactivate_locked_super+0xa0/0x1b0 [ 1917.130903] [c00000004cea3c80] [c00000000035ba54] .cleanup_mnt+0xd4/0x180 [ 1917.130907] [c00000004cea3d10] [c000000000119034] .task_work_run+0x114/0x150 [ 1917.130912] [c00000004cea3db0] [c00000000001ba6c] .do_notify_resume+0xcc/0x100 [ 1917.130916] [c00000004cea3e30] [c00000000000a7b0] .ret_from_except_lite+0x5c/0x60 [ 1917.130919] Instruction dump: [ 1917.130921] 7fc3f378 486734b5 60000000 387f00a0 38800003 4bdcb365 60000000 e95f00a0 [ 1917.130927] 694a0060 7d4a0074 794ad182 694a0001 <0b0a0000> 892d02a4 2f890000 40de0134 Signed-off-by: Scott Mayhew Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- fs/nfs/write.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index babebbccae2a0e..de325804941d1f 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1889,6 +1889,8 @@ int nfs_commit_inode(struct inode *inode, int how) if (res) error = nfs_generic_commit_list(inode, &head, how, &cinfo); nfs_commit_end(cinfo.mds); + if (res == 0) + return res; if (error < 0) goto out_error; if (!may_wait) From db9d4784eb5ba8c69ea6b1a7b7c7c7af0b6bdf4d Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Fri, 1 Dec 2017 17:22:14 +0000 Subject: [PATCH 0653/3326] arm64: mm: Fix pte_mkclean, pte_mkdirty semantics commit 8781bcbc5e69d7da69e84c7044ca0284848d5d01 upstream. On systems with hardware dirty bit management, the ltp madvise09 unit test fails due to dirty bit information being lost and pages being incorrectly freed. This was bisected to: arm64: Ignore hardware dirty bit updates in ptep_set_wrprotect() Reverting this commit leads to a separate problem, that the unit test retains pages that should have been dropped due to the function madvise_free_pte_range(.) not cleaning pte's properly. Currently pte_mkclean only clears the software dirty bit, thus the following code sequence can appear: pte = pte_mkclean(pte); if (pte_dirty(pte)) // this condition can return true with HW DBM! This patch also adjusts pte_mkclean to set PTE_RDONLY thus effectively clearing both the SW and HW dirty information. In order for this to function on systems without HW DBM, we need to also adjust pte_mkdirty to remove the read only bit from writable pte's to avoid infinite fault loops. 
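For reference, the arm64 dirty test combines the software and hardware views roughly like this (a simplified sketch of the pte helpers in arch/arm64/include/asm/pgtable.h):

    /* simplified: how a pte is treated as dirty with and without HW DBM */
    #define pte_hw_dirty(pte)  (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
    #define pte_sw_dirty(pte)  (!!(pte_val(pte) & PTE_DIRTY))
    #define pte_dirty(pte)     (pte_sw_dirty(pte) || pte_hw_dirty(pte))

So pte_mkclean() has to set PTE_RDONLY as well as clearing PTE_DIRTY, and pte_mkdirty() has to clear PTE_RDONLY again on writable ptes, otherwise the two halves of pte_dirty() disagree.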
Fixes: 64c26841b349 ("arm64: Ignore hardware dirty bit updates in ptep_set_wrprotect()") Reported-by: Bhupinder Thakur Tested-by: Bhupinder Thakur Reviewed-by: Catalin Marinas Signed-off-by: Steve Capper Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/pgtable.h | 33 +++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index c9530b5b5ca836..960d05c8816af0 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -149,12 +149,20 @@ static inline pte_t pte_mkwrite(pte_t pte) static inline pte_t pte_mkclean(pte_t pte) { - return clear_pte_bit(pte, __pgprot(PTE_DIRTY)); + pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY)); + pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); + + return pte; } static inline pte_t pte_mkdirty(pte_t pte) { - return set_pte_bit(pte, __pgprot(PTE_DIRTY)); + pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); + + if (pte_write(pte)) + pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); + + return pte; } static inline pte_t pte_mkold(pte_t pte) @@ -642,28 +650,23 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* - * ptep_set_wrprotect - mark read-only while preserving the hardware update of - * the Access Flag. + * ptep_set_wrprotect - mark read-only while trasferring potential hardware + * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. */ #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t old_pte, pte; - /* - * ptep_set_wrprotect() is only called on CoW mappings which are - * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE && - * PTE_RDONLY) or writable and software-dirty (PTE_WRITE && - * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and - * protection_map[]. There is no race with the hardware update of the - * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM) - * is set. - */ - VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep), - "%s: potential race with hardware DBM", __func__); pte = READ_ONCE(*ptep); do { old_pte = pte; + /* + * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY + * clear), set the PTE_DIRTY bit. + */ + if (pte_hw_dirty(pte)) + pte = pte_mkdirty(pte); pte = pte_wrprotect(pte); pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), pte_val(old_pte), pte_val(pte)); From cdcfd13a66eb53fd96dc480de79c423aa0de9d39 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Mon, 4 Dec 2017 14:13:05 +0000 Subject: [PATCH 0654/3326] arm64: Initialise high_memory global variable earlier commit f24e5834a2c3f6c5f814a417f858226f0a010ade upstream. The high_memory global variable is used by cma_declare_contiguous(.) before it is defined. We don't notice this as we compute __pa(high_memory - 1), and it looks like we're processing a VA from the direct linear map. This problem becomes apparent when we flip the kernel virtual address space and the linear map is moved to the bottom of the kernel VA space. This patch moves the initialisation of high_memory before it used. 
Fixes: f7426b983a6a ("mm: cma: adjust address limit to avoid hitting low/high memory boundary") Signed-off-by: Steve Capper Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/mm/init.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 5960bef0170df8..00e7b900ca4193 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -476,6 +476,8 @@ void __init arm64_memblock_init(void) reserve_elfcorehdr(); + high_memory = __va(memblock_end_of_DRAM() - 1) + 1; + dma_contiguous_reserve(arm64_dma_phys_limit); memblock_allow_resize(); @@ -502,7 +504,6 @@ void __init bootmem_init(void) sparse_init(); zone_sizes_init(min, max); - high_memory = __va((max << PAGE_SHIFT) - 1) + 1; memblock_dump_all(); } From 9854611cf5e604773fecfa75bfafc03621df508c Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 13 Dec 2017 11:45:42 +0000 Subject: [PATCH 0655/3326] arm64: fix CONFIG_DEBUG_WX address reporting commit 1d08a044cf12aee37dfd54837558e3295287b343 upstream. In ptdump_check_wx(), we pass walk_pgd() a start address of 0 (rather than VA_START) for the init_mm. This means that any reported W&X addresses are offset by VA_START, which is clearly wrong and can make them appear like userspace addresses. Fix this by telling the ptdump code that we're walking init_mm starting at VA_START. We don't need to update the addr_markers, since these are still valid bounds regardless. Fixes: 1404d6f13e47 ("arm64: dump: Add checking for writable and exectuable pages") Signed-off-by: Mark Rutland Cc: Kees Cook Cc: Laura Abbott Reported-by: Timur Tabi Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/mm/dump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index ca74a2aace425b..7b60d62ac5939e 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -389,7 +389,7 @@ void ptdump_check_wx(void) .check_wx = true, }; - walk_pgd(&st, &init_mm, 0); + walk_pgd(&st, &init_mm, VA_START); note_page(&st, 0, 0, 0); if (st.wx_pages || st.uxn_pages) pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", From 39c4330c9779d0acc213a0ace72b88a7b7a06012 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 5 Dec 2017 16:57:51 -0800 Subject: [PATCH 0656/3326] scsi: core: Fix a scsi_show_rq() NULL pointer dereference commit 14e3062fb18532175af4d1c4073597999f7a2248 upstream. Avoid that scsi_show_rq() triggers a NULL pointer dereference if called after sd_uninit_command(). Swap the NULL pointer assignment and the mempool_free() call in sd_uninit_command() to make it less likely that scsi_show_rq() triggers a use-after-free. Note: even with these changes scsi_show_rq() can trigger a use-after-free but that's a lesser evil than e.g. suppressing debug information for T10 PI Type 2 commands completely. This patch fixes the following oops: BUG: unable to handle kernel NULL pointer dereference at (null) IP: scsi_format_opcode_name+0x1a/0x1c0 CPU: 1 PID: 1881 Comm: cat Not tainted 4.14.0-rc2.blk_mq_io_hang+ #516 Call Trace: __scsi_format_command+0x27/0xc0 scsi_show_rq+0x5c/0xc0 __blk_mq_debugfs_rq_show+0x116/0x130 blk_mq_debugfs_rq_show+0xe/0x10 seq_read+0xfe/0x3b0 full_proxy_read+0x54/0x90 __vfs_read+0x37/0x160 vfs_read+0x96/0x130 SyS_read+0x55/0xc0 entry_SYSCALL_64_fastpath+0x1a/0xa5 [mkp: added Type 2] Fixes: 0eebd005dd07 ("scsi: Implement blk_mq_ops.show_rq()") Reported-by: Ming Lei Signed-off-by: Bart Van Assche Cc: James E.J. 
Bottomley Cc: Martin K. Petersen Cc: Ming Lei Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Johannes Thumshirn Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/scsi_debugfs.c | 6 ++++-- drivers/scsi/sd.c | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c index 01f08c03f2c185..c3765d29fd3ff8 100644 --- a/drivers/scsi/scsi_debugfs.c +++ b/drivers/scsi/scsi_debugfs.c @@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq) { struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req); int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); - char buf[80]; + const u8 *const cdb = READ_ONCE(cmd->cmnd); + char buf[80] = "(?)"; - __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); + if (cdb) + __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len); seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf, cmd->retries, msecs / 1000, msecs % 1000); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d175c5c5ccf87e..d841743b2107c9 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1284,6 +1284,7 @@ static int sd_init_command(struct scsi_cmnd *cmd) static void sd_uninit_command(struct scsi_cmnd *SCpnt) { struct request *rq = SCpnt->request; + u8 *cmnd; if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) sd_zbc_write_unlock_zone(SCpnt); @@ -1292,9 +1293,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) __free_page(rq->special_vec.bv_page); if (SCpnt->cmnd != scsi_req(rq)->cmd) { - mempool_free(SCpnt->cmnd, sd_cdb_pool); + cmnd = SCpnt->cmnd; SCpnt->cmnd = NULL; SCpnt->cmd_len = 0; + mempool_free(cmnd, sd_cdb_pool); } } From c2f79ce3fe441486b3ab3dfce0f7bf49bd07dc3c Mon Sep 17 00:00:00 2001 From: Jason Yan Date: Mon, 11 Dec 2017 15:03:33 +0800 Subject: [PATCH 0657/3326] scsi: libsas: fix length error in sas_smp_handler() commit 621f6401fdeefe96dfe9eab4b167c7c39f552bb0 upstream. The return value of smp_execute_task_sg() is the untransferred residual, but bsg_job_done() requires the length of payload received. This makes SMP passthrough commands from userland by sg ioctl to libsas get a wrong response. The userland tools such as smp_utils failed because of these wrong responses: ~#smp_discover /dev/bsg/expander-2\:13 response too short, len=0 ~#smp_discover /dev/bsg/expander-2\:134 response too short, len=0 Fix this by passing the actual received length to bsg_job_done(). And if smp_execute_task_sg() returns 0, this means received length is exactly the buffer length. [mkp: typo] Fixes: 651a01364994 ("scsi: scsi_transport_sas: switch to bsg-lib for SMP passthrough") Signed-off-by: Jason Yan Reported-by: chenqilin Tested-by: chenqilin CC: Christoph Hellwig Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/libsas/sas_expander.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 6b4fd237517858..324d8d8c62decb 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -2145,7 +2145,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { struct domain_device *dev; - unsigned int reslen = 0; + unsigned int rcvlen = 0; int ret = -EINVAL; /* no rphy means no smp target support (ie aic94xx host) */ @@ -2179,12 +2179,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, ret = smp_execute_task_sg(dev, job->request_payload.sg_list, job->reply_payload.sg_list); - if (ret > 0) { - /* positive number is the untransferred residual */ - reslen = ret; + if (ret >= 0) { + /* bsg_job_done() requires the length received */ + rcvlen = job->reply_payload.payload_len - ret; ret = 0; } out: - bsg_job_done(job, ret, reslen); + bsg_job_done(job, ret, rcvlen); } From 282e4b259d4f9429f7813efc8076dd601ed59f45 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 2 Dec 2017 13:04:54 -0500 Subject: [PATCH 0658/3326] sched/rt: Do not pull from current CPU if only one CPU to pull commit f73c52a5bcd1710994e53fbccc378c42b97a06b6 upstream. Daniel Wagner reported a crash on the BeagleBone Black SoC. This is a single CPU architecture, and does not have a functional arch_send_call_function_single_ipi() implementation which can crash the kernel if that is called. As it only has one CPU, it shouldn't be called, but if the kernel is compiled for SMP, the push/pull RT scheduling logic now calls it for irq_work if the one CPU is overloaded, it can use that function to call itself and crash the kernel. Ideally, we should disable the SCHED_FEAT(RT_PUSH_IPI) if the system only has a single CPU. But SCHED_FEAT is a constant if sched debugging is turned off. Another fix can also be used, and this should also help with normal SMP machines. That is, do not initiate the pull code if there's only one RT overloaded CPU, and that CPU happens to be the current CPU that is scheduling in a lower priority task. Even on a system with many CPUs, if there's many RT tasks waiting to run on a single CPU, and that CPU schedules in another RT task of lower priority, it will initiate the PULL logic in case there's a higher priority RT task on another CPU that is waiting to run. But if there is no other CPU with waiting RT tasks, it will initiate the RT pull logic on itself (as it still has RT tasks waiting to run). This is a wasted effort. Not only does this help with SMP code where the current CPU is the only one with RT overloaded tasks, it should also solve the issue that Daniel encountered, because it will prevent the PULL logic from executing, as there's only one CPU on the system, and the check added here will cause it to exit the RT pull code. 
Reported-by: Daniel Wagner Signed-off-by: Steven Rostedt (VMware) Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Sebastian Andrzej Siewior Cc: Thomas Gleixner Cc: linux-rt-users Fixes: 4bdced5c9 ("sched/rt: Simplify the IPI based RT balancing logic") Link: http://lkml.kernel.org/r/20171202130454.4cbbfe8d@vmware.local.home Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- kernel/sched/rt.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index d8c43d73e07880..7464c5c4de4678 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2034,8 +2034,9 @@ static void pull_rt_task(struct rq *this_rq) bool resched = false; struct task_struct *p; struct rq *src_rq; + int rt_overload_count = rt_overloaded(this_rq); - if (likely(!rt_overloaded(this_rq))) + if (likely(!rt_overload_count)) return; /* @@ -2044,6 +2045,11 @@ static void pull_rt_task(struct rq *this_rq) */ smp_rmb(); + /* If we are the only overloaded CPU do nothing */ + if (rt_overload_count == 1 && + cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) + return; + #ifdef HAVE_RT_PUSH_IPI if (sched_feat(RT_PUSH_IPI)) { tell_cpu_to_push(this_rq); From fbce429b410cc443ec3fa3293a0e4e5804ae4f2d Mon Sep 17 00:00:00 2001 From: "monty_pavel@sina.com" Date: Sat, 25 Nov 2017 01:43:50 +0800 Subject: [PATCH 0659/3326] dm: fix various targets to dm_register_target after module __init resources created commit 7e6358d244e4706fe612a77b9c36519a33600ac0 upstream. A NULL pointer is seen if two concurrent "vgchange -ay -K " processes race to load the dm-thin-pool module: PID: 25992 TASK: ffff883cd7d23500 CPU: 4 COMMAND: "vgchange" #0 [ffff883cd743d600] machine_kexec at ffffffff81038fa9 0000001 [ffff883cd743d660] crash_kexec at ffffffff810c5992 0000002 [ffff883cd743d730] oops_end at ffffffff81515c90 0000003 [ffff883cd743d760] no_context at ffffffff81049f1b 0000004 [ffff883cd743d7b0] __bad_area_nosemaphore at ffffffff8104a1a5 0000005 [ffff883cd743d800] bad_area at ffffffff8104a2ce 0000006 [ffff883cd743d830] __do_page_fault at ffffffff8104aa6f 0000007 [ffff883cd743d950] do_page_fault at ffffffff81517bae 0000008 [ffff883cd743d980] page_fault at ffffffff81514f95 [exception RIP: kmem_cache_alloc+108] RIP: ffffffff8116ef3c RSP: ffff883cd743da38 RFLAGS: 00010046 RAX: 0000000000000004 RBX: ffffffff81121b90 RCX: ffff881bf1e78cc0 RDX: 0000000000000000 RSI: 00000000000000d0 RDI: 0000000000000000 RBP: ffff883cd743da68 R8: ffff881bf1a4eb00 R9: 0000000080042000 R10: 0000000000002000 R11: 0000000000000000 R12: 00000000000000d0 R13: 0000000000000000 R14: 00000000000000d0 R15: 0000000000000246 ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018 0000009 [ffff883cd743da70] mempool_alloc_slab at ffffffff81121ba5 0000010 [ffff883cd743da80] mempool_create_node at ffffffff81122083 0000011 [ffff883cd743dad0] mempool_create at ffffffff811220f4 0000012 [ffff883cd743dae0] pool_ctr at ffffffffa08de049 [dm_thin_pool] 0000013 [ffff883cd743dbd0] dm_table_add_target at ffffffffa0005f2f [dm_mod] 0000014 [ffff883cd743dc30] table_load at ffffffffa0008ba9 [dm_mod] 0000015 [ffff883cd743dc90] ctl_ioctl at ffffffffa0009dc4 [dm_mod] The race results in a NULL pointer because: Process A (vgchange -ay -K): a. send DM_LIST_VERSIONS_CMD ioctl; b. pool_target not registered; c. modprobe dm_thin_pool and wait until end. Process B (vgchange -ay -K): a. send DM_LIST_VERSIONS_CMD ioctl; b. pool_target registered; c. table_load->dm_table_add_target->pool_ctr; d. _new_mapping_cache is NULL and panic. Note: 1. 
process A and process B are two concurrent processes. 2. pool_target can be detected by process B but _new_mapping_cache initialization has not ended. To fix dm-thin-pool, and other targets (cache, multipath, and snapshot) with the same problem, simply dm_register_target() after all resources created during module init (as labelled with __init) are finished. Signed-off-by: monty Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-cache-target.c | 12 ++++----- drivers/md/dm-mpath.c | 18 +++++++------- drivers/md/dm-snap.c | 48 ++++++++++++++++++------------------ drivers/md/dm-thin.c | 22 ++++++++--------- 4 files changed, 49 insertions(+), 51 deletions(-) diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 0b7edfd0b454e4..71c3507df9a0ee 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -3554,18 +3554,18 @@ static int __init dm_cache_init(void) { int r; - r = dm_register_target(&cache_target); - if (r) { - DMERR("cache target registration failed: %d", r); - return r; - } - migration_cache = KMEM_CACHE(dm_cache_migration, 0); if (!migration_cache) { dm_unregister_target(&cache_target); return -ENOMEM; } + r = dm_register_target(&cache_target); + if (r) { + DMERR("cache target registration failed: %d", r); + return r; + } + return 0; } diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index e8094d8fbe0d5f..35e82b14ded75f 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1965,13 +1965,6 @@ static int __init dm_multipath_init(void) { int r; - r = dm_register_target(&multipath_target); - if (r < 0) { - DMERR("request-based register failed %d", r); - r = -EINVAL; - goto bad_register_target; - } - kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); if (!kmultipathd) { DMERR("failed to create workqueue kmpathd"); @@ -1993,13 +1986,20 @@ static int __init dm_multipath_init(void) goto bad_alloc_kmpath_handlerd; } + r = dm_register_target(&multipath_target); + if (r < 0) { + DMERR("request-based register failed %d", r); + r = -EINVAL; + goto bad_register_target; + } + return 0; +bad_register_target: + destroy_workqueue(kmpath_handlerd); bad_alloc_kmpath_handlerd: destroy_workqueue(kmultipathd); bad_alloc_kmultipathd: - dm_unregister_target(&multipath_target); -bad_register_target: return r; } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 1113b42e1edae4..a0613bd8ed00ef 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void) return r; } - r = dm_register_target(&snapshot_target); - if (r < 0) { - DMERR("snapshot target register failed %d", r); - goto bad_register_snapshot_target; - } - - r = dm_register_target(&origin_target); - if (r < 0) { - DMERR("Origin target register failed %d", r); - goto bad_register_origin_target; - } - - r = dm_register_target(&merge_target); - if (r < 0) { - DMERR("Merge target register failed %d", r); - goto bad_register_merge_target; - } - r = init_origin_hash(); if (r) { DMERR("init_origin_hash failed."); @@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void) goto bad_pending_cache; } + r = dm_register_target(&snapshot_target); + if (r < 0) { + DMERR("snapshot target register failed %d", r); + goto bad_register_snapshot_target; + } + + r = dm_register_target(&origin_target); + if (r < 0) { + DMERR("Origin target register failed %d", r); + goto bad_register_origin_target; + } + + r = dm_register_target(&merge_target); + if (r < 0) { + DMERR("Merge target 
register failed %d", r); + goto bad_register_merge_target; + } + return 0; -bad_pending_cache: - kmem_cache_destroy(exception_cache); -bad_exception_cache: - exit_origin_hash(); -bad_origin_hash: - dm_unregister_target(&merge_target); bad_register_merge_target: dm_unregister_target(&origin_target); bad_register_origin_target: dm_unregister_target(&snapshot_target); bad_register_snapshot_target: + kmem_cache_destroy(pending_cache); +bad_pending_cache: + kmem_cache_destroy(exception_cache); +bad_exception_cache: + exit_origin_hash(); +bad_origin_hash: dm_exception_store_exit(); return r; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 1e25705209c27f..02e42ba2ecbcd8 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -4355,30 +4355,28 @@ static struct target_type thin_target = { static int __init dm_thin_init(void) { - int r; + int r = -ENOMEM; pool_table_init(); + _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); + if (!_new_mapping_cache) + return r; + r = dm_register_target(&thin_target); if (r) - return r; + goto bad_new_mapping_cache; r = dm_register_target(&pool_target); if (r) - goto bad_pool_target; - - r = -ENOMEM; - - _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); - if (!_new_mapping_cache) - goto bad_new_mapping_cache; + goto bad_thin_target; return 0; -bad_new_mapping_cache: - dm_unregister_target(&pool_target); -bad_pool_target: +bad_thin_target: dm_unregister_target(&thin_target); +bad_new_mapping_cache: + kmem_cache_destroy(_new_mapping_cache); return r; } From 7dd1362247ebfe06ab1f58d7fee739c5f773a0ac Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 14 Dec 2017 21:24:08 -0500 Subject: [PATCH 0660/3326] SUNRPC: Fix a race in the receive code path commit 90d91b0cd371193d9dbfa9beacab8ab9a4cb75e0 upstream. We must ensure that the call to rpc_sleep_on() in xprt_transmit() cannot race with the call to xprt_complete_rqst(). Reported-by: Chuck Lever Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=317 Fixes: ce7c252a8c74 ("SUNRPC: Add a separate spinlock to protect..") Reviewed-by: Chuck Lever Signed-off-by: Trond Myklebust Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- net/sunrpc/xprt.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 898485e3ece4e8..8eb0c4f3b3e96a 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1001,6 +1001,7 @@ void xprt_transmit(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; + unsigned int connect_cookie; int status, numreqs; dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); @@ -1024,6 +1025,7 @@ void xprt_transmit(struct rpc_task *task) } else if (!req->rq_bytes_sent) return; + connect_cookie = xprt->connect_cookie; req->rq_xtime = ktime_get(); status = xprt->ops->send_request(task); trace_xprt_transmit(xprt, req->rq_xid, status); @@ -1047,20 +1049,28 @@ void xprt_transmit(struct rpc_task *task) xprt->stat.bklog_u += xprt->backlog.qlen; xprt->stat.sending_u += xprt->sending.qlen; xprt->stat.pending_u += xprt->pending.qlen; + spin_unlock_bh(&xprt->transport_lock); - /* Don't race with disconnect */ - if (!xprt_connected(xprt)) - task->tk_status = -ENOTCONN; - else { + req->rq_connect_cookie = connect_cookie; + if (rpc_reply_expected(task) && !READ_ONCE(req->rq_reply_bytes_recvd)) { /* - * Sleep on the pending queue since - * we're expecting a reply. + * Sleep on the pending queue if we're expecting a reply. 
+ * The spinlock ensures atomicity between the test of + * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on(). */ - if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) + spin_lock(&xprt->recv_lock); + if (!req->rq_reply_bytes_recvd) { rpc_sleep_on(&xprt->pending, task, xprt_timer); - req->rq_connect_cookie = xprt->connect_cookie; + /* + * Send an extra queue wakeup call if the + * connection was dropped in case the call to + * rpc_sleep_on() raced. + */ + if (!xprt_connected(xprt)) + xprt_wake_pending_tasks(xprt, -ENOTCONN); + } + spin_unlock(&xprt->recv_lock); } - spin_unlock_bh(&xprt->transport_lock); } static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) From 2d8262155ab3d20219ca5815f65573592d717c3a Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Mon, 27 Nov 2017 13:16:32 -0800 Subject: [PATCH 0661/3326] iw_cxgb4: only insert drain cqes if wq is flushed commit c058ecf6e455fac7346d46197a02398ead90851f upstream. Only insert our special drain CQEs to support ib_drain_sq/rq() after the wq is flushed. Otherwise, existing but not yet polled CQEs can be returned out of order to the user application. This can happen when the QP has exited RTS but not yet flushed the QP, which can happen during a normal close (vs abortive close). In addition never count the drain CQEs when determining how many CQEs need to be synthesized during the flush operation. This latter issue should never happen if the QP is properly flushed before inserting the drain CQE, but I wanted to avoid corrupting the CQ state. So we handle it and log a warning once. Fixes: 4fe7c2962e11 ("iw_cxgb4: refactor sq/rq drain logic") Signed-off-by: Steve Wise Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/cxgb4/cq.c | 5 +++++ drivers/infiniband/hw/cxgb4/qp.c | 14 ++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index be07da1997e68e..eae8ea81c6e271 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -410,6 +410,11 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) { + if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) { + WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid); + return 0; + } + if (CQE_OPCODE(cqe) == FW_RI_TERMINATE) return 0; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index cb7fc0d35d1d14..e69453665a174a 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -868,7 +868,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, qhp = to_c4iw_qp(ibqp); spin_lock_irqsave(&qhp->lock, flag); - if (t4_wq_in_error(&qhp->wq)) { + + /* + * If the qp has been flushed, then just insert a special + * drain cqe. + */ + if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); complete_sq_drain_wr(qhp, wr); return err; @@ -1012,7 +1017,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, qhp = to_c4iw_qp(ibqp); spin_lock_irqsave(&qhp->lock, flag); - if (t4_wq_in_error(&qhp->wq)) { + + /* + * If the qp has been flushed, then just insert a special + * drain cqe. + */ + if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); complete_rq_drain_wr(qhp, wr); return err; From 1bd2b3061273f1bd7d691e3ea5c15e1f57f8ab6d Mon Sep 17 00:00:00 2001 From: "Kirill A. 
Shutemov" Date: Mon, 4 Dec 2017 15:40:55 +0300 Subject: [PATCH 0662/3326] x86/boot/compressed/64: Detect and handle 5-level paging at boot-time commit 08529078d8d9adf689bf39cc38d53979a0869970 upstream. Prerequisite for fixing the current problem of instantaneous reboots when a 5-level paging kernel is booted on 4-level paging hardware. At the same time this change prepares the decompression code to boot-time switching between 4- and 5-level paging. [ tglx: Folded the GCC < 5 fix. ] Fixes: 77ef56e4f0fb ("x86: Enable 5-level paging support via CONFIG_X86_5LEVEL=y") Signed-off-by: Kirill A. Shutemov Signed-off-by: Thomas Gleixner Cc: Andi Kleen Cc: Andy Lutomirski Cc: linux-mm@kvack.org Cc: Cyrill Gorcunov Cc: Borislav Petkov Cc: Linus Torvalds Link: https://lkml.kernel.org/r/20171204124059.63515-2-kirill.shutemov@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/boot/compressed/Makefile | 1 + arch/x86/boot/compressed/head_64.S | 16 +++++++++++---- arch/x86/boot/compressed/pgtable_64.c | 28 +++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 4 deletions(-) create mode 100644 arch/x86/boot/compressed/pgtable_64.c diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 4b7575b0056311..98018a621f6b0c 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -78,6 +78,7 @@ vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o ifdef CONFIG_X86_64 vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o + vmlinux-objs-y += $(obj)/pgtable_64.o endif $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index beb255b66447a8..4b3d92a37c801a 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -289,10 +289,18 @@ ENTRY(startup_64) leaq boot_stack_end(%rbx), %rsp #ifdef CONFIG_X86_5LEVEL - /* Check if 5-level paging has already enabled */ - movq %cr4, %rax - testl $X86_CR4_LA57, %eax - jnz lvl5 + /* + * Check if we need to enable 5-level paging. + * RSI holds real mode data and need to be preserved across + * a function call. + */ + pushq %rsi + call l5_paging_required + popq %rsi + + /* If l5_paging_required() returned zero, we're done here. */ + cmpq $0, %rax + je lvl5 /* * At this point we are in long mode with 4-level paging enabled, diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c new file mode 100644 index 00000000000000..b4469a37e9a16f --- /dev/null +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -0,0 +1,28 @@ +#include + +/* + * __force_order is used by special_insns.h asm code to force instruction + * serialization. + * + * It is not referenced from the code, but GCC < 5 with -fPIE would fail + * due to an undefined symbol. Define it to make these ancient GCCs work. + */ +unsigned long __force_order; + +int l5_paging_required(void) +{ + /* Check if leaf 7 is supported. */ + + if (native_cpuid_eax(0) < 7) + return 0; + + /* Check if la57 is supported. */ + if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) + return 0; + + /* Check if 5-level paging has already been enabled. */ + if (native_read_cr4() & X86_CR4_LA57) + return 0; + + return 1; +} From e48a17d115f84048294ae366ff6e9dd7c8c6cca4 Mon Sep 17 00:00:00 2001 From: "Kirill A. 
Shutemov" Date: Mon, 4 Dec 2017 15:40:56 +0300 Subject: [PATCH 0663/3326] x86/boot/compressed/64: Print error if 5-level paging is not supported commit 6d7e0ba2d2be9e50cccba213baf07e0e183c1b24 upstream. If the machine does not support the paging mode for which the kernel was compiled, the boot process cannot continue. It's not possible to let the kernel detect the mismatch as it does not even reach the point where cpu features can be evaluted due to a triple fault in the KASLR setup. Instead of instantaneous silent reboot, emit an error message which gives the user the information why the boot fails. Fixes: 77ef56e4f0fb ("x86: Enable 5-level paging support via CONFIG_X86_5LEVEL=y") Reported-by: Borislav Petkov Signed-off-by: Kirill A. Shutemov Signed-off-by: Thomas Gleixner Tested-by: Borislav Petkov Cc: Andi Kleen Cc: Andy Lutomirski Cc: linux-mm@kvack.org Cc: Cyrill Gorcunov Cc: Linus Torvalds Link: https://lkml.kernel.org/r/20171204124059.63515-3-kirill.shutemov@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/boot/compressed/misc.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index b50c42455e2525..98761a1576ceb5 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -169,6 +169,16 @@ void __puthex(unsigned long value) } } +static bool l5_supported(void) +{ + /* Check if leaf 7 is supported. */ + if (native_cpuid_eax(0) < 7) + return 0; + + /* Check if la57 is supported. */ + return native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)); +} + #if CONFIG_X86_NEED_RELOCS static void handle_relocations(void *output, unsigned long output_len, unsigned long virt_addr) @@ -362,6 +372,12 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, console_init(); debug_putstr("early console in extract_kernel\n"); + if (IS_ENABLED(CONFIG_X86_5LEVEL) && !l5_supported()) { + error("This linux kernel as configured requires 5-level paging\n" + "This CPU does not support the required 'cr4.la57' feature\n" + "Unable to boot - please use a kernel appropriate for your CPU\n"); + } + free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; From 6e46e964e2978f64f30b89be2423093b43e77466 Mon Sep 17 00:00:00 2001 From: David Lechner Date: Sun, 3 Dec 2017 19:54:41 -0600 Subject: [PATCH 0664/3326] eeprom: at24: change nvmem stride to 1 commit 7f6d2ecd3d7acaf205ea7b3e96f9ffc55b92298b upstream. Trying to read the MAC address from an eeprom that has an offset that is not a multiple of 4 causes an error currently. Fix it by changing the nvmem stride to 1. 
Signed-off-by: David Lechner [Bartosz: tweaked the commit message] Signed-off-by: Bartosz Golaszewski Signed-off-by: Greg Kroah-Hartman --- drivers/misc/eeprom/at24.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 372b2060fbba1e..4cc0b42f2acc5b 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -776,7 +776,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) at24->nvmem_config.reg_read = at24_read; at24->nvmem_config.reg_write = at24_write; at24->nvmem_config.priv = at24; - at24->nvmem_config.stride = 4; + at24->nvmem_config.stride = 1; at24->nvmem_config.word_size = 1; at24->nvmem_config.size = chip.byte_len; From 3df23f7ce7255d1ef2a616071cac359a245fb6de Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 15 Dec 2017 10:32:03 +0100 Subject: [PATCH 0665/3326] posix-timer: Properly check sigevent->sigev_notify commit cef31d9af908243421258f1df35a4a644604efbe upstream. timer_create() specifies via sigevent->sigev_notify the signal delivery for the new timer. The valid modes are SIGEV_NONE, SIGEV_SIGNAL, SIGEV_THREAD and (SIGEV_SIGNAL | SIGEV_THREAD_ID). The sanity check in good_sigevent() is only checking the valid combination for the SIGEV_THREAD_ID bit, i.e. SIGEV_SIGNAL, but if SIGEV_THREAD_ID is not set it accepts any random value. This has no real effects on the posix timer and signal delivery code, but it affects show_timer() which handles the output of /proc/$PID/timers. That function uses a string array to pretty print sigev_notify. The access to that array has no bounds checks, so a random sigev_notify value causes accesses beyond the array bounds. Add proper checks for the valid notify modes and remove the SIGEV_THREAD_ID masking from various code paths as SIGEV_NONE can never be set in combination with SIGEV_THREAD_ID.
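A minimal userspace sketch (illustrative only, not part of this patch) of a sigevent that passes the tightened checks; any other sigev_notify value is now rejected with EINVAL instead of being accepted and later tripping up show_timer():

    /* build: cc -o t timer.c -lrt   (-lrt only needed with older glibc) */
    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct sigevent sev = { 0 };
        timer_t tid;

        /*
         * Valid modes: SIGEV_NONE, SIGEV_SIGNAL, SIGEV_THREAD, or
         * SIGEV_SIGNAL | SIGEV_THREAD_ID (the last one additionally needs
         * sigev_notify_thread_id pointing at a thread in this thread group).
         */
        sev.sigev_notify = SIGEV_SIGNAL;
        sev.sigev_signo = SIGRTMIN;    /* must be a valid signal number */

        if (timer_create(CLOCK_MONOTONIC, &sev, &tid) == -1)
            perror("timer_create");
        return 0;
    }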
Reported-by: Eric Biggers Reported-by: Dmitry Vyukov Reported-by: Alexey Dobriyan Signed-off-by: Thomas Gleixner Cc: John Stultz Signed-off-by: Greg Kroah-Hartman --- kernel/time/posix-timers.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 13d6881f908b7f..ec999f32c84058 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -434,17 +434,22 @@ static struct pid *good_sigevent(sigevent_t * event) { struct task_struct *rtn = current->group_leader; - if ((event->sigev_notify & SIGEV_THREAD_ID ) && - (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || - !same_thread_group(rtn, current) || - (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL)) + switch (event->sigev_notify) { + case SIGEV_SIGNAL | SIGEV_THREAD_ID: + rtn = find_task_by_vpid(event->sigev_notify_thread_id); + if (!rtn || !same_thread_group(rtn, current)) + return NULL; + /* FALLTHRU */ + case SIGEV_SIGNAL: + case SIGEV_THREAD: + if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX) + return NULL; + /* FALLTHRU */ + case SIGEV_NONE: + return task_pid(rtn); + default: return NULL; - - if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && - ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) - return NULL; - - return task_pid(rtn); + } } static struct k_itimer * alloc_posix_timer(void) @@ -669,7 +674,7 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) struct timespec64 ts64; bool sig_none; - sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE; + sig_none = timr->it_sigev_notify == SIGEV_NONE; iv = timr->it_interval; /* interval timer ? */ @@ -856,7 +861,7 @@ int common_timer_set(struct k_itimer *timr, int flags, timr->it_interval = timespec64_to_ktime(new_setting->it_interval); expires = timespec64_to_ktime(new_setting->it_value); - sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE; + sigev_none = timr->it_sigev_notify == SIGEV_NONE; kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none); timr->it_active = !sigev_none; From 0aa5d007ba673bc35378422b0c2b2b797da69424 Mon Sep 17 00:00:00 2001 From: Adam Wallis Date: Mon, 27 Nov 2017 10:45:01 -0500 Subject: [PATCH 0666/3326] dmaengine: dmatest: move callback wait queue to thread context commit 6f6a23a213be51728502b88741ba6a10cda2441d upstream. Commit adfa543e7314 ("dmatest: don't use set_freezable_with_signal()") introduced a bug (that is in fact documented by the patch commit text) that leaves behind a dangling pointer. Since the done_wait structure is allocated on the stack, future invocations to the DMATEST can produce undesirable results (e.g., corrupted spinlocks). Commit a9df21e34b42 ("dmaengine: dmatest: warn user when dma test times out") attempted to WARN the user that the stack was likely corrupted but did not fix the actual issue. This patch fixes the issue by pushing the wait queue and callback structs into the the thread structure. If a failure occurs due to time, dmaengine_terminate_all will force the callback to safely call wake_up_all() without possibility of using a freed pointer. 
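Condensed, the new lifetime rule looks like this (an abbreviated sketch, not the full dmatest code): the wait queue and the done flag live in the per-thread structure, so a callback that fires after the test function has given up still dereferences valid memory.

    struct dmatest_done {
        bool done;
        wait_queue_head_t *wait;
    };

    struct dmatest_thread {
        wait_queue_head_t done_wait;    /* same lifetime as the thread */
        struct dmatest_done test_done;
        bool done;
    };

    static void dmatest_callback(void *arg)
    {
        struct dmatest_done *done = arg;
        struct dmatest_thread *thread =
            container_of(done, struct dmatest_thread, test_done);

        if (!thread->done) {            /* test thread has not torn down yet */
            done->done = true;
            wake_up_all(done->wait);
        }
    }

The thread sets test_done.wait = &done_wait after init_waitqueue_head() and sleeps with wait_event_freezable_timeout(done_wait, test_done.done, ...), so nothing the callback touches belongs to a stack frame that may already be gone.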
Bug: https://bugzilla.kernel.org/show_bug.cgi?id=197605 Fixes: adfa543e7314 ("dmatest: don't use set_freezable_with_signal()") Reviewed-by: Sinan Kaya Suggested-by: Shunyong Yang Signed-off-by: Adam Wallis Signed-off-by: Vinod Koul Signed-off-by: Greg Kroah-Hartman --- drivers/dma/dmatest.c | 55 ++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 47edc7fbf91f52..ec5f9d2bc8202f 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)"); #define PATTERN_COUNT_MASK 0x1f #define PATTERN_MEMSET_IDX 0x01 +/* poor man's completion - we want to use wait_event_freezable() on it */ +struct dmatest_done { + bool done; + wait_queue_head_t *wait; +}; + struct dmatest_thread { struct list_head node; struct dmatest_info *info; @@ -165,6 +171,8 @@ struct dmatest_thread { u8 **dsts; u8 **udsts; enum dma_transaction_type type; + wait_queue_head_t done_wait; + struct dmatest_done test_done; bool done; }; @@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, return error_count; } -/* poor man's completion - we want to use wait_event_freezable() on it */ -struct dmatest_done { - bool done; - wait_queue_head_t *wait; -}; static void dmatest_callback(void *arg) { struct dmatest_done *done = arg; - - done->done = true; - wake_up_all(done->wait); + struct dmatest_thread *thread = + container_of(arg, struct dmatest_thread, done_wait); + if (!thread->done) { + done->done = true; + wake_up_all(done->wait); + } else { + /* + * If thread->done, it means that this callback occurred + * after the parent thread has cleaned up. This can + * happen in the case that driver doesn't implement + * the terminate_all() functionality and a dma operation + * did not occur within the timeout period + */ + WARN(1, "dmatest: Kernel memory may be corrupted!!\n"); + } } static unsigned int min_odd(unsigned int x, unsigned int y) @@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) */ static int dmatest_func(void *data) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); struct dmatest_thread *thread = data; - struct dmatest_done done = { .wait = &done_wait }; + struct dmatest_done *done = &thread->test_done; struct dmatest_info *info; struct dmatest_params *params; struct dma_chan *chan; @@ -673,9 +687,9 @@ static int dmatest_func(void *data) continue; } - done.done = false; + done->done = false; tx->callback = dmatest_callback; - tx->callback_param = &done; + tx->callback_param = done; cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { @@ -688,21 +702,12 @@ static int dmatest_func(void *data) } dma_async_issue_pending(chan); - wait_event_freezable_timeout(done_wait, done.done, + wait_event_freezable_timeout(thread->done_wait, done->done, msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); - if (!done.done) { - /* - * We're leaving the timed out dma operation with - * dangling pointer to done_wait. To make this - * correct, we'll need to allocate wait_done for - * each test iteration and perform "who's gonna - * free it this time?" dancing. For now, just - * leave it dangling. 
- */ - WARN(1, "dmatest: Kernel stack may be corrupted!!\n"); + if (!done->done) { dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); @@ -789,7 +794,7 @@ static int dmatest_func(void *data) dmatest_KBs(runtime, total_len), ret); /* terminate all transfers on specified channels */ - if (ret) + if (ret || failed_tests) dmaengine_terminate_all(chan); thread->done = true; @@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info, thread->info = info; thread->chan = dtc->chan; thread->type = type; + thread->test_done.wait = &thread->done_wait; + init_waitqueue_head(&thread->done_wait); smp_wmb(); thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", dma_chan_name(chan), op, i); From 2dea756b487547788c1f28439fbc3df270e6bf30 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 12 Dec 2017 11:28:38 -0800 Subject: [PATCH 0667/3326] Revert "exec: avoid RLIMIT_STACK races with prlimit()" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 779f4e1c6c7c661db40dfebd6dd6bda7b5f88aa3 upstream. This reverts commit 04e35f4495dd560db30c25efca4eecae8ec8c375. SELinux runs with secureexec for all non-"noatsecure" domain transitions, which means lots of processes end up hitting the stack hard-limit change that was introduced in order to fix a race with prlimit(). That race fix will need to be redesigned. Reported-by: Laura Abbott Reported-by: Tomáš Trnka Signed-off-by: Kees Cook Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/exec.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index 4726c777dd387b..3e14ba25f678bf 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1340,15 +1340,10 @@ void setup_new_exec(struct linux_binprm * bprm) * avoid bad behavior from the prior rlimits. This has to * happen before arch_pick_mmap_layout(), which examines * RLIMIT_STACK, but after the point of no return to avoid - * races from other threads changing the limits. This also - * must be protected from races with prlimit() calls. + * needing to clean up the change on failure. */ - task_lock(current->group_leader); if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM) current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM; - if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM) - current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM; - task_unlock(current->group_leader); } arch_pick_mmap_layout(current->mm); From ae0bba6b38110e8b57b92aa820df8f78d3965fe6 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sun, 3 Dec 2017 20:38:01 -0500 Subject: [PATCH 0668/3326] ext4: support fast symlinks from ext3 file systems commit fc82228a5e3860502dbf3bfa4a9570cb7093cf7f upstream. 407cd7fb83c0 (ext4: change fast symlink test to not rely on i_blocks) broke ~10 years old ext3 file systems created by 2.6.17. Any ELF executable fails because the /lib/ld-linux.so.2 fast symlink cannot be read anymore. The patch assumed fast symlinks were created in a specific way, but that's not true on these really old file systems. The new behavior is apparently needed only with the large EA inode feature. Revert to the old behavior if the large EA inode feature is not set. This makes my old VM boot again. 
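Condensed, the resulting test looks roughly like this (illustrative sketch of the hunk below, not the verbatim ext4 source): inodes that are not using the large-EA-inode mechanism get the historical i_blocks-based check back, everything else keeps the i_size-based check.

  int ext4_inode_is_fast_symlink(struct inode *inode)
  {
          if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
                  /* legacy rule: no data blocks beyond an optional
                   * external xattr block (counted in 512-byte units) */
                  int ea_blocks = EXT4_I(inode)->i_file_acl ?
                          EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

                  if (ext4_has_inline_data(inode))
                          return 0;

                  return S_ISLNK(inode->i_mode) &&
                         inode->i_blocks - ea_blocks == 0;
          }

          /* newer rule, needed once large EA inodes are in play */
          return S_ISLNK(inode->i_mode) && inode->i_size &&
                 inode->i_size < EXT4_N_BLOCKS * 4;
  }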
Fixes: 407cd7fb83c0 (ext4: change fast symlink test to not rely on i_blocks) Signed-off-by: Andi Kleen Signed-off-by: Theodore Ts'o Reviewed-by: Andreas Dilger Signed-off-by: Greg Kroah-Hartman --- fs/ext4/inode.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 38eb621edd80c9..ea2ccc524bd98b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -149,6 +149,15 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, */ int ext4_inode_is_fast_symlink(struct inode *inode) { + if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { + int ea_blocks = EXT4_I(inode)->i_file_acl ? + EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0; + + if (ext4_has_inline_data(inode)) + return 0; + + return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); + } return S_ISLNK(inode->i_mode) && inode->i_size && (inode->i_size < EXT4_N_BLOCKS * 4); } From 4ff7da066d64e5083ef886203ba3a0623343c632 Mon Sep 17 00:00:00 2001 From: Eryu Guan Date: Sun, 3 Dec 2017 22:52:51 -0500 Subject: [PATCH 0669/3326] ext4: fix fdatasync(2) after fallocate(2) operation commit c894aa97577e47d3066b27b32499ecf899bfa8b0 upstream. Currently, fallocate(2) with KEEP_SIZE followed by a fdatasync(2) then crash, we'll see wrong allocated block number (stat -c %b), the blocks allocated beyond EOF are all lost. fstests generic/468 exposes this bug. Commit 67a7d5f561f4 ("ext4: fix fdatasync(2) after extent manipulation operations") fixed all the other extent manipulation operation paths such as hole punch, zero range, collapse range etc., but forgot the fallocate case. So similarly, fix it by recording the correct journal tid in ext4 inode in fallocate(2) path, so that ext4_sync_file() will wait for the right tid to be committed on fdatasync(2). This addresses the test failure in xfstests test generic/468. Signed-off-by: Eryu Guan Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ext4/extents.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 07bca11749d406..c941251ac0c008 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4722,6 +4722,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, EXT4_INODE_EOFBLOCKS); } ext4_mark_inode_dirty(handle, inode); + ext4_update_inode_fsync_trans(handle, inode, 1); ret2 = ext4_journal_stop(handle); if (ret2) break; From 0228af23dd6962c16682d85fdf1bab6bbf77a007 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sun, 10 Dec 2017 23:44:11 -0500 Subject: [PATCH 0670/3326] ext4: add missing error check in __ext4_new_inode() commit 996fc4477a0ea28226b30d175f053fb6f9a4fa36 upstream. It's possible for ext4_get_acl() to return an ERR_PTR. So we need to add a check for this case in __ext4_new_inode(). Otherwise on an error we can end up oops the kernel. This was getting triggered by xfstests generic/388, which is a test which exercises the shutdown code path. 
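The underlying rule is the usual three-way ERR_PTR contract; a minimal sketch of how a caller has to treat get_acl() before dereferencing the result (names as in the hunk below):

  struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);

  if (IS_ERR(p))                  /* lookup failed, e.g. I/O error or shutdown */
          return ERR_CAST(p);     /* propagate the encoded errno               */
  if (p) {                        /* a default ACL exists                      */
          /* ... consume it, then posix_acl_release(p) ...                     */
  }
  /* p == NULL simply means "no default ACL", which is not an error            */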
Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ext4/ialloc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index c5f697a3fad43b..207588dc803e06 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -816,6 +816,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, #ifdef CONFIG_EXT4_FS_POSIX_ACL struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT); + if (IS_ERR(p)) + return ERR_CAST(p); if (p) { int acl_size = p->a_count * sizeof(ext4_acl_entry); From 38620054ea18edb165451bf24367f212960ff344 Mon Sep 17 00:00:00 2001 From: Chandan Rajendra Date: Mon, 11 Dec 2017 15:00:57 -0500 Subject: [PATCH 0671/3326] ext4: fix crash when a directory's i_size is too small commit 9d5afec6b8bd46d6ed821aa1579634437f58ef1f upstream. On a ppc64 machine, when mounting a fuzzed ext2 image (generated by fsfuzzer) the following call trace is seen, VFS: brelse: Trying to free free buffer WARNING: CPU: 1 PID: 6913 at /root/repos/linux/fs/buffer.c:1165 .__brelse.part.6+0x24/0x40 .__brelse.part.6+0x20/0x40 (unreliable) .ext4_find_entry+0x384/0x4f0 .ext4_lookup+0x84/0x250 .lookup_slow+0xdc/0x230 .walk_component+0x268/0x400 .path_lookupat+0xec/0x2d0 .filename_lookup+0x9c/0x1d0 .vfs_statx+0x98/0x140 .SyS_newfstatat+0x48/0x80 system_call+0x58/0x6c This happens because the directory that ext4_find_entry() looks up has inode->i_size that is less than the block size of the filesystem. This causes 'nblocks' to have a value of zero. ext4_bread_batch() ends up not reading any of the directory file's blocks. This renders the entries in bh_use[] array to continue to have garbage data. buffer_uptodate() on bh_use[0] can then return a zero value upon which brelse() function is invoked. This commit fixes the bug by returning -ENOENT when the directory file has no associated blocks. Reported-by: Abdul Haleem Signed-off-by: Chandan Rajendra Signed-off-by: Greg Kroah-Hartman --- fs/ext4/namei.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index bd48a8d83961bc..fccf295fcb03a5 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1399,6 +1399,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir, "falling back\n")); } nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); + if (!nblocks) { + ret = NULL; + goto cleanup_and_exit; + } start = EXT4_I(dir)->i_dir_start_lookup; if (start >= nblocks) start = 0; From b3e3728ed55b9ddd7b1600ebad11f16aa665dfcb Mon Sep 17 00:00:00 2001 From: Guy Levi Date: Wed, 25 Oct 2017 22:39:35 +0300 Subject: [PATCH 0672/3326] IB/mlx4: Fix RSS's QPC attributes assignments [ Upstream commit 108809a0571cd1e1b317c5c083a371e163e1f8f9 ] In the modify QP handler the base_qpn_udp field in the RSS QPC is overwrite later by irrelevant value assignment. Hence, ingress packets which gets to the RSS QP will be steered then to a garbage QPN. The patch fixes this by skipping the above assignment when a RSS QP is modified, also, the RSS context's attributes assignments are relocated just before the context is posted to avoid future issues like this. Additionally, this patch takes the opportunity to change the code to be disciplined to the device's manual and assigns the RSS QP context just at RESET to INIT transition. 
Fixes:3078f5f1bd8b ("IB/mlx4: Add support for RSS QP") Signed-off-by: Guy Levi Reviewed-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/mlx4/qp.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 17e44c86577a9b..fcfa0874789959 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -2182,11 +2182,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) | (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); - if (rwq_ind_tbl) { - fill_qp_rss_context(context, qp); - context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET); - } - if (!(attr_mask & IB_QP_PATH_MIG_STATE)) context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); else { @@ -2387,6 +2382,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, context->pd = cpu_to_be32(pd->pdn); if (!rwq_ind_tbl) { + context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); get_cqs(qp, src_type, &send_cq, &recv_cq); } else { /* Set dummy CQs to be compatible with HV and PRM */ send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq); @@ -2394,7 +2390,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, } context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn); - context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); /* Set "fast registration enabled" for all kernel QPs */ if (!ibuobject) @@ -2513,7 +2508,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, MLX4_IB_LINK_TYPE_ETH; if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { /* set QP to receive both tunneled & non-tunneled packets */ - if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET))) + if (!rwq_ind_tbl) context->srqn = cpu_to_be32(7 << 28); } } @@ -2562,6 +2557,13 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, } } + if (rwq_ind_tbl && + cur_state == IB_QPS_RESET && + new_state == IB_QPS_INIT) { + fill_qp_rss_context(context, qp); + context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET); + } + err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), to_mlx4_state(new_state), context, optpar, sqd_event, &qp->mqp); From dba1ca0e9a6fc57703eaa3c8d7293b8efc72c2af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Szymanski?= Date: Fri, 10 Nov 2017 10:01:43 +0100 Subject: [PATCH 0673/3326] HID: cp2112: fix broken gpio_direction_input callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 7da85fbf1c87d4f73621e0e7666a3387497075a9 ] When everything goes smoothly, ret is set to 0 which makes the function to return EIO error. 
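The underlying mismatch is that hid_hw_raw_request() reports success as a positive byte count while a gpiochip callback must return 0 or a negative errno. A schematic of the normalization the fix applies at each call site (illustrative, not the verbatim driver code):

  ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
                           CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
                           HID_REQ_GET_REPORT);
  if (ret != CP2112_GPIO_CONFIG_LENGTH) {
          hid_err(hdev, "error requesting GPIO config: %d\n", ret);
          if (ret >= 0)
                  ret = -EIO;     /* short transfer: map to an errno   */
          goto exit;
  }
  /* ... program the direction ... */
  ret = 0;                        /* full transfer: report success     */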
Fixes: 8e9faa15469e ("HID: cp2112: fix gpio-callback error handling") Signed-off-by: Sébastien Szymanski Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/hid/hid-cp2112.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 078026f63b6f40..4e940a096b2ac2 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -196,6 +196,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); + if (ret >= 0) + ret = -EIO; goto exit; } @@ -205,8 +207,10 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - if (ret < 0) { + if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error setting GPIO config: %d\n", ret); + if (ret >= 0) + ret = -EIO; goto exit; } @@ -214,7 +218,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) exit: mutex_unlock(&dev->lock); - return ret < 0 ? ret : -EIO; + return ret; } static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) From 71307dd192e7ac012894dfebd9c47cb5afc0611b Mon Sep 17 00:00:00 2001 From: Robert Stonehouse Date: Tue, 7 Nov 2017 17:30:30 +0000 Subject: [PATCH 0674/3326] sfc: don't warn on successful change of MAC [ Upstream commit cbad52e92ad7f01f0be4ca58bde59462dc1afe3a ] Fixes: 535a61777f44e ("sfc: suppress handled MCDI failures when changing the MAC address") Signed-off-by: Bert Kenward Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/sfc/ef10.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 13f72f5b18d20d..09352ee43b55cb 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -5726,7 +5726,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) * MCFW do not support VFs. */ rc = efx_ef10_vport_set_mac_address(efx); - } else { + } else if (rc) { efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, sizeof(inbuf), NULL, 0, rc); } From f9a40df232448539f6586f9a0c72ca89fd694fbe Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Nov 2017 18:09:33 +0100 Subject: [PATCH 0675/3326] fbdev: controlfb: Add missing modes to fix out of bounds access [ Upstream commit ac831a379d34109451b3c41a44a20ee10ecb615f ] Dan's static analysis says: drivers/video/fbdev/controlfb.c:560 control_setup() error: buffer overflow 'control_mac_modes' 20 <= 21 Indeed, control_mac_modes[] has only 20 elements, while VMODE_MAX is 22, which may lead to an out of bounds read when parsing vmode commandline options. The bug was introduced in v2.4.5.6, when 2 new modes were added to macmodes.h, but control_mac_modes[] wasn't updated: https://kernel.opensuse.org/cgit/kernel/diff/include/video/macmodes.h?h=v2.5.2&id=29f279c764808560eaceb88fef36cbc35c529aad Augment control_mac_modes[] with the two new video modes to fix this. 
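Beyond adding the two rows, this kind of table/enum drift is easy to guard against at compile time; a possible belt-and-braces check (hypothetical, not part of the patch) placed inside control_setup(), which indexes the table with vmode - 1, would be:

  /* the vmode command-line parsing indexes control_mac_modes[vmode - 1],
   * so the table must always provide VMODE_MAX entries */
  BUILD_BUG_ON(ARRAY_SIZE(control_mac_modes) != VMODE_MAX);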
Reported-by: Dan Carpenter Signed-off-by: Geert Uytterhoeven Cc: Dan Carpenter Cc: Benjamin Herrenschmidt Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/controlfb.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h index 6026c60fc1007e..261522fabdac89 100644 --- a/drivers/video/fbdev/controlfb.h +++ b/drivers/video/fbdev/controlfb.h @@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = { {{ 1, 2}}, /* 1152x870, 75Hz */ {{ 0, 1}}, /* 1280x960, 75Hz */ {{ 0, 1}}, /* 1280x1024, 75Hz */ + {{ 1, 2}}, /* 1152x768, 60Hz */ + {{ 0, 1}}, /* 1600x1024, 60Hz */ }; From 456279d0e3ad7cb3b6b90aeb07c189c7f8590ab0 Mon Sep 17 00:00:00 2001 From: Ladislav Michl Date: Thu, 9 Nov 2017 18:09:30 +0100 Subject: [PATCH 0676/3326] video: udlfb: Fix read EDID timeout [ Upstream commit c98769475575c8a585f5b3952f4b5f90266f699b ] While usb_control_msg function expects timeout in miliseconds, a value of HZ is used. Replace it with USB_CTRL_GET_TIMEOUT and also fix error message which looks like: udlfb: Read EDID byte 78 failed err ffffff92 as error is either negative errno or number of bytes transferred use %d format specifier. Returned EDID is in second byte, so return error when less than two bytes are received. Fixes: 18dffdf8913a ("staging: udlfb: enhance EDID and mode handling support") Signed-off-by: Ladislav Michl Cc: Bernie Thompson Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/udlfb.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index ef08a104fb42c6..d44f14242016e0 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len) for (i = 0; i < len; i++) { ret = usb_control_msg(dev->udev, - usb_rcvctrlpipe(dev->udev, 0), (0x02), - (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2, - HZ); - if (ret < 1) { - pr_err("Read EDID byte %d failed err %x\n", i, ret); + usb_rcvctrlpipe(dev->udev, 0), 0x02, + (0x80 | (0x02 << 5)), i << 8, 0xA1, + rbuf, 2, USB_CTRL_GET_TIMEOUT); + if (ret < 2) { + pr_err("Read EDID byte %d failed: %d\n", i, ret); i--; break; } From df235d2ee36e70a43b122c0eb34064794ef4464c Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 9 Nov 2017 18:09:28 +0100 Subject: [PATCH 0677/3326] video: fbdev: au1200fb: Release some resources if a memory allocation fails [ Upstream commit 451f130602619a17c8883dd0b71b11624faffd51 ] We should go through the error handling code instead of returning -ENOMEM directly. 
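The shape of the fix is the standard single-exit unwind; a minimal sketch (hypothetical probe skeleton, not the actual au1200fb code) of why the bare return leaks whatever was set up earlier:

  static int example_probe(struct platform_device *pdev)
  {
          struct fb_info *fbi;
          void *fb_mem;
          int ret;

          fbi = framebuffer_alloc(0, &pdev->dev);
          if (!fbi) {
                  ret = -ENOMEM;          /* nothing to unwind yet        */
                  goto failed;
          }

          fb_mem = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
          if (!fb_mem) {
                  ret = -ENOMEM;          /* was "return -ENOMEM", which
                                             skipped the unwind below     */
                  goto failed;
          }

          return 0;

  failed:
          if (fbi)
                  framebuffer_release(fbi);
          return ret;
  }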
Signed-off-by: Christophe JAILLET Cc: Tejun Heo Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/au1200fb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 5f04b4096c4288..99d6cfb168b5df 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1701,7 +1701,8 @@ static int au1200fb_drv_probe(struct platform_device *dev) if (!fbdev->fb_mem) { print_err("fail to allocate frambuffer (size: %dK))", fbdev->fb_len / 1024); - return -ENOMEM; + ret = -ENOMEM; + goto failed; } /* From d6f59ef5b840c5815430d63bb08e8dcbd9e2a132 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 9 Nov 2017 18:09:28 +0100 Subject: [PATCH 0678/3326] video: fbdev: au1200fb: Return an error code if a memory allocation fails [ Upstream commit 8cae353e6b01ac3f18097f631cdbceb5ff28c7f3 ] 'ret' is known to be 0 at this point. In case of memory allocation error in 'framebuffer_alloc()', return -ENOMEM instead. Signed-off-by: Christophe JAILLET Cc: Tejun Heo Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/au1200fb.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 99d6cfb168b5df..6c542d0ca076e8 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1681,8 +1681,10 @@ static int au1200fb_drv_probe(struct platform_device *dev) fbi = framebuffer_alloc(sizeof(struct au1200fb_device), &dev->dev); - if (!fbi) + if (!fbi) { + ret = -ENOMEM; goto failed; + } _au1200fb_infos[plane] = fbi; fbdev = fbi->par; From 28461320cc952e8a971ec415743ae97120937fbd Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Tue, 7 Nov 2017 13:12:17 +0100 Subject: [PATCH 0679/3326] rtc: pcf8563: fix output clock rate [ Upstream commit a3350f9c57ffad569c40f7320b89da1f3061c5bb ] The pcf8563_clkout_recalc_rate function erroneously ignores the frequency index read from the CLKO register and always returns 32768 Hz. Fixes: a39a6405d5f9 ("rtc: pcf8563: add CLKOUT to common clock framework") Signed-off-by: Philipp Zabel Signed-off-by: Alexandre Belloni Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/rtc/rtc-pcf8563.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index cea6ea4df970ff..8c836c51a508f3 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c @@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw, return 0; buf &= PCF8563_REG_CLKO_F_MASK; - return clkout_rates[ret]; + return clkout_rates[buf]; } static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate, From 73e31f2af17843c640fd7e0c8d73f13773dd1777 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 7 Nov 2017 11:46:05 +0100 Subject: [PATCH 0680/3326] scsi: aacraid: use timespec64 instead of timeval [ Upstream commit 820f188659122602ab217dd80cfa32b3ac0c55c0 ] aacraid passes the current time to the firmware in one of two ways, either as year/month/day/... or as 32-bit unsigned seconds. The first one is broken on 32-bit architectures as it cannot go past year 2038. Using timespec64 here makes it behave properly on both 32-bit and 64-bit architectures, and avoids relying on signed integer overflow to pass times into the second interface. 
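For reference, the 2038-safe pattern used throughout the patch boils down to the following (sketch, kernel context assumed):

  struct timespec64 now;
  struct tm cur_tm;
  time64_t local_time;

  ktime_get_real_ts64(&now);                       /* 64-bit tv_sec everywhere */
  local_time = now.tv_sec - sys_tz.tz_minuteswest * 60;
  time64_to_tm(local_time, 0, &cur_tm);            /* no 32-bit truncation     */

tv_sec in struct timespec64 is time64_t on all architectures, whereas the old do_gettimeofday()/time_to_tm() pair wraps in January 2038 on 32-bit builds.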
The interface used in aac_send_hosttime() however is still problematic in year 2106 when 32-bit seconds overflow. Hopefully we don't have to worry about aacraid by that time. Signed-off-by: Arnd Bergmann Reviewed-by: Dave Carroll Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/aacraid/commsup.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index dfe8e70f8d9968..525a652dab48e9 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -2383,19 +2383,19 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str, goto out; } -int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now) +int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now) { struct tm cur_tm; char wellness_str[] = "TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ"; u32 datasize = sizeof(wellness_str); - unsigned long local_time; + time64_t local_time; int ret = -ENODEV; if (!dev->sa_firmware) goto out; - local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60)); - time_to_tm(local_time, 0, &cur_tm); + local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60)); + time64_to_tm(local_time, 0, &cur_tm); cur_tm.tm_mon += 1; cur_tm.tm_year += 1900; wellness_str[8] = bin2bcd(cur_tm.tm_hour); @@ -2412,7 +2412,7 @@ int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now) return ret; } -int aac_send_hosttime(struct aac_dev *dev, struct timeval *now) +int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now) { int ret = -ENOMEM; struct fib *fibptr; @@ -2424,7 +2424,7 @@ int aac_send_hosttime(struct aac_dev *dev, struct timeval *now) aac_fib_init(fibptr); info = (__le32 *)fib_data(fibptr); - *info = cpu_to_le32(now->tv_sec); + *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */ ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal, 1, 1, NULL, NULL); @@ -2496,7 +2496,7 @@ int aac_command_thread(void *data) } if (!time_before(next_check_jiffies,next_jiffies) && ((difference = next_jiffies - jiffies) <= 0)) { - struct timeval now; + struct timespec64 now; int ret; /* Don't even try to talk to adapter if its sick */ @@ -2506,15 +2506,15 @@ int aac_command_thread(void *data) next_check_jiffies = jiffies + ((long)(unsigned)check_interval) * HZ; - do_gettimeofday(&now); + ktime_get_real_ts64(&now); /* Synchronize our watches */ - if (((1000000 - (1000000 / HZ)) > now.tv_usec) - && (now.tv_usec > (1000000 / HZ))) - difference = (((1000000 - now.tv_usec) * HZ) - + 500000) / 1000000; + if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec) + && (now.tv_nsec > (NSEC_PER_SEC / HZ))) + difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ) + + NSEC_PER_SEC / 2) / NSEC_PER_SEC; else { - if (now.tv_usec > 500000) + if (now.tv_nsec > NSEC_PER_SEC / 2) ++now.tv_sec; if (dev->sa_firmware) From c4760b9c8d06f541d37784afb3bbeaf895346b9b Mon Sep 17 00:00:00 2001 From: Pixel Ding Date: Wed, 8 Nov 2017 10:20:01 +0800 Subject: [PATCH 0681/3326] drm/amdgpu: bypass lru touch for KIQ ring submission MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit dce1e131dd4dc68099ff1b70aa03cd2d0acf8639 ] KIQ ring submission is used for register accessing on SRIOV VF that could happen both in irq enabled and irq disabled cases. 
Inversion lock could happen on adev->ring_lru_list_lock, while this operation is useless and just adds overhead in this use case. Signed-off-by: Pixel Ding Reviewed-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 5ce65280b3960f..90adff83e4898f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -136,7 +136,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) if (ring->funcs->end_use) ring->funcs->end_use(ring); - amdgpu_ring_lru_touch(ring->adev, ring); + if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) + amdgpu_ring_lru_touch(ring->adev, ring); } /** From 32f733a8f470bf37b77da08d60868aace87f78f7 Mon Sep 17 00:00:00 2001 From: Rajat Jain Date: Tue, 31 Oct 2017 14:44:24 -0700 Subject: [PATCH 0682/3326] PM / s2idle: Clear the events_check_enabled flag [ Upstream commit 95b982b45122c57da2ee0b46cce70775e1d987af ] Problem: This flag does not get cleared currently in the suspend or resume path in the following cases: * In case some driver's suspend routine returns an error. * Successful s2idle case * etc? Why is this a problem: What happens is that the next suspend attempt could fail even though the user did not enable the flag by writing to /sys/power/wakeup_count. This is 1 use case how the issue can be seen (but similar use case with driver suspend failure can be thought of): 1. Read /sys/power/wakeup_count 2. echo count > /sys/power/wakeup_count 3. echo freeze > /sys/power/wakeup_count 4. Let the system suspend, and wakeup the system using some wake source that calls pm_wakeup_event() e.g. power button or something. 5. Note that the combined wakeup count would be incremented due to the pm_wakeup_event() in the resume path. 6. After resuming the events_check_enabled flag is still set. At this point if the user attempts to freeze again (without writing to /sys/power/wakeup_count), the suspend would fail even though there has been no wake event since the past resume. Address that by clearing the flag just before a resume is completed, so that it is always cleared for the corner cases mentioned above. Signed-off-by: Rajat Jain Acked-by: Pavel Machek Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/power/suspend.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index ccd2d20e6b067f..0685c449943146 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -437,7 +437,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) error = suspend_ops->enter(state); trace_suspend_resume(TPS("machine_suspend"), state, false); - events_check_enabled = false; } else if (*wakeup) { error = -EBUSY; } @@ -582,6 +581,7 @@ static int enter_state(suspend_state_t state) pm_restore_gfp_mask(); Finish: + events_check_enabled = false; pm_pr_dbg("Finishing wakeup.\n"); suspend_finish(); Unlock: From b1d0ddc83e3cb2b9e5641ee52f235fcf463fd1d8 Mon Sep 17 00:00:00 2001 From: Pankaj Bharadiya Date: Tue, 7 Nov 2017 16:16:19 +0530 Subject: [PATCH 0683/3326] ASoC: Intel: Skylake: Fix uuid_module memory leak in failure case [ Upstream commit f8e066521192c7debe59127d90abbe2773577e25 ] In the loop that adds the uuid_module to the uuid_list list, allocated memory is not properly freed in the error path free uuid_list whenever any of the memory allocation in the loop fails to avoid memory leak. Signed-off-by: Pankaj Bharadiya Signed-off-by: Guneshwor Singh Acked-By: Vinod Koul Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/soc/intel/skylake/skl-sst-utils.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c index 369ef7ce981c79..8ff89280d9fd44 100644 --- a/sound/soc/intel/skylake/skl-sst-utils.c +++ b/sound/soc/intel/skylake/skl-sst-utils.c @@ -251,6 +251,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, struct uuid_module *module; struct firmware stripped_fw; unsigned int safe_file; + int ret = 0; /* Get the FW pointer to derive ADSP header */ stripped_fw.data = fw->data; @@ -299,8 +300,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, for (i = 0; i < num_entry; i++, mod_entry++) { module = kzalloc(sizeof(*module), GFP_KERNEL); - if (!module) - return -ENOMEM; + if (!module) { + ret = -ENOMEM; + goto free_uuid_list; + } uuid_bin = (uuid_le *)mod_entry->uuid.id; memcpy(&module->uuid, uuid_bin, sizeof(module->uuid)); @@ -311,8 +314,8 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, size = sizeof(int) * mod_entry->instance_max_count; module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL); if (!module->instance_id) { - kfree(module); - return -ENOMEM; + ret = -ENOMEM; + goto free_uuid_list; } list_add_tail(&module->list, &skl->uuid_list); @@ -323,6 +326,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw, } return 0; + +free_uuid_list: + skl_freeup_uuid_list(skl); + return ret; } void skl_freeup_uuid_list(struct skl_sst *ctx) From ac6864737c0240499f1cd0689cd61b2bad6e04c8 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 8 Nov 2017 12:02:25 +0200 Subject: [PATCH 0684/3326] dmaengine: ti-dma-crossbar: Correct am335x/am43xx mux value type [ Upstream commit 288e7560e4d3e259aa28f8f58a8dfe63627a1bf6 ] The used 0x1f mask is only valid for am335x family of SoC, different family using this type of crossbar might have different number of electable events. In case of am43xx family 0x3f mask should have been used for example. 
Instead of trying to handle each family's mask, just use u8 type to store the mux value since the event offsets are aligned to byte offset. Fixes: 42dbdcc6bf965 ("dmaengine: ti-dma-crossbar: Add support for crossbar on AM33xx/AM43xx") Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/dma/ti-dma-crossbar.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index f1d04b70ee672a..7df910e7c34881 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -49,12 +49,12 @@ struct ti_am335x_xbar_data { struct ti_am335x_xbar_map { u16 dma_line; - u16 mux_val; + u8 mux_val; }; -static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val) +static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) { - writeb_relaxed(val & 0x1f, iomem + event); + writeb_relaxed(val, iomem + event); } static void ti_am335x_xbar_free(struct device *dev, void *route_data) @@ -105,7 +105,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec, } map->dma_line = (u16)dma_spec->args[0]; - map->mux_val = (u16)dma_spec->args[2]; + map->mux_val = (u8)dma_spec->args[2]; dma_spec->args[2] = 0; dma_spec->args_count = 2; From 540236e28f32749f5d1711020fbcab5be3afbf7f Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 6 Nov 2017 11:11:28 +0000 Subject: [PATCH 0685/3326] mlxsw: spectrum: Fix error return code in mlxsw_sp_port_create() [ Upstream commit d86fd113ebbb37726ef7c7cc6fd6d5ce377455d6 ] Fix to return a negative error code from the VID create error handling case instead of 0, as done elsewhere in this function. Fixes: c57529e1d5d8 ("mlxsw: spectrum: Replace vPorts with Port-VLAN") Signed-off-by: Wei Yongjun Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 696b99e65a5a60..db38880f54b4c6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2974,6 +2974,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, if (IS_ERR(mlxsw_sp_port_vlan)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", mlxsw_sp_port->local_port); + err = PTR_ERR(mlxsw_sp_port_vlan); goto err_port_vlan_get; } From 27a12c783e700be095c39abe2749a20c3ab8ea11 Mon Sep 17 00:00:00 2001 From: Qiang Date: Thu, 28 Sep 2017 11:54:34 +0800 Subject: [PATCH 0686/3326] PCI/PME: Handle invalid data when reading Root Status [ Upstream commit 3ad3f8ce50914288731a3018b27ee44ab803e170 ] PCIe PME and native hotplug share the same interrupt number, so hotplug interrupts are also processed by PME. In some cases, e.g., a Link Down interrupt, a device may be present but unreachable, so when we try to read its Root Status register, the read fails and we get all ones data (0xffffffff). Previously, we interpreted that data as PCI_EXP_RTSTA_PME being set, i.e., "some device has asserted PME," so we scheduled pcie_pme_work_fn(). This caused an infinite loop because pcie_pme_work_fn() tried to handle PME requests until PCI_EXP_RTSTA_PME is cleared, but with the link down, PCI_EXP_RTSTA_PME can't be cleared. 
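The check itself is small; schematically (names as in the hunks below, handle_pme() is a placeholder), every read of the Root Status register gains the same guard:

  u32 rtsta;

  pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
  if (rtsta == (u32) ~0)          /* all ones: device unreachable, bail out */
          return IRQ_NONE;        /* or break out of the PME service loop   */

  if (rtsta & PCI_EXP_RTSTA_PME)
          handle_pme(port);       /* placeholder for the real handling      */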
Check for the invalid 0xffffffff data everywhere we read the Root Status register. 1469d17dd341 ("PCI: pciehp: Handle invalid data when reading from non-existent devices") added similar checks in the hotplug driver. Signed-off-by: Qiang Zheng [bhelgaas: changelog, also check in pcie_pme_work_fn(), use "~0" to follow other similar checks] Signed-off-by: Bjorn Helgaas Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/pcie/pme.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index fafdb165dd2ed6..df290aa58dce93 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -226,6 +226,9 @@ static void pcie_pme_work_fn(struct work_struct *work) break; pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); + if (rtsta == (u32) ~0) + break; + if (rtsta & PCI_EXP_RTSTA_PME) { /* * Clear PME status of the port. If there are other @@ -273,7 +276,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context) spin_lock_irqsave(&data->lock, flags); pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); - if (!(rtsta & PCI_EXP_RTSTA_PME)) { + if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) { spin_unlock_irqrestore(&data->lock, flags); return IRQ_NONE; } From f9565e1e078329da7c92dd6812685bf8a2dd9b8e Mon Sep 17 00:00:00 2001 From: Shriya Date: Fri, 13 Oct 2017 10:06:41 +0530 Subject: [PATCH 0687/3326] powerpc/powernv/cpufreq: Fix the frequency read by /proc/cpuinfo [ Upstream commit cd77b5ce208c153260ed7882d8910f2395bfaabd ] The call to /proc/cpuinfo in turn calls cpufreq_quick_get() which returns the last frequency requested by the kernel, but may not reflect the actual frequency the processor is running at. This patch makes a call to cpufreq_get() instead which returns the current frequency reported by the hardware. Fixes: fb5153d05a7d ("powerpc: powernv: Implement ppc_md.get_proc_freq()") Signed-off-by: Shriya Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/powernv/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index bbb73aa0eb8f04..bfe2aa702973ac 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -319,7 +319,7 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu) { unsigned long ret_freq; - ret_freq = cpufreq_quick_get(cpu) * 1000ul; + ret_freq = cpufreq_get(cpu) * 1000ul; /* * If the backend cpufreq driver does not exist, From fe1c1819b4340f53ac2da07991c810fceaf06bba Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Fri, 13 Oct 2017 21:35:43 +0300 Subject: [PATCH 0688/3326] PCI: Do not allocate more buses than available in parent [ Upstream commit a20c7f36bd3d20d245616ae223bb9d05dfb6f050 ] One can ask more buses to be reserved for hotplug bridges by passing pci=hpbussize=N in the kernel command line. If the parent bus does not have enough bus space available we incorrectly create child bus with the requested number of subordinate buses. 
In the example below hpbussize is set to one more than we have available buses in the root port: pci 0000:07:00.0: [8086:1578] type 01 class 0x060400 pci 0000:07:00.0: scanning [bus 00-00] behind bridge, pass 0 pci 0000:07:00.0: bridge configuration invalid ([bus 00-00]), reconfiguring pci 0000:07:00.0: scanning [bus 00-00] behind bridge, pass 1 pci_bus 0000:08: busn_res: can not insert [bus 08-ff] under [bus 07-3f] (conflicts with (null) [bus 07-3f]) pci_bus 0000:08: scanning bus ... pci_bus 0000:0a: bus scan returning with max=40 pci_bus 0000:0a: busn_res: [bus 0a-ff] end is updated to 40 pci_bus 0000:0a: [bus 0a-40] partially hidden behind bridge 0000:07 [bus 07-3f] pci_bus 0000:08: bus scan returning with max=40 pci_bus 0000:08: busn_res: [bus 08-ff] end is updated to 40 Instead of allowing this, limit the subordinate number to be less than or equal the maximum subordinate number allocated for the parent bus (if it has any). Signed-off-by: Mika Westerberg [bhelgaas: remove irrelevant dmesg messages] Signed-off-by: Bjorn Helgaas Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/probe.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ff94b69738a87a..f285cd74088ec4 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1076,7 +1076,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) child = pci_add_new_bus(bus, dev, max+1); if (!child) goto out; - pci_bus_insert_busn_res(child, max+1, 0xff); + pci_bus_insert_busn_res(child, max+1, + bus->busn_res.end); } max++; buses = (buses & 0xff000000) @@ -2433,6 +2434,10 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus) if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) { if (max - bus->busn_res.start < pci_hotplug_bus_size - 1) max = bus->busn_res.start + pci_hotplug_bus_size - 1; + + /* Do not allocate more buses than we have room left */ + if (max > bus->busn_res.end) + max = bus->busn_res.end; } /* From 837da470dc16168a4ea75989b0f902efe795dda4 Mon Sep 17 00:00:00 2001 From: Matthias Brugger Date: Mon, 30 Oct 2017 12:37:55 +0100 Subject: [PATCH 0689/3326] iommu/mediatek: Fix driver name [ Upstream commit 395df08d2e1de238a9c8c33fdcd0e2160efd63a9 ] There exist two Mediatek iommu drivers for the two different generations of the device. But both drivers have the same name "mtk-iommu". This breaks the registration of the second driver: Error: Driver 'mtk-iommu' is already registered, aborting... Fix this by changing the name for first generation to "mtk-iommu-v1". Fixes: b17336c55d89 ("iommu/mediatek: add support for mtk iommu generation one HW") Signed-off-by: Matthias Brugger Signed-off-by: Alex Williamson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/mtk_iommu_v1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index bc1efbfb9ddf0f..542930cd183d07 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -708,7 +708,7 @@ static struct platform_driver mtk_iommu_driver = { .probe = mtk_iommu_probe, .remove = mtk_iommu_remove, .driver = { - .name = "mtk-iommu", + .name = "mtk-iommu-v1", .of_match_table = mtk_iommu_of_ids, .pm = &mtk_iommu_pm_ops, } From c4e7af283374329a6eca97dfa782d710417f3e19 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Sat, 4 Nov 2017 23:52:54 -0500 Subject: [PATCH 0690/3326] thunderbolt: tb: fix use after free in tb_activate_pcie_devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a2e373438f72391493a4425efc1b82030b6b4fd5 ] Add a ̣̣continue statement in order to avoid using a previously free'd pointer tunnel in list_add. Addresses-Coverity-ID: 1415336 Fixes: 9d3cce0b6136 ("thunderbolt: Introduce thunderbolt bus and connection manager") Signed-off-by: Gustavo A. R. Silva Acked-by: Mika Westerberg Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/thunderbolt/tb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index d674e06767a560..1424581fd9af5c 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -225,6 +225,7 @@ static void tb_activate_pcie_devices(struct tb *tb) tb_port_info(up_port, "PCIe tunnel activation failed, aborting\n"); tb_pci_free(tunnel); + continue; } list_add(&tunnel->list, &tcm->tunnel_list); From d815f4efbb2a04e57632fc4daa6ebaf5fca52123 Mon Sep 17 00:00:00 2001 From: KUWAZAWA Takuya Date: Sun, 15 Oct 2017 20:54:10 +0900 Subject: [PATCH 0691/3326] netfilter: ipvs: Fix inappropriate output of procfs [ Upstream commit c5504f724c86ee925e7ffb80aa342cfd57959b13 ] Information about ipvs in different network namespace can be seen via procfs. How to reproduce: # ip netns add ns01 # ip netns add ns02 # ip netns exec ns01 ip a add dev lo 127.0.0.1/8 # ip netns exec ns02 ip a add dev lo 127.0.0.1/8 # ip netns exec ns01 ipvsadm -A -t 10.1.1.1:80 # ip netns exec ns02 ipvsadm -A -t 10.1.1.2:80 The ipvsadm displays information about its own network namespace only. # ip netns exec ns01 ipvsadm -Ln IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP 10.1.1.1:80 wlc # ip netns exec ns02 ipvsadm -Ln IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP 10.1.1.2:80 wlc But I can see information about other network namespace via procfs. # ip netns exec ns01 cat /proc/net/ip_vs IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP 0A010101:0050 wlc TCP 0A010102:0050 wlc # ip netns exec ns02 cat /proc/net/ip_vs IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP 0A010102:0050 wlc Signed-off-by: KUWAZAWA Takuya Acked-by: Julian Anastasov Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/netfilter/ipvs/ip_vs_ctl.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 4f940d7eb2f7e4..b3245f9a37d147 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2034,12 +2034,16 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) seq_puts(seq, " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n"); } else { + struct net *net = seq_file_net(seq); + struct netns_ipvs *ipvs = net_ipvs(net); const struct ip_vs_service *svc = v; const struct ip_vs_iter *iter = seq->private; const struct ip_vs_dest *dest; struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); char *sched_name = sched ? 
sched->name : "none"; + if (svc->ipvs != ipvs) + return 0; if (iter->table == ip_vs_svc_table) { #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) From 2637cb1e3d8086cd31a96e60c3e7e0aa74c043f4 Mon Sep 17 00:00:00 2001 From: "William A. Kennington III" Date: Fri, 22 Sep 2017 16:58:00 -0700 Subject: [PATCH 0692/3326] powerpc/opal: Fix EBUSY bug in acquiring tokens [ Upstream commit 71e24d7731a2903b1ae2bba2b2971c654d9c2aa6 ] The current code checks the completion map to look for the first token that is complete. In some cases, a completion can come in but the token can still be on lease to the caller processing the completion. If this completed but unreleased token is the first token found in the bitmap by another tasks trying to acquire a token, then the __test_and_set_bit call will fail since the token will still be on lease. The acquisition will then fail with an EBUSY. This patch reorganizes the acquisition code to look at the opal_async_token_map for an unleased token. If the token has no lease it must have no outstanding completions so we should never see an EBUSY, unless we have leased out too many tokens. Since opal_async_get_token_inrerruptible is protected by a semaphore, we will practically never see EBUSY anymore. Fixes: 8d7248232208 ("powerpc/powernv: Infrastructure to support OPAL async completion") Signed-off-by: William A. Kennington III Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/powernv/opal-async.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c index cf33769a7b7252..45b3feb8aa2f77 100644 --- a/arch/powerpc/platforms/powernv/opal-async.c +++ b/arch/powerpc/platforms/powernv/opal-async.c @@ -39,18 +39,18 @@ int __opal_async_get_token(void) int token; spin_lock_irqsave(&opal_async_comp_lock, flags); - token = find_first_bit(opal_async_complete_map, opal_max_async_tokens); + token = find_first_zero_bit(opal_async_token_map, opal_max_async_tokens); if (token >= opal_max_async_tokens) { token = -EBUSY; goto out; } - if (__test_and_set_bit(token, opal_async_token_map)) { + if (!__test_and_clear_bit(token, opal_async_complete_map)) { token = -EBUSY; goto out; } - __clear_bit(token, opal_async_complete_map); + __set_bit(token, opal_async_token_map); out: spin_unlock_irqrestore(&opal_async_comp_lock, flags); From 0fe5286e49d4e1fe7eba347302b3d3dee217eade Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 18 Oct 2017 11:16:47 +0200 Subject: [PATCH 0693/3326] powerpc/ipic: Fix status get and status clear [ Upstream commit 6b148a7ce72a7f87c81cbcde48af014abc0516a9 ] IPIC Status is provided by register IPIC_SERSR and not by IPIC_SERMR which is the mask register. 
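Condensed from the hunks below, both corrected accessors operate on the status register; the mask register only selects which sources are enabled and says nothing about what is currently pending:

  u32 ipic_get_mcp_status(void)
  {
          return ipic_read(primary_ipic->regs, IPIC_SERSR);   /* pending status */
  }

  void ipic_clear_mcp_status(u32 mask)
  {
          ipic_write(primary_ipic->regs, IPIC_SERSR, mask);   /* clear status   */
  }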
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/sysdev/ipic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 16f1edd78c40e5..535cf1f6941c01 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -846,12 +846,12 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq) u32 ipic_get_mcp_status(void) { - return ipic_read(primary_ipic->regs, IPIC_SERMR); + return ipic_read(primary_ipic->regs, IPIC_SERSR); } void ipic_clear_mcp_status(u32 mask) { - ipic_write(primary_ipic->regs, IPIC_SERMR, mask); + ipic_write(primary_ipic->regs, IPIC_SERSR, mask); } /* Return an interrupt vector or 0 if no interrupt is pending. */ From f8b31d88a698909f0bcc4142e4637e2baee5c320 Mon Sep 17 00:00:00 2001 From: Tyrel Datwyler Date: Thu, 28 Sep 2017 20:19:20 -0400 Subject: [PATCH 0694/3326] powerpc/pseries/vio: Dispose of virq mapping on vdevice unregister [ Upstream commit b8f89fea599d91e674497aad572613eb63181f31 ] When a vdevice is DLPAR removed from the system the vio subsystem doesn't bother unmapping the virq from the irq_domain. As a result we have a virq mapped to a hardware irq that is no longer valid for the irq_domain. A side effect is that we are left with /proc/irq/ affinity entries, and attempts to modify the smp_affinity of the irq will fail. In the following observed example the kernel log is spammed by ics_rtas_set_affinity errors after the removal of a VSCSI adapter. This is a result of irqbalance trying to adjust the affinity every 10 seconds. rpadlpar_io: slot U8408.E8E.10A7ACV-V5-C25 removed ics_rtas_set_affinity: ibm,set-xive irq=655385 returns -3 ics_rtas_set_affinity: ibm,set-xive irq=655385 returns -3 This patch fixes the issue by calling irq_dispose_mapping() on the virq of the viodev on unregister. Fixes: f2ab6219969f ("powerpc/pseries: Add PFO support to the VIO bus") Signed-off-by: Tyrel Datwyler Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/pseries/vio.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 12277bc9fd9ebd..d86938260a8675 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -1592,6 +1592,8 @@ ATTRIBUTE_GROUPS(vio_dev); void vio_unregister_device(struct vio_dev *viodev) { device_unregister(&viodev->dev); + if (viodev->family == VDEVICE) + irq_dispose_mapping(viodev->irq); } EXPORT_SYMBOL(vio_unregister_device); From c90db58f5bb6d5ba203815e28cb5e7eb20b86b27 Mon Sep 17 00:00:00 2001 From: Kuppuswamy Sathyanarayanan Date: Sun, 29 Oct 2017 02:49:54 -0700 Subject: [PATCH 0695/3326] platform/x86: intel_punit_ipc: Fix resource ioremap warning [ Upstream commit 6cc8cbbc8868033f279b63e98b26b75eaa0006ab ] For PUNIT device, ISPDRIVER_IPC and GTDDRIVER_IPC resources are not mandatory. So when PMC IPC driver creates a PUNIT device, if these resources are not available then it creates dummy resource entries for these missing resources. But during PUNIT device probe, doing ioremap on these dummy resources generates following warning messages. 
intel_punit_ipc: can't request region for resource [mem 0x00000000] intel_punit_ipc: can't request region for resource [mem 0x00000000] intel_punit_ipc: can't request region for resource [mem 0x00000000] intel_punit_ipc: can't request region for resource [mem 0x00000000] This patch fixes this issue by adding extra check for resource size before performing ioremap operation. Signed-off-by: Kuppuswamy Sathyanarayanan Signed-off-by: Andy Shevchenko Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/intel_punit_ipc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c index a47a41fc10ad77..b5b890127479f8 100644 --- a/drivers/platform/x86/intel_punit_ipc.c +++ b/drivers/platform/x86/intel_punit_ipc.c @@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev) * - GTDRIVER_IPC BASE_IFACE */ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 3); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 4); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 5); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; From 96d275b841efc6decf61afafc1703f0e9b92253d Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 31 Oct 2017 11:03:18 -0700 Subject: [PATCH 0696/3326] target/iscsi: Detect conn_cmd_list corruption early [ Upstream commit 6eaf69e4ec075f5af236c0c89f75639a195db904 ] Certain behavior of the initiator can cause the target driver to send both a reject and a SCSI response. If that happens two target_put_sess_cmd() calls will occur without the command having been removed from conn_cmd_list. In other words, conn_cmd_list will get corrupted once the freed memory is reused. Although the Linux kernel can detect list corruption if list debugging is enabled, in this case the context in which list corruption is detected is not related to the context that caused list corruption. Hence add WARN_ON() statements that report the context that is causing list corruption. 
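The guard itself is a one-liner per release path (sketch; the rest of each function is unchanged). With it, the warning backtrace points at the call chain that releases a command while it is still linked on conn_cmd_list, instead of a later, unrelated list walk tripping over the corruption:

  void iscsit_release_cmd(struct iscsi_cmd *cmd)
  {
          /* the command must already be off conn->conn_cmd_list here */
          WARN_ON(!list_empty(&cmd->i_conn_node));

          /* ... existing release path continues unchanged ... */
  }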
Signed-off-by: Bart Van Assche Cc: Christoph Hellwig Cc: Mike Christie Reviewed-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target_util.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 1e36f83b596164..70c6b9bfc04ee8 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -694,6 +694,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) struct iscsi_session *sess; struct se_cmd *se_cmd = &cmd->se_cmd; + WARN_ON(!list_empty(&cmd->i_conn_node)); + if (cmd->conn) sess = cmd->conn->sess; else @@ -716,6 +718,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues) { struct iscsi_conn *conn = cmd->conn; + WARN_ON(!list_empty(&cmd->i_conn_node)); + if (cmd->data_direction == DMA_TO_DEVICE) { iscsit_stop_dataout_timer(cmd); iscsit_free_r2ts_from_list(cmd); From 68d702c6689d83000060118f940e0adce88e42aa Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 31 Oct 2017 11:03:17 -0700 Subject: [PATCH 0697/3326] target/iscsi: Fix a race condition in iscsit_add_reject_from_cmd() [ Upstream commit cfe2b621bb18d86e93271febf8c6e37622da2d14 ] Avoid that cmd->se_cmd.se_tfo is read after a command has already been freed. Signed-off-by: Bart Van Assche Cc: Christoph Hellwig Cc: Mike Christie Reviewed-by: Hannes Reinecke Signed-off-by: Nicholas Bellinger Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d9ba4ee2c62b4b..52fa52c20be055 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -834,6 +834,7 @@ static int iscsit_add_reject_from_cmd( unsigned char *buf) { struct iscsi_conn *conn; + const bool do_put = cmd->se_cmd.se_tfo != NULL; if (!cmd->conn) { pr_err("cmd->conn is NULL for ITT: 0x%08x\n", @@ -864,7 +865,7 @@ static int iscsit_add_reject_from_cmd( * Perform the kref_put now if se_cmd has already been setup by * scsit_setup_scsi_cmd() */ - if (cmd->se_cmd.se_tfo != NULL) { + if (do_put) { pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n"); target_put_sess_cmd(&cmd->se_cmd); } From e389507ad355226019147985c29a5b4282aa1e4d Mon Sep 17 00:00:00 2001 From: tangwenji Date: Fri, 15 Sep 2017 16:03:13 +0800 Subject: [PATCH 0698/3326] iscsi-target: fix memory leak in lio_target_tiqn_addtpg() [ Upstream commit 12d5a43b2dffb6cd28062b4e19024f7982393288 ] tpg must free when call core_tpg_register() return fail Signed-off-by: tangwenji Signed-off-by: Nicholas Bellinger Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target_configfs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 0dd4c45f7575a2..0ebc4818e132ad 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg( ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI); if (ret < 0) - return NULL; + goto free_out; ret = iscsit_tpg_add_portal_group(tiqn, tpg); if (ret != 0) @@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg( return 
&tpg->tpg_se_tpg; out: core_tpg_deregister(&tpg->tpg_se_tpg); +free_out: kfree(tpg); return NULL; } From 23d3f106f88ca21fd2911ae2f3a4f8aa7b56fddc Mon Sep 17 00:00:00 2001 From: tangwenji Date: Thu, 24 Aug 2017 19:59:37 +0800 Subject: [PATCH 0699/3326] target:fix condition return in core_pr_dump_initiator_port() [ Upstream commit 24528f089d0a444070aa4f715ace537e8d6bf168 ] When is pr_reg->isid_present_at_reg is false,this function should return. This fixes a regression originally introduced by: commit d2843c173ee53cf4c12e7dfedc069a5bc76f0ac5 Author: Andy Grover Date: Thu May 16 10:40:55 2013 -0700 target: Alter core_pr_dump_initiator_port for ease of use Signed-off-by: tangwenji Signed-off-by: Nicholas Bellinger Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_pr.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 9f25c9c6f67da5..4ba5004a069e97 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -58,8 +58,10 @@ void core_pr_dump_initiator_port( char *buf, u32 size) { - if (!pr_reg->isid_present_at_reg) + if (!pr_reg->isid_present_at_reg) { buf[0] = '\0'; + return; + } snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid); } From f6a341e87c656f1e2e9959cfdd2d02561724558f Mon Sep 17 00:00:00 2001 From: Jiang Yi Date: Fri, 11 Aug 2017 11:29:44 +0800 Subject: [PATCH 0700/3326] target/file: Do not return error for UNMAP if length is zero [ Upstream commit 594e25e73440863981032d76c9b1e33409ceff6e ] The function fd_execute_unmap() in target_core_file.c calles ret = file->f_op->fallocate(file, mode, pos, len); Some filesystems implement fallocate() to return error if length is zero (e.g. btrfs) but according to SCSI Block Commands spec UNMAP should return success for zero length. Signed-off-by: Jiang Yi Signed-off-by: Nicholas Bellinger Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_file.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index c629817a8854be..9b2c0c773022c0 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) struct inode *inode = file->f_mapping->host; int ret; + if (!nolb) { + return 0; + } + if (cmd->se_dev->dev_attrib.pi_prot_type) { ret = fd_do_prot_unmap(cmd, lba, nolb); if (ret) From ddf2588a05c820b305bc6db2fe6e498a3bc6fde9 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Fri, 3 Nov 2017 11:24:44 -0600 Subject: [PATCH 0701/3326] badblocks: fix wrong return value in badblocks_set if badblocks are disabled [ Upstream commit 39b4954c0a1556f8f7f1fdcf59a227117fcd8a0b ] MD's rdev_set_badblocks() expects that badblocks_set() returns 1 if badblocks are disabled, otherwise, rdev_set_badblocks() will record superblock changes and return success in that case and md will fail to report an IO error which it should. This bug has existed since badblocks were introduced in commit 9e0e252a048b ("badblocks: Add core badblock management code"). 
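As a purely illustrative model of the calling convention described above (simplified names, not the kernel code, and based only on the behaviour stated in this commit message): the caller treats a zero return as "the bad block was recorded", so returning 0 in the disabled case is what hides the IO error.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the return-value contract described above; 0 means "the
 * range was recorded", non-zero means "could not record it". */
struct bb { int shift; };    /* shift < 0: badblocks disabled */

static int badblocks_set_model(struct bb *bb, bool patched)
{
    if (bb->shift < 0)
        return patched ? 1 : 0;    /* old code wrongly claimed success */
    return 0;                      /* pretend the range was recorded */
}

/* Caller modeled on the md behaviour quoted above: a zero return is taken
 * as "bad block recorded", so the IO is not failed; only a non-zero return
 * makes the caller report the IO error. */
static void rdev_set_badblocks_model(struct bb *bb, bool patched)
{
    if (badblocks_set_model(bb, patched) == 0)
        printf("recorded -> IO error is swallowed\n");
    else
        printf("cannot record (disabled) -> IO error is reported\n");
}

int main(void)
{
    struct bb disabled = { .shift = -1 };

    printf("before the fix: ");
    rdev_set_badblocks_model(&disabled, false);
    printf("after the fix:  ");
    rdev_set_badblocks_model(&disabled, true);
    return 0;
}

With the one-line change, the disabled case reports failure to the caller, which can then surface the IO error instead of recording a superblock change that never happens.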
Signed-off-by: Liu Bo Acked-by: Guoqing Jiang Signed-off-by: Shaohua Li Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- block/badblocks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/badblocks.c b/block/badblocks.c index 43c71166e1e2a6..91f7bcf979d374 100644 --- a/block/badblocks.c +++ b/block/badblocks.c @@ -178,7 +178,7 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors, if (bb->shift < 0) /* badblocks are disabled */ - return 0; + return 1; if (bb->shift) { /* round the start down, and the end up */ From 07a93b48a0f73e0cb74355a7dd80c6ad55bed674 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Fri, 3 Nov 2017 10:50:34 -0600 Subject: [PATCH 0702/3326] iommu/amd: Limit the IOVA page range to the specified addresses [ Upstream commit b92b4fb5c14257c0e7eae291ecc1f7b1962e1699 ] The extent of pages specified when applying a reserved region should include up to the last page of the range, but not the page following the range. Signed-off-by: Gary R Hook Fixes: 8d54d6c8b8f3 ('iommu/amd: Implement apply_dm_region call-back') Signed-off-by: Alex Williamson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/amd_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 8e8874d23717ab..99a2a57b6cfdae 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3155,7 +3155,7 @@ static void amd_iommu_apply_resv_region(struct device *dev, unsigned long start, end; start = IOVA_PFN(region->start); - end = IOVA_PFN(region->start + region->length); + end = IOVA_PFN(region->start + region->length - 1); WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); } From 79f41e0f8ae0e3298bfcfd8c085b5242add83e9b Mon Sep 17 00:00:00 2001 From: Eryu Guan Date: Wed, 1 Nov 2017 21:43:50 -0700 Subject: [PATCH 0703/3326] xfs: truncate pagecache before writeback in xfs_setattr_size() [ Upstream commit 350976ae21873b0d36584ea005076356431b8f79 ] On truncate down, if new size is not block size aligned, we zero the rest of block to avoid exposing stale data to user, and iomap_truncate_page() skips zeroing if the range is already in unwritten state or a hole. Then we writeback from on-disk i_size to the new size if this range hasn't been written to disk yet, and truncate page cache beyond new EOF and set in-core i_size. The problem is that we could write data between di_size and newsize before removing the page cache beyond newsize, as the extents may still be in unwritten state right after a buffer write. As such, the page of data that newsize lies in has not been zeroed by page cache invalidation before it is written, and xfs_do_writepage() hasn't triggered it's "zero data beyond EOF" case because we haven't updated in-core i_size yet. Then a subsequent mmap read could see non-zeros past EOF. I occasionally see this in fsx runs in fstests generic/112, a simplified fsx operation sequence is like (assuming 4k block size xfs): fallocate 0x0 0x1000 0x0 keep_size write 0x0 0x1000 0x0 truncate 0x0 0x800 0x1000 punch_hole 0x0 0x800 0x800 mapread 0x0 0x800 0x800 where fallocate allocates unwritten extent but doesn't update i_size, buffer write populates the page cache and extent is still unwritten, truncate skips zeroing page past new EOF and writes the page to disk, punch_hole invalidates the page cache, at last mapread reads the block back and sees non-zero beyond EOF. 
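The fsx sequence above can be approximated with a small standalone program. This is only a rough sketch: it assumes a file on a 4k-block XFS filesystem (the path "testfile" and the sizes are made up to match the quoted trace), error handling is minimal, and since the bug is a race against writeback a clean run proves nothing.

/* Rough userspace approximation of the quoted fsx sequence. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    char buf[0x1000];
    int fd = open("testfile", O_CREAT | O_RDWR | O_TRUNC, 0644);

    if (fd < 0)
        return 1;
    memset(buf, 0xaa, sizeof(buf));

    fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 0x1000);    /* unwritten extent */
    pwrite(fd, buf, 0x1000, 0);                       /* buffered write    */
    ftruncate(fd, 0x800);                             /* truncate down     */
    fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 0x800);
    fsync(fd);

    /* mapread: bytes between EOF (0x800) and the end of the page should
     * always read back as zero. */
    unsigned char *p = mmap(NULL, 0x1000, PROT_READ, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED)
        return 1;
    for (size_t i = 0x800; i < 0x1000; i++) {
        if (p[i] != 0) {
            printf("non-zero byte past EOF at offset 0x%zx\n", i);
            return 2;
        }
    }
    printf("no stale data seen past EOF\n");
    return 0;
}

fsx performs a similar zero check on the part of the mapped page past EOF, which is where the stale 0xaa bytes show up when the race is hit.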
Fix it by moving truncate_setsize() to before writeback so the page cache invalidation zeros the partial page at the new EOF. This also triggers "zero data beyond EOF" in xfs_do_writepage() at writeback time, because newsize has been set and page straddles the newsize. Also fixed the wrong 'end' param of filemap_write_and_wait_range() call while we're at it, the 'end' is inclusive and should be 'newsize - 1'. Suggested-by: Dave Chinner Signed-off-by: Eryu Guan Acked-by: Dave Chinner Reviewed-by: Brian Foster Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_iops.c | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 17081c77ef86ed..f24e5b6cfc867a 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -885,22 +885,6 @@ xfs_setattr_size( if (error) return error; - /* - * We are going to log the inode size change in this transaction so - * any previous writes that are beyond the on disk EOF and the new - * EOF that have not been written out need to be written here. If we - * do not write the data out, we expose ourselves to the null files - * problem. Note that this includes any block zeroing we did above; - * otherwise those blocks may not be zeroed after a crash. - */ - if (did_zeroing || - (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) { - error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, - ip->i_d.di_size, newsize); - if (error) - return error; - } - /* * We've already locked out new page faults, so now we can safely remove * pages from the page cache knowing they won't get refaulted until we @@ -917,9 +901,29 @@ xfs_setattr_size( * user visible changes). There's not much we can do about this, except * to hope that the caller sees ENOMEM and retries the truncate * operation. + * + * And we update in-core i_size and truncate page cache beyond newsize + * before writeback the [di_size, newsize] range, so we're guaranteed + * not to write stale data past the new EOF on truncate down. */ truncate_setsize(inode, newsize); + /* + * We are going to log the inode size change in this transaction so + * any previous writes that are beyond the on disk EOF and the new + * EOF that have not been written out need to be written here. If we + * do not write the data out, we expose ourselves to the null files + * problem. Note that this includes any block zeroing we did above; + * otherwise those blocks may not be zeroed after a crash. + */ + if (did_zeroing || + (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) { + error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, + ip->i_d.di_size, newsize - 1); + if (error) + return error; + } + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); if (error) return error; From ade07fa32f5ab083d558199e906d612b5d535553 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Fri, 3 Nov 2017 11:45:18 +0000 Subject: [PATCH 0704/3326] arm-ccn: perf: Prevent module unload while PMU is in use [ Upstream commit c7f5828bf77dcbd61d51f4736c1d5aa35663fbb4 ] When the PMU driver is built as a module, the perf expects the pmu->module to be valid, so that the driver is prevented from being unloaded while it is in use. Fix the CCN pmu driver to fill in this field. 
Fixes: a33b0daab73a0 ("bus: ARM CCN PMU driver") Cc: Pawel Moll Cc: Will Deacon Acked-by: Mark Rutland Signed-off-by: Suzuki K Poulose Signed-off-by: Will Deacon Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/bus/arm-ccn.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 03d7faf51c2bba..72fd1750134d2d 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -1280,6 +1280,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) /* Perf driver registration */ ccn->dt.pmu = (struct pmu) { + .module = THIS_MODULE, .attr_groups = arm_ccn_pmu_attr_groups, .task_ctx_nr = perf_invalid_context, .event_init = arm_ccn_pmu_event_init, From 6479a108b35857674ca2a74e049e3089d12ee444 Mon Sep 17 00:00:00 2001 From: Robert Baronescu Date: Tue, 10 Oct 2017 13:22:00 +0300 Subject: [PATCH 0705/3326] crypto: tcrypt - fix buffer lengths in test_aead_speed() [ Upstream commit 7aacbfcb331ceff3ac43096d563a1f93ed46e35e ] Fix the way the length of the buffers used for encryption / decryption are computed. For e.g. in case of encryption, input buffer does not contain an authentication tag. Signed-off-by: Robert Baronescu Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- crypto/tcrypt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0022a18d36eeb4..f5f58a6eee5ddc 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -340,7 +340,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, } sg_init_aead(sg, xbuf, - *b_size + (enc ? authsize : 0)); + *b_size + (enc ? 0 : authsize)); sg_init_aead(sgout, xoutbuf, *b_size + (enc ? authsize : 0)); @@ -348,7 +348,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, sg_set_buf(&sg[0], assoc, aad_size); sg_set_buf(&sgout[0], assoc, aad_size); - aead_request_set_crypt(req, sg, sgout, *b_size, iv); + aead_request_set_crypt(req, sg, sgout, + *b_size + (enc ? 0 : authsize), + iv); aead_request_set_ad(req, aad_size); if (secs) From f58a90e027d8ff3f24e20429500f0923663afbd4 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 3 Nov 2017 12:21:21 +0100 Subject: [PATCH 0706/3326] mm: Handle 0 flags in _calc_vm_trans() macro [ Upstream commit 592e254502041f953e84d091eae2c68cba04c10b ] _calc_vm_trans() does not handle the situation when some of the passed flags are 0 (which can happen if these VM flags do not make sense for the architecture). Improve the _calc_vm_trans() macro to return 0 in such situation. Since all passed flags are constant, this does not add any runtime overhead. Signed-off-by: Jan Kara Signed-off-by: Dan Williams Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/linux/mman.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/mman.h b/include/linux/mman.h index 7c87b6652244f1..835a6f2fd8d4f3 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -64,8 +64,9 @@ static inline bool arch_validate_prot(unsigned long prot) * ("bit1" and "bit2" must be single bits) */ #define _calc_vm_trans(x, bit1, bit2) \ + ((!(bit1) || !(bit2)) ? 0 : \ ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \ - : ((x) & (bit1)) / ((bit1) / (bit2))) + : ((x) & (bit1)) / ((bit1) / (bit2)))) /* * Combine the mmap "prot" argument into "vm_flags" used internally. 
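Since all the arguments are compile-time constants, the change is easy to sanity-check in isolation. The macro below is copied from the hunk above; the flag names and values are invented for the demonstration only.

#include <stdio.h>

/* Fixed _calc_vm_trans(), as in the hunk above: translate bit1 in x to
 * bit2, and yield 0 outright if either bit is 0 (flag not defined for
 * this architecture). The zero-bit arm is compile-time dead. */
#define _calc_vm_trans(x, bit1, bit2) \
    ((!(bit1) || !(bit2)) ? 0 : \
    ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
    : ((x) & (bit1)) / ((bit1) / (bit2))))

/* Made-up flag values, purely for illustration. */
#define SRC_FOO 0x01
#define SRC_BAR 0x00    /* "not supported on this architecture" */
#define DST_FOO 0x10
#define DST_BAR 0x20

int main(void)
{
    unsigned long in = SRC_FOO | SRC_BAR;

    printf("FOO translates to 0x%lx\n",
           (unsigned long)_calc_vm_trans(in, SRC_FOO, DST_FOO));
    printf("BAR (zero source bit) translates to 0x%lx\n",
           (unsigned long)_calc_vm_trans(in, SRC_BAR, DST_BAR));
    return 0;
}

With the pre-patch macro the SRC_BAR case divides by the zero flag, so an architecture defining one of the VM flags as 0 could not use the helper; the guarded version simply yields 0 for that bit, at no runtime cost since everything folds at compile time.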
From 62785073959347a14eecb395e810a97e745996f2 Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Fri, 3 Nov 2017 12:18:26 +0800 Subject: [PATCH 0707/3326] net: hns3: fix for getting advertised_caps in hns3_get_link_ksettings [ Upstream commit 2b39cabb2a283cea0c3d96d9370575371726164f ] This patch fixes a bug for ethtool's get_link_ksettings(). The advertising for autoneg is always added to advertised_caps whether autoneg is enable or disable. This patch fixes it. Fixes: 496d03e (net: hns3: Add Ethtool support to HNS3 driver) Signed-off-by: Fuyun Liang Signed-off-by: Lipeng Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index d636399232fb1d..e590d96e434a19 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -375,6 +375,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev, break; } + if (!cmd->base.autoneg) + advertised_caps &= ~HNS3_LM_AUTONEG_BIT; + /* now, map driver link modes to ethtool link modes */ hns3_driv_to_eth_caps(supported_caps, cmd, false); hns3_driv_to_eth_caps(advertised_caps, cmd, true); From 7018a57cf7018dca6469d73b485d4a35cea97649 Mon Sep 17 00:00:00 2001 From: qumingguang Date: Thu, 2 Nov 2017 20:45:22 +0800 Subject: [PATCH 0708/3326] net: hns3: Fix a misuse to devm_free_irq [ Upstream commit ae064e6123f89f90af7e4ea190cc0c612643ca93 ] we should use free_irq to free the nic irq during the unloading time. because we use request_irq to apply it when nic up. It will crash if up net device after reset the port. This patch fixes the issue. Signed-off-by: qumingguang Signed-off-by: Lipeng Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 35369e1c8036f3..981fc62ce04343 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -2460,9 +2460,8 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) (void)irq_set_affinity_hint( priv->tqp_vector[i].vector_irq, NULL); - devm_free_irq(&pdev->dev, - priv->tqp_vector[i].vector_irq, - &priv->tqp_vector[i]); + free_irq(priv->tqp_vector[i].vector_irq, + &priv->tqp_vector[i]); } priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; From 3c5fed838b473e565e44ada69ff51e07317e2c8a Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 2 Nov 2017 10:30:11 +0100 Subject: [PATCH 0709/3326] staging: rtl8188eu: Revert part of "staging: rtl8188eu: fix comments with lines over 80 characters" [ Upstream commit 4004a9870bbefdb6644c3d2033f5315920a3b669 ] Commit 74e1e498e84e ("staging: rtl8188eu: fix comments with lines over 80 characters") not only changed comments but also changed an if check: -if (pmlmepriv->cur_network.join_res != true) { +if (!(pmlmepriv->cur_network.join_res)) { This is not equivalent as join_res is an int and can have values such as -2 and -3. Note for the next time, please only make one type of changes in a single clean-up commit. 
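The behavioural difference is easy to demonstrate outside the driver: for an int that can carry negative join results, !(x) only tests for zero, while the original x != true compares against 1. A tiny standalone check (the sample values are made up, matching the -2/-3 mentioned above):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    /* join_res is an int in the driver and can hold values like -2 or -3 */
    int values[] = { 1, 0, -2, -3 };

    for (unsigned i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
        int join_res = values[i];

        printf("join_res=%2d  !(join_res)=%d  (join_res != true)=%d\n",
               join_res, !(join_res), join_res != true);
    }
    return 0;
}

The two expressions agree for 0 and 1 but diverge for the negative result codes, which is why the clean-up commit silently changed the driver's behaviour.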
Fixes: 74e1e498e84e ("staging: rtl8188eu: fix comments with lines over 80 ...") Cc: Juliana Rodrigues Signed-off-by: Hans de Goede Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8188eu/core/rtw_ap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c index 32a483769975bb..fa611455109ae6 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c @@ -754,7 +754,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf) } /* setting only at first time */ - if (!(pmlmepriv->cur_network.join_res)) { + if (pmlmepriv->cur_network.join_res != true) { /* WEP Key will be set before this function, do not * clear CAM. */ From 3c38ce8767c6203bf3d479a570ece2a38502ac6b Mon Sep 17 00:00:00 2001 From: Chen Zhong Date: Thu, 5 Oct 2017 11:50:23 +0800 Subject: [PATCH 0710/3326] clk: mediatek: add the option for determining PLL source clock [ Upstream commit c955bf3998efa3355790a4d8c82874582f1bc727 ] Since the previous setup always sets the PLL using crystal 26MHz, this doesn't always happen in every MediaTek platform. So the patch added flexibility for assigning extra member for determining the PLL source clock. Signed-off-by: Chen Zhong Signed-off-by: Sean Wang Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/mediatek/clk-mtk.h | 1 + drivers/clk/mediatek/clk-pll.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h index f5d6b70ce18937..210ce8e8025eee 100644 --- a/drivers/clk/mediatek/clk-mtk.h +++ b/drivers/clk/mediatek/clk-mtk.h @@ -216,6 +216,7 @@ struct mtk_pll_data { uint32_t pcw_reg; int pcw_shift; const struct mtk_pll_div_table *div_table; + const char *parent_name; }; void mtk_clk_register_plls(struct device_node *node, diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c index a409142e93462d..7598477ff60f15 100644 --- a/drivers/clk/mediatek/clk-pll.c +++ b/drivers/clk/mediatek/clk-pll.c @@ -303,7 +303,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data, init.name = data->name; init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0; init.ops = &mtk_pll_ops; - init.parent_names = &parent_name; + if (data->parent_name) + init.parent_names = &data->parent_name; + else + init.parent_names = &parent_name; init.num_parents = 1; clk = clk_register(NULL, &pll->hw); From 27731e18a571b07c4042d83679184c78957bac1a Mon Sep 17 00:00:00 2001 From: Adriana Reus Date: Mon, 2 Oct 2017 13:32:10 +0300 Subject: [PATCH 0711/3326] clk: imx: imx7d: Fix parent clock for OCRAM_CLK [ Upstream commit edc5a8e754aba9c6eaeddd18cb1e72462f99b16c ] The parent of OCRAM_CLK should be axi_main_root_clk and not axi_post_div. 
before: axi_src 1 1 332307692 0 0 axi_cg 1 1 332307692 0 0 axi_pre_div 1 1 332307692 0 0 axi_post_div 1 1 332307692 0 0 ocram_clk 0 0 332307692 0 0 main_axi_root_clk 1 1 332307692 0 0 after: axi_src 1 1 332307692 0 0 axi_cg 1 1 332307692 0 0 axi_pre_div 1 1 332307692 0 0 axi_post_div 1 1 332307692 0 0 main_axi_root_clk 1 1 332307692 0 0 ocram_clk 0 0 332307692 0 0 Reference Doc: i.MX 7D Reference Manual - Chap 5, p 516 (https://www.nxp.com/docs/en/reference-manual/IMX7DRM.pdf) Fixes: 8f6d8094b215 ("ARM: imx: add imx7d clk tree support") Signed-off-by: Adriana Reus Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/imx/clk-imx7d.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 2305699db46798..0ac9b30c8b9061 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -797,7 +797,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate4("main_axi_root_clk", "axi_post_div", base + 0x4040, 0); clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0); clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0); - clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "axi_post_div", base + 0x4110, 0); + clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0); clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0); clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0); clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0); From de0bbe07a49ce9bf5bc4fc1846bd453151e7a334 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Szymanski?= Date: Tue, 1 Aug 2017 12:40:07 +0200 Subject: [PATCH 0712/3326] clk: imx6: refine hdmi_isfr's parent to make HDMI work on i.MX6 SoCs w/o VPU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit c68ee58d9ee7b856ac722f18f4f26579c8fbd2b4 ] On i.MX6 SoCs without VPU (in my case MCIMX6D4AVT10AC), the hdmi driver fails to probe: [ 2.540030] dwhdmi-imx 120000.hdmi: Unsupported HDMI controller (0000:00:00) [ 2.548199] imx-drm display-subsystem: failed to bind 120000.hdmi (ops dw_hdmi_imx_ops): -19 [ 2.557403] imx-drm display-subsystem: master bind failed: -19 That's because hdmi_isfr's parent, video_27m, is not correctly ungated. As explained in commit 5ccc248cc537 ("ARM: imx6q: clk: Add support for mipi_core_cfg clock as a shared clock gate"), video_27m is gated by CCM_CCGR3[CG8]. On i.MX6 SoCs with VPU, the hdmi is working thanks to the CCM_CMEOR[mod_en_ov_vpu] bit which makes the video_27m ungated whatever is in CCM_CCGR3[CG8]. The issue can be reproduced by setting CCMEOR[mod_en_ov_vpu] to 0. Make the HDMI work in every case by setting hdmi_isfr's parent to mipi_core_cfg. 
Signed-off-by: Sébastien Szymanski Reviewed-by: Fabio Estevam Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/imx/clk-imx6q.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index c07df719b8a35d..8d518ad5dc13e5 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -761,7 +761,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24); clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26); clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0); - clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4); + clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "mipi_core_cfg", base + 0x70, 4); clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6); clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8); clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10); From d9e497c9424896581b18f0807652364180401c7b Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Wed, 1 Nov 2017 08:09:59 -0400 Subject: [PATCH 0713/3326] media: camss-vfe: always initialize reg at vfe_set_xbar_cfg() [ Upstream commit 9917fbcfa20ab987d6381fd0365665e5c1402d75 ] if output->wm_num is bigger than 2, the value for reg is not initialized, as warned by smatch: drivers/media/platform/qcom/camss-8x16/camss-vfe.c:633 vfe_set_xbar_cfg() error: uninitialized symbol 'reg'. drivers/media/platform/qcom/camss-8x16/camss-vfe.c:637 vfe_set_xbar_cfg() error: uninitialized symbol 'reg'. That shouldn't happen in practice, so add a logic that will break the loop if i > 1, fixing the warnings. Signed-off-by: Mauro Carvalho Chehab Acked-by: Todor Tomov Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/media/platform/qcom/camss-8x16/camss-vfe.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c index b22d2dfcd3c29e..55232a9129503e 100644 --- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c +++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c @@ -622,6 +622,9 @@ static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output, reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16) reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; + } else { + /* On current devices output->wm_num is always <= 2 */ + break; } if (output->wm_idx[i] % 2 == 1) From 6facfe25e62e702304591bd114bcf7d6067c86e4 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Fri, 1 Sep 2017 08:47:14 +0800 Subject: [PATCH 0714/3326] clk: hi6220: mark clock cs_atb_syspll as critical [ Upstream commit d2a3671ebe6479483a12f94fcca63c058d95ad64 ] Clock cs_atb_syspll is pll used for coresight trace bus; when clock cs_atb_syspll is disabled and operates its child clock node cs_atb results in system hang. So mark clock cs_atb_syspll as critical to keep it enabled. 
Cc: Guodong Xu Cc: Zhangfei Gao Cc: Haojian Zhuang Signed-off-by: Leo Yan Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/1504226835-2115-2-git-send-email-leo.yan@linaro.org Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/hisilicon/clk-hi6220.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c index e786d717f75dcf..a87809d4bd525a 100644 --- a/drivers/clk/hisilicon/clk-hi6220.c +++ b/drivers/clk/hisilicon/clk-hi6220.c @@ -145,7 +145,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = { { HI6220_BBPPLL_SEL, "bbppll_sel", "pll0_bbp_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9, 0, }, { HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, }, { HI6220_MMC2_SEL, "mmc2_sel", "mmc2_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, }, - { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, }, + { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x270, 12, 0, }, }; static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = { From 85dcb3c850a79751b2239f434a4b6b36a75a865e Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sat, 14 Oct 2017 17:22:25 +0800 Subject: [PATCH 0715/3326] blk-mq-sched: dispatch from scheduler IFF progress is made in ->dispatch [ Upstream commit 5e3d02bbafad38975099b5848f5ebadedcf7bb7e ] When the hw queue is busy, we shouldn't take requests from the scheduler queue any more, otherwise it is difficult to do IO merge. This patch fixes the awful IO performance on some SCSI devices(lpfc, qla2xxx, ...) when mq-deadline/kyber is used by not taking requests if hw queue is busy. Reviewed-by: Omar Sandoval Reviewed-by: Bart Van Assche Reviewed-by: Christoph Hellwig Signed-off-by: Ming Lei Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- block/blk-mq-sched.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 4ab69435708c2b..eca011fdfa0ede 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -94,7 +94,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) struct request_queue *q = hctx->queue; struct elevator_queue *e = q->elevator; const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request; - bool did_work = false; + bool do_sched_dispatch = true; LIST_HEAD(rq_list); /* RCU or SRCU read lock is needed before checking quiesced flag */ @@ -125,18 +125,18 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) */ if (!list_empty(&rq_list)) { blk_mq_sched_mark_restart_hctx(hctx); - did_work = blk_mq_dispatch_rq_list(q, &rq_list); + do_sched_dispatch = blk_mq_dispatch_rq_list(q, &rq_list); } else if (!has_sched_dispatch) { blk_mq_flush_busy_ctxs(hctx, &rq_list); blk_mq_dispatch_rq_list(q, &rq_list); } /* - * We want to dispatch from the scheduler if we had no work left - * on the dispatch list, OR if we did have work but weren't able - * to make progress. + * We want to dispatch from the scheduler if there was nothing + * on the dispatch list or we were able to dispatch from the + * dispatch list. 
*/ - if (!did_work && has_sched_dispatch) { + if (do_sched_dispatch && has_sched_dispatch) { do { struct request *rq; From a23c8c70b5ece8d18c49927a0bace4471d8563cb Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Fri, 15 Sep 2017 12:10:13 -0700 Subject: [PATCH 0716/3326] clk: tegra: Use readl_relaxed_poll_timeout_atomic() in tegra210_clock_init() [ Upstream commit 22ef01a203d27fee8b7694020b7e722db7efd2a7 ] Below is the call trace of tegra210_init_pllu() function: start_kernel() -> time_init() --> of_clk_init() ---> tegra210_clock_init() ----> tegra210_pll_init() -----> tegra210_init_pllu() Because the preemption is disabled in the start_kernel before calling time_init, tegra210_init_pllu is actually in an atomic context while it includes a readl_relaxed_poll_timeout that might sleep. So this patch just changes this readl_relaxed_poll_timeout() to its atomic version. Signed-off-by: Nicolin Chen Acked-By: Peter De Schrijver Signed-off-by: Thierry Reding Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/tegra/clk-tegra210.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 6d7a613f2656a4..b92867814e2d50 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2566,8 +2566,8 @@ static int tegra210_enable_pllu(void) reg |= PLL_ENABLE; writel(reg, clk_base + PLLU_BASE); - readl_relaxed_poll_timeout(clk_base + PLLU_BASE, reg, - reg & PLL_BASE_LOCK, 2, 1000); + readl_relaxed_poll_timeout_atomic(clk_base + PLLU_BASE, reg, + reg & PLL_BASE_LOCK, 2, 1000); if (!(reg & PLL_BASE_LOCK)) { pr_err("Timed out waiting for PLL_U to lock\n"); return -ETIMEDOUT; From 3d213c4c0a9b0d8fb0958d4a461868525ee6d864 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Tue, 19 Sep 2017 04:48:10 +0200 Subject: [PATCH 0717/3326] clk: tegra: Fix cclk_lp divisor register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 54eff2264d3e9fd7e3987de1d7eba1d3581c631e ] According to comments in code and common sense, cclk_lp uses its own divisor, not cclk_g's. Fixes: b08e8c0ecc42 ("clk: tegra: add clock support for Tegra30") Signed-off-by: Michał Mirosław Acked-By: Peter De Schrijver Signed-off-by: Thierry Reding Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/tegra/clk-tegra30.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index a2d163f759b450..07f5203df01c05 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -964,7 +964,7 @@ static void __init tegra30_super_clk_init(void) * U71 divider of cclk_lp. */ clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3", - clk_base + SUPER_CCLKG_DIVIDER, 0, + clk_base + SUPER_CCLKLP_DIVIDER, 0, TEGRA_DIVIDER_INT, 16, 8, 1, NULL); clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL); From f7ee900a4f2c76902ce368e93d34b7580cc7ff04 Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Tue, 31 Oct 2017 18:25:37 +0800 Subject: [PATCH 0718/3326] ppp: Destroy the mutex when cleanup [ Upstream commit f02b2320b27c16b644691267ee3b5c110846f49e ] The mutex_destroy only makes sense when enable DEBUG_MUTEX. For the good readbility, it's better to invoke it in exit func when the init func invokes mutex_init. Signed-off-by: Gao Feng Acked-by: Guillaume Nault Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ppp/ppp_generic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index e365866600ba04..bf14c51f35e17d 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -959,6 +959,7 @@ static __net_exit void ppp_exit_net(struct net *net) unregister_netdevice_many(&list); rtnl_unlock(); + mutex_destroy(&pn->all_ppp_mutex); idr_destroy(&pn->units_idr); } From 556787a174e6f77c7aec167af846f1297e3860f8 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 1 Nov 2017 07:16:58 +0000 Subject: [PATCH 0719/3326] ASoC: rsnd: rsnd_ssi_run_mods() needs to care ssi_parent_mod [ Upstream commit 21781e87881f9c420871b1d1f3f29d4cd7bffb10 ] SSI parent mod might be NULL. ssi_parent_mod() needs to care about it. Otherwise, it uses negative shift. This patch fixes it. Signed-off-by: Kuninori Morimoto Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/soc/sh/rcar/ssi.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index fffc07e7262741..2aef7c00cca110 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -198,10 +198,15 @@ static u32 rsnd_ssi_run_mods(struct rsnd_dai_stream *io) { struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io); struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io); + u32 mods; - return rsnd_ssi_multi_slaves_runtime(io) | - 1 << rsnd_mod_id(ssi_mod) | - 1 << rsnd_mod_id(ssi_parent_mod); + mods = rsnd_ssi_multi_slaves_runtime(io) | + 1 << rsnd_mod_id(ssi_mod); + + if (ssi_parent_mod) + mods |= 1 << rsnd_mod_id(ssi_parent_mod); + + return mods; } u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io) From 5642562d0b4f616cac2b4d7f81de0a58f2247b59 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 19 Oct 2017 19:05:58 +0200 Subject: [PATCH 0720/3326] thermal/drivers/step_wise: Fix temperature regulation misbehavior MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 07209fcf33542c1ff1e29df2dbdf8f29cdaacb10 ] There is a particular situation when the cooling device is cpufreq and the heat dissipation is not efficient enough where the temperature increases little by little until reaching the critical threshold and leading to a SoC reset. The behavior is reproducible on a hikey6220 with bad heat dissipation (eg. stacked with other boards). Running a simple C program doing while(1); for each CPU of the SoC makes the temperature to reach the passive regulation trip point and ends up to the maximum allowed temperature followed by a reset. This issue has been also reported by running the libhugetlbfs test suite. What is observed is a ping pong between two cpu frequencies, 1.2GHz and 900MHz while the temperature continues to grow. It appears the step wise governor calls get_target_state() the first time with the throttle set to true and the trend to 'raising'. The code selects logically the next state, so the cpu frequency decreases from 1.2GHz to 900MHz, so far so good. The temperature decreases immediately but still stays greater than the trip point, then get_target_state() is called again, this time with the throttle set to true *and* the trend to 'dropping'. From there the algorithm assumes we have to step down the state and the cpu frequency jumps back to 1.2GHz. 
But the temperature is still higher than the trip point, so get_target_state() is called with throttle=1 and trend='raising' again, we jump to 900MHz, then get_target_state() is called with throttle=1 and trend='dropping', we jump to 1.2GHz, etc ... but the temperature does not stabilizes and continues to increase. [ 237.922654] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=1,throttle=1 [ 237.922678] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=1,throttle=1 [ 237.922690] thermal cooling_device0: cur_state=0 [ 237.922701] thermal cooling_device0: old_target=0, target=1 [ 238.026656] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=2,throttle=1 [ 238.026680] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=2,throttle=1 [ 238.026694] thermal cooling_device0: cur_state=1 [ 238.026707] thermal cooling_device0: old_target=1, target=0 [ 238.134647] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=1,throttle=1 [ 238.134667] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=1,throttle=1 [ 238.134679] thermal cooling_device0: cur_state=0 [ 238.134690] thermal cooling_device0: old_target=0, target=1 In this situation the temperature continues to increase while the trend is oscillating between 'dropping' and 'raising'. We need to keep the current state untouched if the throttle is set, so the temperature can decrease or a higher state could be selected, thus preventing this oscillation. Keeping the next_target untouched when 'throttle' is true at 'dropping' time fixes the issue. The following traces show the governor does not change the next state if trend==2 (dropping) and throttle==1. [ 2306.127987] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=1,throttle=1 [ 2306.128009] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=1,throttle=1 [ 2306.128021] thermal cooling_device0: cur_state=0 [ 2306.128031] thermal cooling_device0: old_target=0, target=1 [ 2306.231991] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=2,throttle=1 [ 2306.232016] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=2,throttle=1 [ 2306.232030] thermal cooling_device0: cur_state=1 [ 2306.232042] thermal cooling_device0: old_target=1, target=1 [ 2306.335982] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=0,throttle=1 [ 2306.336006] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=0,throttle=1 [ 2306.336021] thermal cooling_device0: cur_state=1 [ 2306.336034] thermal cooling_device0: old_target=1, target=1 [ 2306.439984] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=2,throttle=1 [ 2306.440008] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=2,throttle=0 [ 2306.440022] thermal cooling_device0: cur_state=1 [ 2306.440034] thermal cooling_device0: old_target=1, target=0 [ ... ] After a while, if the temperature continues to increase, the next state becomes 2 which is 720MHz on the hikey. That results in the temperature stabilizing around the trip point. 
[ 2455.831982] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=1,throttle=1 [ 2455.832006] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=1,throttle=0 [ 2455.832019] thermal cooling_device0: cur_state=1 [ 2455.832032] thermal cooling_device0: old_target=1, target=1 [ 2455.935985] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=0,throttle=1 [ 2455.936013] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=0,throttle=0 [ 2455.936027] thermal cooling_device0: cur_state=1 [ 2455.936040] thermal cooling_device0: old_target=1, target=1 [ 2456.043984] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=0,throttle=1 [ 2456.044009] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=0,throttle=0 [ 2456.044023] thermal cooling_device0: cur_state=1 [ 2456.044036] thermal cooling_device0: old_target=1, target=1 [ 2456.148001] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=1,throttle=1 [ 2456.148028] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=1,throttle=1 [ 2456.148042] thermal cooling_device0: cur_state=1 [ 2456.148055] thermal cooling_device0: old_target=1, target=2 [ 2456.252009] thermal thermal_zone0: Trip0[type=1,temp=65000]:trend=2,throttle=1 [ 2456.252041] thermal thermal_zone0: Trip1[type=1,temp=75000]:trend=2,throttle=0 [ 2456.252058] thermal cooling_device0: cur_state=2 [ 2456.252075] thermal cooling_device0: old_target=2, target=1 IOW, this change is needed to keep the state for a cooling device if the temperature trend is oscillating while the temperature increases slightly. Without this change, the situation above leads to a catastrophic crash by a hardware reset on hikey. This issue has been reported to happen on an OMAP dra7xx also. Signed-off-by: Daniel Lezcano Cc: Keerthy Cc: John Stultz Cc: Leo Yan Tested-by: Keerthy Reviewed-by: Keerthy Signed-off-by: Eduardo Valentin Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/thermal/step_wise.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index be95826631b72c..ee047ca43084dc 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -31,8 +31,7 @@ * If the temperature is higher than a trip point, * a. if the trend is THERMAL_TREND_RAISING, use higher cooling * state for this trip point - * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling - * state for this trip point + * b. if the trend is THERMAL_TREND_DROPPING, do nothing * c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit * for this trip point * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit @@ -94,9 +93,11 @@ static unsigned long get_target_state(struct thermal_instance *instance, if (!throttle) next_target = THERMAL_NO_TARGET; } else { - next_target = cur_state - 1; - if (next_target > instance->upper) - next_target = instance->upper; + if (!throttle) { + next_target = cur_state - 1; + if (next_target > instance->upper) + next_target = instance->upper; + } } break; case THERMAL_TREND_DROP_FULL: From 85d63b76bdd1b570b9c16e60f83eb3185e22b30d Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Wed, 11 Oct 2017 14:14:36 +0530 Subject: [PATCH 0721/3326] misc: pci_endpoint_test: Fix failure path return values in probe [ Upstream commit 80068c93688f6143100859c4856f895801c1a1d9 ] Return value of pci_endpoint_test_probe is not set properly in a couple of failure cases. Fix it here. 
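The bug class here is the standard goto-unwind idiom in which the error code must be assigned before every goto; if a branch forgets, whatever is left in err (often 0, meaning success) is returned. Below is a generic, self-contained sketch of the idiom rather than the driver code; every name is invented.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the real setup steps. */
static void *map_registers(int fail) { return fail ? NULL : malloc(16); }
static int   get_id(int fail)        { return fail ? -ENOSPC : 7; }

static int probe(int fail_map, int fail_id)
{
    int err = 0;
    int id;
    void *base = map_registers(fail_map);

    if (!base) {
        err = -ENOMEM;    /* without this line, 0 would leak out */
        goto err_out;
    }

    id = get_id(fail_id);
    if (id < 0) {
        err = id;         /* propagate the real error code */
        goto err_unmap;
    }

    printf("probe ok, id=%d\n", id);
    free(base);
    return 0;

err_unmap:
    free(base);
err_out:
    return err;
}

int main(void)
{
    printf("map failure  -> %d\n", probe(1, 0));
    printf("id failure   -> %d\n", probe(0, 1));
    printf("success path -> %d\n", probe(0, 0));
    return 0;
}

The two assignments added by the patch (err = -ENOMEM and err = id) are exactly the missing lines in this pattern.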
Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Bjorn Helgaas Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/misc/pci_endpoint_test.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index deb20302649652..e787a63a321a76 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -533,6 +533,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, test->base = test->bar[test_reg_bar]; if (!test->base) { + err = -ENOMEM; dev_err(dev, "Cannot perform PCI test without BAR%d\n", test_reg_bar); goto err_iounmap; @@ -542,6 +543,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL); if (id < 0) { + err = id; dev_err(dev, "unable to get id\n"); goto err_iounmap; } From 95e8d653ea987adfe108173bd5969201a363bf19 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 30 Sep 2017 11:16:51 +0300 Subject: [PATCH 0722/3326] misc: pci_endpoint_test: Avoid triggering a BUG() [ Upstream commit 846df244ebefbc9f7b91e9ae7a5e5a2e69fb4772 ] If you call ida_simple_remove(&pci_endpoint_test_ida, id) with a negative "id" then it triggers an immediate BUG_ON(). Let's not allow that. Fixes: 2c156ac71c6b ("misc: Add host side PCI driver for PCI test function device") Signed-off-by: Dan Carpenter Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/misc/pci_endpoint_test.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index e787a63a321a76..e089bb6dde3a84 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -590,6 +590,8 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1) return; + if (id < 0) + return; misc_deregister(&test->miscdev); ida_simple_remove(&pci_endpoint_test_ida, id); From aa902be00448f3bc69fd54b1a0a8f5b4b6a47c5b Mon Sep 17 00:00:00 2001 From: Douglas Gilbert Date: Sun, 29 Oct 2017 10:47:19 -0400 Subject: [PATCH 0723/3326] scsi: scsi_debug: write_same: fix error report [ Upstream commit e33d7c56450b0a5c7290cbf9e1581fab5174f552 ] The scsi_debug driver incorrectly suggests there is an error with the SCSI WRITE SAME command when the number_of_logical_blocks is greater than 1. It will also suggest there is an error when NDOB (no data-out buffer) is set and the number_of_logical_blocks is greater than 0. Both are valid, fix. Signed-off-by: Douglas Gilbert Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/scsi_debug.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 09ba494f889673..92bc5b2d24ae9d 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3001,11 +3001,11 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, if (-1 == ret) { write_unlock_irqrestore(&atomic_rw, iflags); return DID_ERROR << 16; - } else if (sdebug_verbose && (ret < (num * sdebug_sector_size))) + } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) sdev_printk(KERN_INFO, scp->device, - "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", + "%s: %s: lb size=%u, IO sent=%d bytes\n", my_name, "write same", - num * sdebug_sector_size, ret); + sdebug_sector_size, ret); /* Copy first sector to remaining blocks */ for (i = 1 ; i < num ; i++) From 1678bb970113e62469ca510bff20eb95bbdeaec2 Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Wed, 20 Sep 2017 08:30:04 -0500 Subject: [PATCH 0724/3326] GFS2: Take inode off order_write list when setting jdata flag [ Upstream commit cc555b09d8c3817aeebda43a14ab67049a5653f7 ] This patch fixes a deadlock caused when the jdata flag is set for inodes that are already on the ordered write list. Since it is on the ordered write list, log_flush calls gfs2_ordered_write which calls filemap_fdatawrite. But since the inode had the jdata flag set, that calls gfs2_jdata_writepages, which tries to start a new transaction. A new transaction cannot be started because it tries to acquire the log_flush rwsem which is already locked by the log flush operation. The bottom line is: We cannot switch an inode from ordered to jdata until we eliminate any ordered data pages (via log flush) or any log_flush operation afterward will create the circular dependency above. So we need to flush the log before setting the diskflags to switch the file mode, then we need to remove the inode from the ordered writes list. Before this patch, the log flush was done for jdata->ordered, but that's wrong. If we're going from jdata to ordered, we don't need to call gfs2_log_flush because the call to filemap_fdatawrite will do it for us: filemap_fdatawrite() -> __filemap_fdatawrite_range() __filemap_fdatawrite_range() -> do_writepages() do_writepages() -> gfs2_jdata_writepages() gfs2_jdata_writepages() -> gfs2_log_flush() This patch modifies function do_gfs2_set_flags so that if a file has its jdata flag set, and it's already on the ordered write list, the log will be flushed and it will be removed from the list before setting the flag. 
Signed-off-by: Bob Peterson Signed-off-by: Andreas Gruenbacher Acked-by: Abhijith Das Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/gfs2/file.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 33a0cb5701a330..2a29cf3371f696 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -256,7 +256,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) goto out; } if ((flags ^ new_flags) & GFS2_DIF_JDATA) { - if (flags & GFS2_DIF_JDATA) + if (new_flags & GFS2_DIF_JDATA) gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH); error = filemap_fdatawrite(inode->i_mapping); if (error) @@ -264,6 +264,8 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) error = filemap_fdatawait(inode->i_mapping); if (error) goto out; + if (new_flags & GFS2_DIF_JDATA) + gfs2_ordered_del_inode(ip); } error = gfs2_trans_begin(sdp, RES_DINODE, 0); if (error) From 7291d99ebc67be887e5c915489706e6fe720992a Mon Sep 17 00:00:00 2001 From: Adam Sampson Date: Tue, 24 Oct 2017 16:14:46 -0400 Subject: [PATCH 0725/3326] media: usbtv: fix brightness and contrast controls [ Upstream commit b3168c87c0492661badc3e908f977d79e7738a41 ] Because the brightness and contrast controls share a register, usbtv_s_ctrl needs to read the existing values for both controls before inserting the new value. However, the code accidentally wrote to the registers (from an uninitialised stack array), rather than reading them. The user-visible effect of this was that adjusting the brightness would also set the contrast to a random value, and vice versa -- so it wasn't possible to correctly adjust the brightness of usbtv's video output. Tested with an "EasyDAY" UTV007 device. Fixes: c53a846c48f2 ("usbtv: add video controls") Signed-off-by: Adam Sampson Reviewed-by: Lubomir Rintel Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/usbtv/usbtv-video.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c index 95b5f4319ec25a..3668a04359e800 100644 --- a/drivers/media/usb/usbtv/usbtv-video.c +++ b/drivers/media/usb/usbtv/usbtv-video.c @@ -718,8 +718,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl) */ if (ctrl->id == V4L2_CID_BRIGHTNESS || ctrl->id == V4L2_CID_CONTRAST) { ret = usb_control_msg(usbtv->udev, - usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, USBTV_BASE + 0x0244, (void *)data, 3, 0); if (ret < 0) goto error; From 81309f8b9b69913233733ed61c4c87dab5436902 Mon Sep 17 00:00:00 2001 From: Arun Kumar Neelakantam Date: Mon, 30 Oct 2017 11:11:24 +0530 Subject: [PATCH 0726/3326] rpmsg: glink: Initialize the "intent_req_comp" completion variable [ Upstream commit 2394facb17bcace4b3c19b50202177a5d8903b64 ] The "intent_req_comp" variable is used without initialization which results in NULL pointer dereference in qcom_glink_request_intent(). we need to initialize the completion variable before using it. 
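The underlying rule is that a wait/notify object must be fully initialized before any path can signal or wait on it. As a rough userspace analogue only (built on pthreads and mirroring the kernel names, but not the kernel API), a "completion" looks like the sketch below; leaving out the init call before the object becomes reachable is the class of bug being fixed.

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-in for a completion: it must be initialized before any
 * thread can complete() it or wait on it, just like init_completion(). */
struct completion {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int done;
};

static void init_completion(struct completion *c)
{
    pthread_mutex_init(&c->lock, NULL);
    pthread_cond_init(&c->cond, NULL);
    c->done = 0;
}

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = 1;
    pthread_cond_signal(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->cond, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

static void *worker(void *arg)
{
    complete(arg);
    return NULL;
}

int main(void)
{
    struct completion comp;
    pthread_t t;

    init_completion(&comp);    /* must happen before the object is shared */
    pthread_create(&t, NULL, worker, &comp);
    wait_for_completion(&comp);
    pthread_join(t, NULL);
    printf("completed\n");
    return 0;
}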
Fixes: 27b9c5b66b23 ("rpmsg: glink: Request for intents when unavailable") Signed-off-by: Arun Kumar Neelakantam Signed-off-by: Bjorn Andersson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/rpmsg/qcom_glink_native.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index e3242a0925a671..e8e12c2b1d0e0c 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -227,6 +227,7 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, init_completion(&channel->open_req); init_completion(&channel->open_ack); + init_completion(&channel->intent_req_comp); INIT_LIST_HEAD(&channel->done_intents); INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work); From c9973b3d35267028edf8b9a4df6310161bd0ef37 Mon Sep 17 00:00:00 2001 From: Liang Chen Date: Mon, 30 Oct 2017 14:46:35 -0700 Subject: [PATCH 0727/3326] bcache: explicitly destroy mutex while exiting [ Upstream commit 330a4db89d39a6b43f36da16824eaa7a7509d34d ] mutex_destroy does nothing most of time, but it's better to call it to make the code future proof and it also has some meaning for like mutex debug. As Coly pointed out in a previous review, bcache_exit() may not be able to handle all the references properly if userspace registers cache and backing devices right before bch_debug_init runs and bch_debug_init failes later. So not exposing userspace interface until everything is ready to avoid that issue. Signed-off-by: Liang Chen Reviewed-by: Michael Lyle Reviewed-by: Coly Li Reviewed-by: Eric Wheeler Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/super.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index fc0a31b13ac423..25bf003fb198b6 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -2085,6 +2085,7 @@ static void bcache_exit(void) if (bcache_major) unregister_blkdev(bcache_major, "bcache"); unregister_reboot_notifier(&reboot); + mutex_destroy(&bch_register_lock); } static int __init bcache_init(void) @@ -2103,14 +2104,15 @@ static int __init bcache_init(void) bcache_major = register_blkdev(0, "bcache"); if (bcache_major < 0) { unregister_reboot_notifier(&reboot); + mutex_destroy(&bch_register_lock); return bcache_major; } if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) || !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || - sysfs_create_files(bcache_kobj, files) || bch_request_init() || - bch_debug_init(bcache_kobj)) + bch_debug_init(bcache_kobj) || + sysfs_create_files(bcache_kobj, files)) goto err; return 0; From d5860344c243308c0d5e9751553d52d957b29e5b Mon Sep 17 00:00:00 2001 From: "tang.junhui" Date: Mon, 30 Oct 2017 14:46:34 -0700 Subject: [PATCH 0728/3326] bcache: fix wrong cache_misses statistics [ Upstream commit c157313791a999646901b3e3c6888514ebc36d62 ] Currently, Cache missed IOs are identified by s->cache_miss, but actually, there are many situations that missed IOs are not assigned a value for s->cache_miss in cached_dev_cache_miss(), for example, a bypassed IO (s->iop.bypass = 1), or the cache_bio allocate failed. In these situations, it will go to out_put or out_submit, and s->cache_miss is null, which leads bch_mark_cache_accounting() to treat this IO as a hit IO. 
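The statistics issue is only about which condition feeds the hit/miss counters: the old code inferred "hit" from the miss pointer still being NULL, but several miss paths (bypass, failed cache_bio allocation) return early without ever setting it. Below is a toy model of that accounting, not bcache code, with the bypass case hard-coded for illustration.

#include <stdbool.h>
#include <stdio.h>

struct stats { unsigned hits, misses; };

static void mark_cache_accounting(struct stats *st, bool hit)
{
    if (hit)
        st->hits++;
    else
        st->misses++;
}

/* One read that misses the cache but is bypassed: the miss path returns
 * early and never allocates a miss bio, so inferring "hit" from the miss
 * pointer being NULL misclassifies it. */
static void do_read(struct stats *st, bool use_missed_flag)
{
    void *cache_miss_bio = NULL;    /* only set when a cache fill is issued */
    bool cache_missed = false;

    cache_missed = true;    /* entered the cache-miss path, then bypassed */

    if (use_missed_flag)
        mark_cache_accounting(st, !cache_missed);          /* fixed logic */
    else
        mark_cache_accounting(st, cache_miss_bio == NULL); /* old logic   */
}

int main(void)
{
    struct stats old = { 0, 0 }, fixed = { 0, 0 };

    do_read(&old, false);
    do_read(&fixed, true);
    printf("old logic:   hits=%u misses=%u\n", old.hits, old.misses);
    printf("fixed logic: hits=%u misses=%u\n", fixed.hits, fixed.misses);
    return 0;
}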
[ML: applied by 3-way merge] Signed-off-by: tang.junhui Reviewed-by: Michael Lyle Reviewed-by: Coly Li Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/request.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 14d13cab5cda4e..e9fbf2bcd122b8 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -463,6 +463,7 @@ struct search { unsigned recoverable:1; unsigned write:1; unsigned read_dirty_data:1; + unsigned cache_missed:1; unsigned long start_time; @@ -649,6 +650,7 @@ static inline struct search *search_alloc(struct bio *bio, s->orig_bio = bio; s->cache_miss = NULL; + s->cache_missed = 0; s->d = d; s->recoverable = 1; s->write = op_is_write(bio_op(bio)); @@ -767,7 +769,7 @@ static void cached_dev_read_done_bh(struct closure *cl) struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); bch_mark_cache_accounting(s->iop.c, s->d, - !s->cache_miss, s->iop.bypass); + !s->cache_missed, s->iop.bypass); trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass); if (s->iop.status) @@ -786,6 +788,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct bio *miss, *cache_bio; + s->cache_missed = 1; + if (s->cache_miss || s->iop.bypass) { miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); ret = miss == bio ? MAP_DONE : MAP_CONTINUE; From 7d1beb462e18b55fa5d0990d0f60fbf4edfda61b Mon Sep 17 00:00:00 2001 From: Patel Jay P Date: Mon, 23 Oct 2017 06:05:53 -0700 Subject: [PATCH 0729/3326] Ib/hfi1: Return actual operational VLs in port info query [ Upstream commit 00f9203119dd2774564407c7a67b17d81916298b ] __subn_get_opa_portinfo stores value returned by hfi1_get_ib_cfg() as operational vls. hfi1_get_ib_cfg() returns vls_operational field in hfi1_pportdata. The problem with this is that the value is always equal to vls_supported field in hfi1_pportdata. The logic to calculate operational_vls is to set value passed by FM (in __subn_set_opa_portinfo routine). If no value is passed then default value is stored in operational_vls. Field actual_vls_operational is calculated on the basis of buffer control table. Hence, modifying hfi1_get_ib_cfg() to return actual_operational_vls when used with HFI1_IB_CFG_OP_VLS parameter Reviewed-by: Mike Marciniszyn Reviewed-by: Dennis Dalessandro Signed-off-by: Patel Jay P Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/hfi1/chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 312444386f5413..0e17d03ef1cb28 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -9952,7 +9952,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which) goto unimplemented; case HFI1_IB_CFG_OP_VLS: - val = ppd->vls_operational; + val = ppd->actual_vls_operational; break; case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */ val = VL_ARB_HIGH_PRIO_TABLE_SIZE; From e508e6026b198b9879e0d1850fdef8ef1d4d0805 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ronald=20Tschal=C3=A4r?= Date: Wed, 25 Oct 2017 22:15:19 -0700 Subject: [PATCH 0730/3326] Bluetooth: hci_ldisc: Fix another race when closing the tty. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 0338b1b393ec7910898e8f7b25b3bf31a7282e16 ] The following race condition still existed: P1 P2 cancel_work_sync() hci_uart_tx_wakeup() hci_uart_write_work() hci_uart_dequeue() clear_bit(HCI_UART_PROTO_READY) hci_unregister_dev(hdev) hci_free_dev(hdev) hu->proto->close(hu) kfree(hu) access to hdev and hu Cancelling the work after clearing the HCI_UART_PROTO_READY bit avoids this as any hci_uart_tx_wakeup() issued after the flag is cleared will detect that and not schedule further work. Signed-off-by: Ronald Tschalär Reviewed-by: Lukas Wunner Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/hci_ldisc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index a746627e784e7a..6e24038057842e 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -510,13 +510,13 @@ static void hci_uart_tty_close(struct tty_struct *tty) if (hdev) hci_uart_close(hdev); - cancel_work_sync(&hu->write_work); - if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) { write_lock_irqsave(&hu->proto_lock, flags); clear_bit(HCI_UART_PROTO_READY, &hu->flags); write_unlock_irqrestore(&hu->proto_lock, flags); + cancel_work_sync(&hu->write_work); + if (hdev) { if (test_bit(HCI_UART_REGISTERED, &hu->flags)) hci_unregister_dev(hdev); From 7bd6bf08dd5b30dbe0696a7387dedc65a5f9a3e6 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 27 Oct 2017 09:33:41 -0700 Subject: [PATCH 0731/3326] arm64: prevent regressions in compressed kernel image size when upgrading to binutils 2.27 [ Upstream commit fd9dde6abcb9bfe6c6bee48834e157999f113971 ] Upon upgrading to binutils 2.27, we found that our lz4 and gzip compressed kernel images were significantly larger, resulting is 10ms boot time regressions. As noted by Rahul: "aarch64 binaries uses RELA relocations, where each relocation entry includes an addend value. This is similar to x86_64. On x86_64, the addend values are also stored at the relocation offset for relative relocations. This is an optimization: in the case where code does not need to be relocated, the loader can simply skip processing relative relocations. In binutils-2.25, both bfd and gold linkers did this for x86_64, but only the gold linker did this for aarch64. The kernel build here is using the bfd linker, which stored zeroes at the relocation offsets for relative relocations. Since a set of zeroes compresses better than a set of non-zero addend values, this behavior was resulting in much better lz4 compression. The bfd linker in binutils-2.27 is now storing the actual addend values at the relocation offsets. The behavior is now consistent with what it does for x86_64 and what gold linker does for both architectures. The change happened in this upstream commit: https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=1f56df9d0d5ad89806c24e71f296576d82344613 Since a bunch of zeroes got replaced by non-zero addend values, we see the side effect of lz4 compressed image being a bit bigger. To get the old behavior from the bfd linker, "--no-apply-dynamic-relocs" flag can be used: $ LDFLAGS="--no-apply-dynamic-relocs" make With this flag, the compressed image size is back to what it was with binutils-2.25. 
If the kernel is using ASLR, there aren't additional runtime costs to --no-apply-dynamic-relocs, as the relocations will need to be applied again anyway after the kernel is relocated to a random address. If the kernel is not using ASLR, then presumably the current default behavior of the linker is better. Since the static linker performed the dynamic relocs, and the kernel is not moved to a different address at load time, it can skip applying the relocations all over again." Some measurements: $ ld -v GNU ld (binutils-2.25-f3d35cf6) 2.25.51.20141117 ^ $ ls -l vmlinux -rwxr-x--- 1 ndesaulniers eng 300652760 Oct 26 11:57 vmlinux $ ls -l Image.lz4-dtb -rw-r----- 1 ndesaulniers eng 16932627 Oct 26 11:57 Image.lz4-dtb $ ld -v GNU ld (binutils-2.27-53dd00a1) 2.27.0.20170315 ^ pre patch: $ ls -l vmlinux -rwxr-x--- 1 ndesaulniers eng 300376208 Oct 26 11:43 vmlinux $ ls -l Image.lz4-dtb -rw-r----- 1 ndesaulniers eng 18159474 Oct 26 11:43 Image.lz4-dtb post patch: $ ls -l vmlinux -rwxr-x--- 1 ndesaulniers eng 300376208 Oct 26 12:06 vmlinux $ ls -l Image.lz4-dtb -rw-r----- 1 ndesaulniers eng 16932466 Oct 26 12:06 Image.lz4-dtb By Siqi's measurement w/ gzip: binutils 2.27 with this patch (with --no-apply-dynamic-relocs): Image 41535488 Image.gz 13404067 binutils 2.27 without this patch (without --no-apply-dynamic-relocs): Image 41535488 Image.gz 14125516 Any compression scheme should be able to get better results from the longer runs of zeros, not just GZIP and LZ4. 10ms boot time savings isn't anything to get excited about, but users of arm64+compression+bfd-2.27 should not have to pay a penalty for no runtime improvement. Reported-by: Gopinath Elanchezhian Reported-by: Sindhuri Pentyala Reported-by: Wei Wang Suggested-by: Ard Biesheuvel Suggested-by: Rahul Chaudhry Suggested-by: Siqi Lin Suggested-by: Stephen Hines Signed-off-by: Nick Desaulniers Reviewed-by: Ard Biesheuvel [will: added comment to Makefile] Signed-off-by: Will Deacon Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm64/Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 3eb4397150df8c..7318165cfc90b5 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -14,8 +14,12 @@ LDFLAGS_vmlinux :=-p --no-undefined -X CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET) GZFLAGS :=-9 -ifneq ($(CONFIG_RELOCATABLE),) -LDFLAGS_vmlinux += -pie -shared -Bsymbolic +ifeq ($(CONFIG_RELOCATABLE), y) +# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour +# for relative relocs, since this leads to better Image compression +# with the relocation offsets always being zero. +LDFLAGS_vmlinux += -pie -shared -Bsymbolic \ + $(call ld-option, --no-apply-dynamic-relocs) endif ifeq ($(CONFIG_ARM64_ERRATUM_843419),y) From 4bcbfac98d517eae6d12257aed923347be16930c Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Sat, 14 Oct 2017 08:34:02 +0800 Subject: [PATCH 0732/3326] btrfs: fix false EIO for missing device MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 102ed2c5ff932439bbbe74c7bd63e6d5baa9f732 ] When one of the device is missing, bbio_error() takes care of setting the error status. And if its only IO that is pending in that stripe, it fails to check the status of the other IO at %bbio_error before setting the error %bi_status for the %orig_bio. Fix this by checking if %bbio->error has exceeded the %bbio->max_errors. Reproducer as below fdatasync error is seen intermittently. 
mount -o degraded /dev/sdc /btrfs dd status=none if=/dev/zero of=$(mktemp /btrfs/XXX) bs=4096 count=1 conv=fdatasync dd: fdatasync failed for ‘/btrfs/LSe’: Input/output error The reason for the intermittences of the problem is because the following conditions have to be met, which depends on timing: In btrfs_map_bio() - the RAID1 the missing device has to be at %dev_nr = 1 In bbio_error() . before bbio_error() is called the bio of the not-missing device at %dev_nr = 0 must be completed so that the below condition is true if (atomic_dec_and_test(&bbio->stripes_pending)) { Signed-off-by: Anand Jain Reviewed-by: Liu Bo Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/volumes.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b39737568c223c..abc57f7b27160c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6144,7 +6144,10 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; bio->bi_iter.bi_sector = logical >> 9; - bio->bi_status = BLK_STS_IOERR; + if (atomic_read(&bbio->error) > bbio->max_errors) + bio->bi_status = BLK_STS_IOERR; + else + bio->bi_status = BLK_STS_OK; btrfs_end_bbio(bbio, bio); } } From 9e87c49d62f4c5b9ae4a56b8f62865c4712c8d4d Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 28 Sep 2017 10:53:17 +0300 Subject: [PATCH 0733/3326] btrfs: Explicitly handle btrfs_update_root failure [ Upstream commit 9417ebc8a676487c6ec8825f92fb28f7dbeb5f4b ] btrfs_udpate_root can fail and it aborts the transaction, the correct way to handle an aborted transaction is to explicitly end with btrfs_end_transaction. Even now the code is correct since btrfs_commit_transaction would handle an aborted transaction but this is more of an implementation detail. So let's be explicit in handling failure in btrfs_update_root. Furthermore btrfs_commit_transaction can also fail and by ignoring it's return value we could have left the in-memory copy of the root item in an inconsistent state. So capture the error value which allows us to correctly revert the RO/RW flags in case of commit failure. Signed-off-by: Nikolay Borisov Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/ioctl.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 6c7a49faf4e063..1f1338d523038d 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1842,8 +1842,13 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, ret = btrfs_update_root(trans, fs_info->tree_root, &root->root_key, &root->root_item); + if (ret < 0) { + btrfs_end_transaction(trans); + goto out_reset; + } + + ret = btrfs_commit_transaction(trans); - btrfs_commit_transaction(trans); out_reset: if (ret) btrfs_set_root_flags(&root->root_item, root_flags); From da76a65a011490a0b0bd9f50d872f26e629d1eed Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Thu, 28 Sep 2017 14:51:09 +0800 Subject: [PATCH 0734/3326] btrfs: undo writable superblocke when sprouting fails [ Upstream commit 0af2c4bf5a012a40a2f9230458087d7f068339d0 ] When new device is being added to seed FS, seed FS is marked writable, but when we fail to bring in the new device, we missed to undo the writable part. This patch fixes it. 
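A minimal sketch of the restore-on-failure pattern the fix applies, assuming generic names (fs, sprout, add_device) rather than the real btrfs interfaces: state that was flipped before the operation is put back on the error path.

    #include <assert.h>
    #include <errno.h>
    #include <stdbool.h>

    struct fs { unsigned long flags; };
    #define FS_RDONLY (1UL << 0)

    static int add_device(struct fs *fs, bool fail)
    {
        (void)fs;
        return fail ? -EIO : 0;
    }

    static int sprout(struct fs *fs, bool fail)
    {
        bool was_rdonly = fs->flags & FS_RDONLY;
        int ret;

        fs->flags &= ~FS_RDONLY;          /* seed fs becomes writable up front */
        ret = add_device(fs, fail);
        if (ret && was_rdonly)
            fs->flags |= FS_RDONLY;       /* failure: restore the original state */
        return ret;
    }

    int main(void)
    {
        struct fs fs = { .flags = FS_RDONLY };
        sprout(&fs, true);                /* simulate a failed device add */
        assert(fs.flags & FS_RDONLY);     /* read-only state was restored */
        return 0;
    }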
Signed-off-by: Anand Jain Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/volumes.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index abc57f7b27160c..0c11121a8ace70 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2501,6 +2501,8 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path return ret; error_trans: + if (seeding_dev) + sb->s_flags |= MS_RDONLY; btrfs_end_transaction(trans); rcu_string_free(device->name); btrfs_sysfs_rm_device_link(fs_info->fs_devices, device); From 864a5fb1c6066ade93a640a279bcd191952e4181 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 11 Sep 2017 16:15:28 +0100 Subject: [PATCH 0735/3326] btrfs: avoid null pointer dereference on fs_info when calling btrfs_crit [ Upstream commit 3993b112dac968612b0b213ed59cb30f50b0015b ] There are checks on fs_info in __btrfs_panic to avoid dereferencing a null fs_info, however, there is a call to btrfs_crit that may also dereference a null fs_info. Fix this by adding a check to see if fs_info is null and only print the s_id if fs_info is non-null. Detected by CoverityScan CID#401973 ("Dereference after null check") Fixes: efe120a067c8 ("Btrfs: convert printk to btrfs_ and fix BTRFS prefix") Signed-off-by: Colin Ian King Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/super.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 161694b660385a..e8f5e24325f338 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -202,7 +202,6 @@ static struct ratelimit_state printk_limits[] = { void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) { - struct super_block *sb = fs_info->sb; char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0"; struct va_format vaf; va_list args; @@ -228,7 +227,8 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) vaf.va = &args; if (__ratelimit(ratelimit)) - printk("%sBTRFS %s (device %s): %pV\n", lvl, type, sb->s_id, &vaf); + printk("%sBTRFS %s (device %s): %pV\n", lvl, type, + fs_info ? fs_info->sb->s_id : "", &vaf); va_end(args); } From c97df8e004de63e50384706d00adb590adcef054 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 10 Sep 2017 13:19:38 +0200 Subject: [PATCH 0736/3326] btrfs: tests: Fix a memory leak in error handling path in 'run_test()' [ Upstream commit 9ca2e97fa3c3216200afe35a3b111ec51cc796d2 ] If 'btrfs_alloc_path()' fails, we must free the resources already allocated, as done in the other error handling paths in this function. 
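For illustration, a self-contained sketch of the goto-based unwind this fix restores; the resource names are generic, not the free-space-tree test code. Early failures may return directly, but once something has been allocated, later failures must funnel through the common cleanup label.

    #include <errno.h>
    #include <stdlib.h>

    struct ctx { void *cache; void *path; };

    static int run_test(struct ctx *c)
    {
        int ret = 0;

        c->cache = malloc(64);
        if (!c->cache)
            return -ENOMEM;               /* nothing allocated yet: plain return */

        c->path = malloc(64);
        if (!c->path) {
            ret = -ENOMEM;                /* later failure: fall through to the */
            goto out;                     /* common unwind instead of returning */
        }

        /* ... use cache and path ... */

        free(c->path);
    out:
        free(c->cache);                   /* every exit after this point frees it */
        return ret;
    }

    int main(void)
    {
        struct ctx c = { 0 };
        return run_test(&c) ? 1 : 0;
    }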
Signed-off-by: Christophe JAILLET Reviewed-by: Qu Wenruo Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/tests/free-space-tree-tests.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index 1458bb0ea124a3..8444a018cca292 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c @@ -500,7 +500,8 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize, path = btrfs_alloc_path(); if (!path) { test_msg("Couldn't allocate path\n"); - return -ENOMEM; + ret = -ENOMEM; + goto out; } ret = add_block_group_free_space(&trans, root->fs_info, cache); From bca4d7e1a19cfdde2a4a340339e46248a691b196 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Mon, 30 Oct 2017 13:13:46 +0300 Subject: [PATCH 0737/3326] qtnfmac: modify full Tx queue error reporting [ Upstream commit e9931f984dd1e80adb3b5e095ef175fe383bc92d ] Under heavy load it is normal that h/w Tx queue is almost full all the time and reclaim should be done before transmitting next packet. Warning still should be reported as well as s/w Tx queues should be stopped in the case when reclaim failed. Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 69131965a298fd..146e42a132e726 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -643,11 +643,11 @@ static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv) { if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, priv->tx_bd_num)) { - pr_err_ratelimited("reclaim full Tx queue\n"); qtnf_pcie_data_tx_reclaim(priv); if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, priv->tx_bd_num)) { + pr_warn_ratelimited("reclaim full Tx queue\n"); priv->tx_full_count++; return 0; } From 000a335b2d3828382bb4612ceee176a5cbd05752 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 26 Oct 2017 17:12:33 +0200 Subject: [PATCH 0738/3326] mtd: spi-nor: stm32-quadspi: Fix uninitialized error return code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 05521bd3d117704a1458eb4d0c3ae821858658f2 ] With gcc 4.1.2: drivers/mtd/spi-nor/stm32-quadspi.c: In function ‘stm32_qspi_tx_poll’: drivers/mtd/spi-nor/stm32-quadspi.c:230: warning: ‘ret’ may be used uninitialized in this function Indeed, if stm32_qspi_cmd.len is zero, ret will be uninitialized. This length is passed from outside the driver using the spi_nor.{read,write}{,_reg}() callbacks. Several functions in drivers/mtd/spi-nor/spi-nor.c (e.g. write_enable(), write_disable(), and erase_chip()) call spi_nor.write_reg() with a zero length. Fix this by returning an explicit zero on success. 
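A small, hedged illustration of the hazard (generic names, not the stm32-quadspi driver): when the loop body may execute zero times, success must be returned explicitly rather than through a variable that is only written inside the loop.

    #include <stddef.h>
    #include <stdio.h>

    static int poll_one_word(void) { return 0; }     /* stand-in for the FIFO wait */

    static int tx_poll(const unsigned char *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            int ret = poll_one_word();
            if (ret)
                return ret;               /* propagate the real error immediately */
            (void)buf[i];                 /* ... write the byte to the FIFO ... */
        }
        return 0;                         /* explicit success, even when len == 0 */
    }

    int main(void)
    {
        printf("%d\n", tx_poll(NULL, 0)); /* zero-length transfer: prints 0 */
        return 0;
    }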
Fixes: 0d43d7ab277a048c ("mtd: spi-nor: add driver for STM32 quad spi flash controller") Signed-off-by: Geert Uytterhoeven Acked-by: Ludovic Barre Signed-off-by: Cyrille Pitchen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/spi-nor/stm32-quadspi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c index 86c0931543c538..ad6a3e1844cbe5 100644 --- a/drivers/mtd/spi-nor/stm32-quadspi.c +++ b/drivers/mtd/spi-nor/stm32-quadspi.c @@ -240,12 +240,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi, STM32_QSPI_FIFO_TIMEOUT_US); if (ret) { dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr); - break; + return ret; } tx_fifo(buf++, qspi->io_base + QUADSPI_DR); } - return ret; + return 0; } static int stm32_qspi_tx_mm(struct stm32_qspi *qspi, From a54d17dba2ba4a1eaab65fd282f4b4c9bf80418b Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Thu, 19 Oct 2017 12:31:09 +0200 Subject: [PATCH 0739/3326] ARM64: dts: meson-gxbb-odroidc2: fix usb1 power supply [ Upstream commit e841ec956e539f4002f5e9fe9f9e904dcca12d5d ] Looking at the schematics, the USB Power Supply is shared between the two USB interfaces, If the usb0 fails to initialize, the second one won't have power. Fixes: 5a0803bd5ae2 ("ARM64: dts: meson-gxbb-odroidc2: Enable USB Nodes") Signed-off-by: Neil Armstrong Signed-off-by: Kevin Hilman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 1ffa1c238a7253..08b7bb7f5b7496 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -301,6 +301,7 @@ &usb1_phy { status = "okay"; + phy-supply = <&usb_otg_pwr>; }; &usb0 { From c0f98c0dbced1c1b48fd4427075f8a5295b402fb Mon Sep 17 00:00:00 2001 From: Bartosz Chronowski Date: Thu, 26 Oct 2017 10:22:43 +0200 Subject: [PATCH 0740/3326] Bluetooth: btusb: Add new NFA344A entry. [ Upstream commit 858ff38af77fc660092e82474ecc6ac135ed29fe ] This change allows proper low power mode entry in suspend. /sys/kernel/debug/usb/devices entry: T: Bus=01 Lev=01 Prnt=01 Port=05 Cnt=03 Dev#= 3 Spd=12 MxCh= 0 D: Ver= 2.01 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=0489 ProdID=e09f Rev= 0.01 C:* #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) 
MxPS= 16 Ivl=1ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms Signed-off-by: Bartosz Chronowski Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/btusb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 7a5c06aaa18106..513a7a59d421e7 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME }, From 06f430379273700696eef22159117789853a38e3 Mon Sep 17 00:00:00 2001 From: Tushar Dave Date: Fri, 27 Oct 2017 16:12:30 -0700 Subject: [PATCH 0741/3326] samples/bpf: adjust rlimit RLIMIT_MEMLOCK for xdp1 [ Upstream commit 6dfca831c03ef654b1f7bff1b8d487d330e9f76b ] Default rlimit RLIMIT_MEMLOCK is 64KB, causes bpf map failure. e.g. [root@lab bpf]#./xdp1 -N $( Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- samples/bpf/xdp1_user.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c index 2431c0321b712c..fdaefe91801d9c 100644 --- a/samples/bpf/xdp1_user.c +++ b/samples/bpf/xdp1_user.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "bpf_load.h" #include "bpf_util.h" @@ -69,6 +70,7 @@ static void usage(const char *prog) int main(int argc, char **argv) { + struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; const char *optstr = "SN"; char filename[256]; int opt; @@ -91,6 +93,12 @@ int main(int argc, char **argv) usage(basename(argv[0])); return 1; } + + if (setrlimit(RLIMIT_MEMLOCK, &r)) { + perror("setrlimit(RLIMIT_MEMLOCK)"); + return 1; + } + ifindex = strtoul(argv[optind], NULL, 0); snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); From 6bd7a9bc48ac642cb3ded1e40fcfee44c8a0aa1a Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Thu, 26 Oct 2017 16:46:36 -0700 Subject: [PATCH 0742/3326] liquidio: fix kernel panic in VF driver [ Upstream commit aa28667cfbe4ff6f14454dda210b1f2e485f99b5 ] Doing ifconfig down on VF driver in the middle of receiving line rate traffic causes a kernel panic: LiquidIO_VF 0000:02:00.3: should not come here should not get rx when poll mode = 0 for vf BUG: unable to handle kernel NULL pointer dereference at (null) . . . Call Trace: ? tasklet_action+0x102/0x120 __do_softirq+0x91/0x292 irq_exit+0xb6/0xc0 do_IRQ+0x4f/0xd0 common_interrupt+0x93/0x93 RIP: 0010:cpuidle_enter_state+0x142/0x2f0 RSP: 0018:ffffffffa6403e20 EFLAGS: 00000246 ORIG_RAX: ffffffffffffff59 RAX: 0000000000000000 RBX: 0000000000000003 RCX: 000000000000001f RDX: 0000000000000000 RSI: 000000002ab7519f RDI: 0000000000000000 RBP: ffffffffa6403e58 R08: 0000000000000084 R09: 0000000000000018 R10: ffffffffa6403df0 R11: 00000000000003c7 R12: 0000000000000003 R13: ffffd27ebd806800 R14: ffffffffa64d40d8 R15: 0000007be072823f cpuidle_enter+0x17/0x20 call_cpuidle+0x23/0x40 do_idle+0x18c/0x1f0 cpu_startup_entry+0x64/0x70 rest_init+0xa5/0xb0 start_kernel+0x45e/0x46b x86_64_start_reservations+0x24/0x26 x86_64_start_kernel+0x6f/0x72 secondary_startup_64+0xa5/0xa5 Code: Bad RIP value. RIP: (null) RSP: ffff9246ed003f28 CR2: 0000000000000000 ---[ end trace 92731e80f31b7d7d ]--- Kernel panic - not syncing: Fatal exception in interrupt Kernel Offset: 0x24000000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff) ---[ end Kernel panic - not syncing: Fatal exception in interrupt Reason is: in the function assigned to net_device_ops->ndo_stop, the steps for bringing down the interface are done in the wrong order. The step that notifies the NIC firmware to stop forwarding packets to host is done too late. Fix it by moving that step to the beginning. Signed-off-by: Felix Manlunas Signed-off-by: Raghu Vatsavayi Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 2e993ce43b6612..4d2db22e011b30 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1289,6 +1289,9 @@ static int liquidio_stop(struct net_device *netdev) struct octeon_device *oct = lio->oct_dev; struct napi_struct *napi, *n; + /* tell Octeon to stop forwarding packets to host */ + send_rx_ctrl_cmd(lio, 0); + if (oct->props[lio->ifidx].napi_enabled) { list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_disable(napi); @@ -1306,9 +1309,6 @@ static int liquidio_stop(struct net_device *netdev) netif_carrier_off(netdev); lio->link_changes++; - /* tell Octeon to stop forwarding packets to host */ - send_rx_ctrl_cmd(lio, 0); - ifstate_reset(lio, LIO_IFSTATE_RUNNING); txqs_stop(netdev); From 21cd9fe750941100b7b4643ffd765215b6daff64 Mon Sep 17 00:00:00 2001 From: Osama Khan Date: Sat, 21 Oct 2017 10:42:21 +0000 Subject: [PATCH 0743/3326] platform/x86: hp_accel: Add quirk for HP ProBook 440 G4 [ Upstream commit 163ca80013aafb6dc9cb295de3db7aeab9ab43f8 ] Added support for HP ProBook 440 G4 laptops by including the accelerometer orientation quirk for that device. Testing was performed based on the axis orientation guidelines here: https://www.kernel.org/doc/Documentation/misc-devices/lis3lv02d which states "If the left side is elevated, X increases (becomes positive)". When tested, on lifting the left edge, x values became increasingly negative thus indicating an inverted x-axis on the installed lis3lv02d chip. This was compensated by adding an entry for this device in hp_accel.c specifying the quirk as x_inverted. The patch was tested on a ProBook 440 G4 device and x-axis as well as y and z-axis values are now generated as per spec. Signed-off-by: Osama Khan Signed-off-by: Andy Shevchenko Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/hp_accel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index 493d8910a74e2e..7b12abe86b94f3 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = { AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd), + AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted), AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), From 4edcbfc5651276a28a1501e03198f6f16974617c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 18 Oct 2017 13:20:01 +0200 Subject: [PATCH 0744/3326] nvme: use kref_get_unless_zero in nvme_find_get_ns [ Upstream commit 2dd4122854f697afc777582d18548dded03ce5dd ] For kref_get_unless_zero to protect against lookup vs free races we need to use it in all places where we aren't guaranteed to already hold a reference. There is no such guarantee in nvme_find_get_ns, so switch to kref_get_unless_zero in this function. 
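The lookup pattern can be sketched with a simplified userspace model; the structures below are illustrative stand-ins for the controller's namespace list, and get_unless_zero() mimics kref_get_unless_zero(): a lookup only succeeds if it can take a reference before the count reaches zero.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct ns {
        unsigned int id;
        atomic_uint  refcount;            /* 0 means the object is being freed */
        struct ns   *next;
    };

    /* Equivalent of kref_get_unless_zero(): only take a reference if the
     * object is not already on its way to being destroyed. */
    static bool get_unless_zero(struct ns *ns)
    {
        unsigned int old = atomic_load(&ns->refcount);
        while (old != 0) {
            if (atomic_compare_exchange_weak(&ns->refcount, &old, old + 1))
                return true;
        }
        return false;
    }

    static struct ns *find_get_ns(struct ns *head, unsigned int id)
    {
        /* Caller holds the list lock; it protects the list, not the lifetime. */
        for (struct ns *ns = head; ns; ns = ns->next) {
            if (ns->id != id)
                continue;
            if (!get_unless_zero(ns))     /* lost the race with the final put */
                continue;                 /* keep scanning instead of returning */
            return ns;
        }
        return NULL;
    }

    int main(void)
    {
        struct ns a = { .id = 1, .refcount = 1, .next = NULL };
        return find_get_ns(&a, 1) ? 0 : 1;
    }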
Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 37f9039bb9cab2..0655f45643d909 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2299,7 +2299,8 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) mutex_lock(&ctrl->namespaces_mutex); list_for_each_entry(ns, &ctrl->namespaces, list) { if (ns->ns_id == nsid) { - kref_get(&ns->kref); + if (!kref_get_unless_zero(&ns->kref)) + continue; ret = ns; break; } From ff62605e0d79c870f679bcd8571bf9cb5f7e1c93 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Wed, 25 Oct 2017 15:57:55 +0200 Subject: [PATCH 0745/3326] l2tp: cleanup l2tp_tunnel_delete calls [ Upstream commit 4dc12ffeaeac939097a3f55c881d3dc3523dff0c ] l2tp_tunnel_delete does not return anything since commit 62b982eeb458 ("l2tp: fix race condition in l2tp_tunnel_delete"). But call sites of l2tp_tunnel_delete still do casts to void to avoid unused return value warnings. Kill these now useless casts. Signed-off-by: Jiri Slaby Cc: Sabrina Dubroca Cc: Guillaume Nault Cc: David S. Miller Cc: netdev@vger.kernel.org Acked-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/l2tp/l2tp_core.c | 2 +- net/l2tp/l2tp_netlink.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 02d61101b108dc..af22aa8ae35bdd 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1891,7 +1891,7 @@ static __net_exit void l2tp_exit_net(struct net *net) rcu_read_lock_bh(); list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { - (void)l2tp_tunnel_delete(tunnel); + l2tp_tunnel_delete(tunnel); } rcu_read_unlock_bh(); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 7135f4645d3aa6..c28223d8092b18 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -282,7 +282,7 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info l2tp_tunnel_notify(&l2tp_nl_family, info, tunnel, L2TP_CMD_TUNNEL_DELETE); - (void) l2tp_tunnel_delete(tunnel); + l2tp_tunnel_delete(tunnel); l2tp_tunnel_dec_refcount(tunnel); From 742b570da6e6447e4a98acceb27e8a7d5b436d7c Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Thu, 26 Oct 2017 09:31:16 -0700 Subject: [PATCH 0746/3326] xfs: fix log block underflow during recovery cycle verification [ Upstream commit 9f2a4505800607e537e9dd9dea4f55c4b0c30c7a ] It is possible for mkfs to format very small filesystems with too small of an internal log with respect to the various minimum size and block count requirements. If this occurs when the log happens to be smaller than the scan window used for cycle verification and the scan wraps the end of the log, the start_blk calculation in xlog_find_head() underflows and leads to an attempt to scan an invalid range of log blocks. This results in log recovery failure and a failed mount. Since there may be filesystems out in the wild with this kind of geometry, we cannot simply refuse to mount. Instead, cap the scan window for cycle verification to the size of the physical log. This ensures that the cycle verification proceeds as expected when the scan wraps the end of the log. 
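A worked example of the underflow, using made-up block counts rather than real filesystem geometry: with a 2048-block scan window and a 1024-block log, the wrapped start block goes negative unless the window is first clamped to the physical log size.

    #include <stdio.h>

    #define SCAN_WINDOW 2048              /* fixed cycle-verification window */

    static long start_block(long head_blk, long log_bbnum, int clamp)
    {
        long scan = SCAN_WINDOW;

        if (clamp && scan > log_bbnum)
            scan = log_bbnum;             /* never scan more than the log holds */

        if (head_blk >= scan)
            return head_blk - scan;       /* window fits before the head */

        /* Window wraps the end of the log: without the clamp this goes
         * negative once SCAN_WINDOW exceeds log_bbnum. */
        return log_bbnum - (scan - head_blk);
    }

    int main(void)
    {
        long head = 100, log_size = 1024; /* a very small log */
        printf("unclamped start: %ld\n", start_block(head, log_size, 0)); /* negative */
        printf("clamped start:   %ld\n", start_block(head, log_size, 1)); /* in range */
        return 0;
    }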
Reported-by: Zorro Lang Signed-off-by: Brian Foster Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_log_recover.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index ee34899396b263..d6e049fdd977d7 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -753,7 +753,7 @@ xlog_find_head( * in the in-core log. The following number can be made tighter if * we actually look at the block size of the filesystem. */ - num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); + num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log)); if (head_blk >= num_scan_bblks) { /* * We are guaranteed that the entire check can be performed From 61bc71d34a00801657591e142f374794b6dc3548 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 17 Oct 2017 21:37:32 -0700 Subject: [PATCH 0747/3326] xfs: return a distinct error code value for IGET_INCORE cache misses [ Upstream commit ed438b476b611c67089760037139f93ea8ed41d5 ] For an XFS_IGET_INCORE iget operation, if the inode isn't in the cache, return ENODATA so that we don't confuse it with the pre-existing ENOENT cases (inode is in cache, but freed). Signed-off-by: Darrick J. Wong Reviewed-by: Brian Foster Reviewed-by: Dave Chinner Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_icache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 34227115a5d6e7..43005fbe8b1eef 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -610,7 +610,7 @@ xfs_iget( } else { rcu_read_unlock(); if (flags & XFS_IGET_INCORE) { - error = -ENOENT; + error = -ENODATA; goto out_error_or_again; } XFS_STATS_INC(mp, xs_ig_missed); From 4be2d1ad59d0d9251bd1fc618a6d1266b16e3b75 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 17 Oct 2017 14:16:19 -0700 Subject: [PATCH 0748/3326] xfs: fix incorrect extent state in xfs_bmap_add_extent_unwritten_real [ Upstream commit 5e422f5e4fd71d18bc6b851eeb3864477b3d842e ] There was one spot in xfs_bmap_add_extent_unwritten_real that didn't use the passed in new extent state but always converted to normal, leading to wrong behavior when converting from normal to unwritten. Only found by code inspection, it seems like this code path to move partial extent from written to unwritten while merging it with the next extent is rarely exercised. Signed-off-by: Christoph Hellwig Reviewed-by: Brian Foster Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/xfs/libxfs/xfs_bmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 89263797cf3253..a3cc8afed367c7 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -2560,7 +2560,7 @@ xfs_bmap_add_extent_unwritten_real( &i))) goto done; XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done); - cur->bc_rec.b.br_state = XFS_EXT_NORM; + cur->bc_rec.b.br_state = new->br_state; if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); From 68a6765b70d90debde63bd26599c3422116751ef Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Tue, 24 Oct 2017 17:14:10 +0200 Subject: [PATCH 0749/3326] net: dsa: lan9303: Do not disable switch fabric port 0 at .probe [ Upstream commit 3c91b0c1de8d013490bbc41ce9ee8810ea5baddd ] Make the LAN9303 work when lan9303_probe() is called twice. For some unknown reason the LAN9303 switch fail to forward data when switch fabric port 0 TX is disabled during probe. (Write of LAN9303_MAC_TX_CFG_0 in lan9303_disable_processing_port().) In that situation the switch fabric seem to receive frames, because the ALR is learning addresses. But no frames are transmitted on any of the ports. In our system lan9303_probe() is called twice, first time dsa_register_switch() return -EPROBE_DEFER. As an experiment, modified the code to skip writing LAN9303_MAC_TX_CFG_0, port 0 during the first probe. Then the switch works as expected. Resolve the problem by not calling lan9303_disable_processing_port() on port 0 during probe. Ports 1 and 2 are still disabled. Although unsatisfying that the exact failure mechanism is not known, the patch should not cause any harm. Signed-off-by: Egil Hjelmeland Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/dsa/lan9303-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index b471413d3df982..1e5a69b9d90a35 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -569,7 +569,7 @@ static int lan9303_disable_processing(struct lan9303 *chip) { int p; - for (p = 0; p < LAN9303_NUM_PORTS; p++) { + for (p = 1; p < LAN9303_NUM_PORTS; p++) { int ret = lan9303_disable_processing_port(chip, p); if (ret) From 0949f8afa8f1b9d6ebce48606e41dfcdafbaca4a Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 24 Oct 2017 21:02:11 +0800 Subject: [PATCH 0750/3326] net: hns3: fix a bug in hclge_uninit_client_instance [ Upstream commit a17dcf3f0124698d1120da71574bf4c339e5a368 ] HNS3 driver initialize hdev->roce_client and vport->roce.client in hclge_init_client_instance, and need set hdev->roce_client and vport->roce.client NULL. If do not set them NULL when uninit, it will fail in the scene: insmod hns3.ko, hns-roce.ko, hns-roce-hw-v3.ko successfully, but rmmod hns3.ko after rmmod hns-roce-hw-v2.ko and hns-roce.ko. This patch fixes the issue. Fixes: 46a3df9 (net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support) Signed-off-by: Lipeng Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c1cdbfd83bdba9..1df84a874d958a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4007,13 +4007,19 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { vport = &hdev->vport[i]; - if (hdev->roce_client) + if (hdev->roce_client) { hdev->roce_client->ops->uninit_instance(&vport->roce, 0); + hdev->roce_client = NULL; + vport->roce.client = NULL; + } if (client->type == HNAE3_CLIENT_ROCE) return; - if (client->ops->uninit_instance) + if (client->ops->uninit_instance) { client->ops->uninit_instance(&vport->nic, 0); + hdev->nic_client = NULL; + vport->nic.client = NULL; + } } } From 1d9205558e50b7af5fe5b054bf26f93c9255786e Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 24 Oct 2017 21:02:10 +0800 Subject: [PATCH 0751/3326] net: hns3: add nic_client check when initialize roce base information [ Upstream commit 3a46f34d20d453f09defb76b11a567647939c0aa ] Roce driver works base on HNS3 driver.If insmod Roce driver before NIC driver there is a error because do not check nic_client. This patch adds nic_client check when initialize roce base information. Fixes: 46a3df9 (net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support) Signed-off-by: Lipeng Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 1df84a874d958a..a0ef97e7f3c935 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3981,7 +3981,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, vport->roce.client = client; } - if (hdev->roce_client) { + if (hdev->roce_client && hdev->nic_client) { ret = hclge_init_roce_base_info(vport); if (ret) goto err; From 9d48d002a2f68e35bea2872a5533436848f30a60 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 24 Oct 2017 21:02:09 +0800 Subject: [PATCH 0752/3326] net: hns3: fix the bug of hns3_set_txbd_baseinfo [ Upstream commit 7036d26f328f12a323069eb16d965055b4cb3795 ] The SC bits of TX BD mean switch control. For this area, value 0 indicates no switch control, the packet is routed according to the forwarding table. Value 1 indicates that the packet is transmitted to the network bypassing the forwarding table. As HNS3 driver need support VF later, VF conmunicate with its own PF need forwarding table. This patch sets SC bits of TX BD 0 and use forwarding table. Fixes: 76ad4f0 (net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC) Signed-off-by: Lipeng Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 981fc62ce04343..52687f0691f157 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -721,7 +721,7 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) HNS3_TXD_BDTYPE_M, 0); hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1); + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); } static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, From 18c8d94eac0fd6c859033b2eb05941f19c3f7b4a Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 25 Oct 2017 07:41:11 +0300 Subject: [PATCH 0753/3326] RDMA/cxgb4: Declare stag as __be32 [ Upstream commit 35fb2a88ed4b77356fa679a8525c869a3594e287 ] The scqe.stag is actually __b32, fix it. drivers/infiniband/hw/cxgb4/cq.c:754:52: warning: cast to restricted __be32 Cc: Steve Wise Signed-off-by: Leon Romanovsky Reviewed-by: Steve Wise Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/cxgb4/t4.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index e765c00303cdaa..bcb80ca67d3d33 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -171,7 +171,7 @@ struct t4_cqe { __be32 msn; } rcqe; struct { - u32 stag; + __be32 stag; u16 nada2; u16 cidx; } scqe; From 96ed7ca7323be977d617d077146b322e844bdcb9 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Wed, 11 Oct 2017 15:35:56 -0600 Subject: [PATCH 0754/3326] PCI: Detach driver before procfs & sysfs teardown on device remove [ Upstream commit 16b6c8bb687cc3bec914de09061fcb8411951fda ] When removing a device, for example a VF being removed due to SR-IOV teardown, a "soft" hot-unplug via 'echo 1 > remove' in sysfs, or an actual hot-unplug, we first remove the procfs and sysfs attributes for the device before attempting to release the device from any driver bound to it. Unbinding the driver from the device can take time. The device might need to write out data or it might be actively in use. If it's in use by userspace through a vfio driver, the unbind might block until the user releases the device. This leads to a potentially non-trivial amount of time where the device exists, but we've torn down the interfaces that userspace uses to examine devices, for instance lspci might generate this sort of error: pcilib: Cannot open /sys/bus/pci/devices/0000:01:0a.3/config lspci: Unable to read the standard configuration space header of device 0000:01:0a.3 We don't seem to have any dependence on this teardown ordering in the kernel, so let's unbind the driver first, which is also more symmetric with the instantiation of the device in pci_bus_add_device(). 
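The ordering argument can be sketched abstractly (generic helper names, not the PCI core functions): teardown mirrors bring-up, so the potentially slow driver unbind runs while the user-visible interfaces still exist.

    #include <stdio.h>

    static void create_user_interfaces(void) { puts("sysfs/procfs created"); }
    static void remove_user_interfaces(void) { puts("sysfs/procfs removed"); }
    static void bind_driver(void)            { puts("driver bound"); }
    static void unbind_driver(void)          { puts("driver unbound (may block)"); }

    static void add_device(void)
    {
        create_user_interfaces();         /* 1 */
        bind_driver();                    /* 2 */
    }

    static void stop_device(void)
    {
        /* Tear down in reverse order of add_device(): the slow unbind happens
         * while userspace tools can still read the device's attributes. */
        unbind_driver();                  /* 2' */
        remove_user_interfaces();         /* 1' */
    }

    int main(void)
    {
        add_device();
        stop_device();
        return 0;
    }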
Signed-off-by: Alex Williamson Signed-off-by: Bjorn Helgaas Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/remove.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 73a03d3825903b..2fa0dbde36b7ab 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -19,9 +19,9 @@ static void pci_stop_dev(struct pci_dev *dev) pci_pme_active(dev, false); if (dev->is_added) { + device_release_driver(&dev->dev); pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); - device_release_driver(&dev->dev); dev->is_added = 0; } From b61aba91eb72efc5117d24d3f4455dcaf67f1e63 Mon Sep 17 00:00:00 2001 From: Xiaofei Tan Date: Tue, 24 Oct 2017 23:51:38 +0800 Subject: [PATCH 0755/3326] scsi: hisi_sas: fix the risk of freeing slot twice [ Upstream commit 6ba0fbc35aa9f3bc8c12be3b4047055c9ce2ac92 ] The function hisi_sas_slot_task_free() is used to free the slot and do tidy-up of LLDD resources. The LLDD generally should know the state of a slot and decide when to free it, and it should only be done once. For some scenarios, we really don't know the state, like when TMF timeout. In this case, we check task->lldd_task before calling hisi_sas_slot_task_free(). However, we may miss some scenarios when we should also check task->lldd_task, and it is not SMP safe to check task->lldd_task as we don't protect it within spin lock. This patch is to fix this risk of freeing slot twice, as follows: 1. Check task->lldd_task in the hisi_sas_slot_task_free(), and give up freeing of this time if task->lldd_task is NULL. 2. Set slot->buf to NULL after it is freed. Signed-off-by: Xiaofei Tan Signed-off-by: John Garry Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/hisi_sas/hisi_sas_main.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 16664f2e15fbad..8fa9bb336ad4a6 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -185,13 +185,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; + if (!task->lldd_task) + return; + + task->lldd_task = NULL; + if (!sas_protocol_ata(task->task_proto)) if (slot->n_elem) dma_unmap_sg(dev, task->scatter, slot->n_elem, task->data_dir); - task->lldd_task = NULL; - if (sas_dev) atomic64_dec(&sas_dev->running_req); } @@ -199,8 +202,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, if (slot->buf) dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma); - list_del_init(&slot->entry); + slot->buf = NULL; slot->task = NULL; slot->port = NULL; hisi_sas_slot_index_free(hisi_hba, slot->idx); From 7a7797199d9b96989ab5f30eadad077a8dc7c970 Mon Sep 17 00:00:00 2001 From: Martin Wilck Date: Fri, 20 Oct 2017 16:51:14 -0500 Subject: [PATCH 0756/3326] scsi: hpsa: cleanup sas_phy structures in sysfs when unloading [ Upstream commit 55ca38b4255bb336c2d35990bdb2b368e19b435a ] I am resubmitting this patch on behalf of Martin Wilck with his permission. The original patch can be found here: https://www.spinics.net/lists/linux-scsi/msg102083.html This patch did not help until Hannes's commit 9441284fbc39 ("scsi-fixup-kernel-warning-during-rmmod") was applied to the kernel. 
-------------------------------------- Original patch description from Martin: -------------------------------------- When the hpsa module is unloaded using rmmod, dangling symlinks remain under /sys/class/sas_phy. Fix this by calling sas_phy_delete() rather than sas_phy_free (which, according to comments, should not be called for PHYs that have been set up successfully, anyway). Tested-by: Don Brace Reviewed-by: Don Brace Signed-off-by: Martin Wilck Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4ed3d26ffdde80..832bc83fcd8e6a 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -9207,9 +9207,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy) struct sas_phy *phy = hpsa_sas_phy->phy; sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy); - sas_phy_free(phy); if (hpsa_sas_phy->added_to_port) list_del(&hpsa_sas_phy->phy_list_entry); + sas_phy_delete(phy); kfree(hpsa_sas_phy); } From 8268e93b2b879912ae84952acef17e4c78f3b66d Mon Sep 17 00:00:00 2001 From: Martin Wilck Date: Fri, 20 Oct 2017 16:51:08 -0500 Subject: [PATCH 0757/3326] scsi: hpsa: destroy sas transport properties before scsi_host [ Upstream commit dfb2e6f46b3074eb85203d8f0888b71ec1c2e37a ] This patch cleans up a lot of warnings when unloading the driver. A current example of the stack trace starts with: [ 142.570715] sysfs group 'power' not found for kobject 'port-5:0' There can be hundreds of these messages during a driver unload. I am resubmitting this patch on behalf of Martin Wilck with his permission. His original patch can be found here: https://www.spinics.net/lists/linux-scsi/msg102085.html This patch did not help until Hannes's commit 9441284fbc39 ("scsi-fixup-kernel-warning-during-rmmod") was applied to the kernel. --------------------------- Original patch description: --------------------------- Unloading the hpsa driver causes warnings [ 1063.793652] WARNING: CPU: 1 PID: 4850 at ../fs/sysfs/group.c:237 device_del+0x54/0x240() [ 1063.793659] sysfs group ffffffff81cf21a0 not found for kobject 'port-2:0' with two different stacks: 1) [ 1063.793774] [] device_del+0x54/0x240 [ 1063.793780] [] transport_remove_classdev+0x4a/0x60 [ 1063.793784] [] attribute_container_device_trigger+0xa6/0xb0 [ 1063.793802] [] sas_port_delete+0x126/0x160 [scsi_transport_sas] [ 1063.793819] [] hpsa_free_sas_port+0x3c/0x70 [hpsa] 2) [ 1063.797103] [] device_del+0x54/0x240 [ 1063.797118] [] sas_port_delete+0x12e/0x160 [scsi_transport_sas] [ 1063.797134] [] hpsa_free_sas_port+0x3c/0x70 [hpsa] This is caused by the fact that host device hostX is deleted before the SAS transport devices hostX/port-a:b. This patch fixes this by reverting the order of device deletions. Tested-by: Don Brace Reviewed-by: Don Brace Signed-off-by: Martin Wilck Signed-off-by: Don Brace Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/hpsa.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 832bc83fcd8e6a..5fbaf13781b6c0 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -8684,6 +8684,8 @@ static void hpsa_remove_one(struct pci_dev *pdev) destroy_workqueue(h->rescan_ctlr_wq); destroy_workqueue(h->resubmit_wq); + hpsa_delete_sas_host(h); + /* * Call before disabling interrupts. 
* scsi_remove_host can trigger I/O operations especially @@ -8718,8 +8720,6 @@ static void hpsa_remove_one(struct pci_dev *pdev) h->lockup_detected = NULL; /* init_one 2 */ /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ - hpsa_delete_sas_host(h); - kfree(h); /* init_one 1 */ } From aeb01451f9e2d3e9b2ac2ad30426c68957c307ed Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sat, 14 Oct 2017 01:06:56 +0300 Subject: [PATCH 0758/3326] mfd: mxs-lradc: Fix error handling in mxs_lradc_probe() [ Upstream commit 362741a21a5c4b9ee31e75ce28d63c6d238a745c ] There is the only path, where mxs_lradc_probe() leaves clk undisabled, since it does return instead of goto err_clk. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Signed-off-by: Lee Jones Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/mxs-lradc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c index 630bd19b2c0a57..98e732a7ae96f0 100644 --- a/drivers/mfd/mxs-lradc.c +++ b/drivers/mfd/mxs-lradc.c @@ -196,8 +196,10 @@ static int mxs_lradc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, lradc); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENOMEM; + if (!res) { + ret = -ENOMEM; + goto err_clk; + } switch (lradc->soc) { case IMX23_LRADC: From 9409e1c775f35b2571be427cde272ce80cdc19e7 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Mon, 23 Oct 2017 19:51:05 +0800 Subject: [PATCH 0759/3326] net: hns3: fix the TX/RX ring.queue_index in hns3_ring_get_cfg [ Upstream commit 66b447301ac710ee237dba8b653244018fbb6168 ] The interface hns3_ring_get_cfg only update TX ring queue_index, but do not update RX ring queue_index. This patch fixes it. Fixes: 76ad4f0 (net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC) Signed-off-by: Lipeng Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 52687f0691f157..8944d836ac9a1b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -2488,16 +2488,16 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, if (ring_type == HNAE3_RING_TYPE_TX) { ring_data[q->tqp_index].ring = ring; + ring_data[q->tqp_index].queue_index = q->tqp_index; ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; } else { ring_data[q->tqp_index + queue_num].ring = ring; + ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; ring->io_base = q->io_base; } hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); - ring_data[q->tqp_index].queue_index = q->tqp_index; - ring->tqp = q; ring->desc = NULL; ring->desc_cb = NULL; From 122e18d27d77031b909a06f110f6da225502d033 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Mon, 23 Oct 2017 19:51:02 +0800 Subject: [PATCH 0760/3326] net: hns3: fix the bug when map buffer fail [ Upstream commit 564883bb4dc1a4f3cba6344e77743175694b0761 ] If one buffer had been recieved to stack, driver will alloc a new buffer, map the buffer to device and replace the old buffer. When map fail, should only free the new alloced buffer, but not free all buffers in the ring. 
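A minimal sketch of the error-handling scope the fix enforces, with generic names rather than the hns3 ring structures: on a mapping failure, the function releases only the buffer it just allocated and leaves the ring's existing buffers alone.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct buf { void *data; };

    static int map_buffer(struct buf *b, bool fail)
    {
        (void)b;
        return fail ? -EIO : 0;
    }

    static int reserve_buffer(struct buf *out, bool map_fails)
    {
        struct buf nb = { .data = malloc(4096) };
        int ret;

        if (!nb.data)
            return -ENOMEM;

        ret = map_buffer(&nb, map_fails);
        if (ret) {
            free(nb.data);                /* only the buffer allocated here... */
            return ret;                   /* ...never the ring's existing buffers */
        }

        *out = nb;                        /* replace the old descriptor */
        return 0;
    }

    int main(void)
    {
        struct buf b = { 0 };
        (void)reserve_buffer(&b, true);   /* simulate the map failure path */
        return 0;
    }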
Fixes: 76ad4f0 (net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC) Signed-off-by: Lipeng Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 8944d836ac9a1b..ea121b0b16ac9c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1546,7 +1546,7 @@ static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, return 0; out_with_buf: - hns3_free_buffers(ring); + hns3_free_buffer(ring, cb); out: return ret; } From 669ff2a9aa71edea93a0b74c1273080cad68dfb7 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Mon, 23 Oct 2017 19:51:01 +0800 Subject: [PATCH 0761/3326] net: hns3: fix a bug when alloc new buffer [ Upstream commit b9077428ec5569aacb2952d8a2ffb51c8988d3c2 ] When alloce new buffer to HW, should unmap the old buffer first. This old code map the old buffer but not unmap the old buffer, this patch fixes it. Fixes: 76ad4f0 (net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC) Signed-off-by: Lipeng Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index ea121b0b16ac9c..186772493711e2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1586,7 +1586,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, struct hns3_desc_cb *res_cb) { - hns3_map_buffer(ring, &ring->desc_cb[i]); + hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); } From b59dc14b0b102d42d6bd54ae77848ddc00a4596c Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 16 Oct 2017 15:06:19 +0200 Subject: [PATCH 0762/3326] serdev: ttyport: enforce tty-driver open() requirement [ Upstream commit dee7d0f3b200c67c6ee96bd37c6e8fa52690ab56 ] The tty-driver open routine is mandatory, but the serdev tty-port-controller implementation did not treat it as such and would instead fall back to calling tty_port_open() directly. 
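A short sketch of treating a required callback as required (hypothetical ops structure, not the serdev or tty API): if the contract says open() must be provided, the caller refuses with an error instead of guessing a fallback.

    #include <errno.h>
    #include <stddef.h>

    struct port_ops {
        int (*open)(void *port);          /* required by the contract */
    };

    static int controller_open(const struct port_ops *ops, void *port)
    {
        if (!ops->open)
            return -ENODEV;               /* refuse rather than improvise a fallback */
        return ops->open(port);
    }

    static int demo_open(void *port) { (void)port; return 0; }

    int main(void)
    {
        struct port_ops good = { .open = demo_open };
        struct port_ops bad  = { .open = NULL };

        return controller_open(&good, NULL) == 0 &&
               controller_open(&bad,  NULL) == -ENODEV ? 0 : 1;
    }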
Signed-off-by: Johan Hovold Acked-by: Rob Herring Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serdev/serdev-ttyport.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index 7f785d77ba7f40..69fc6d9ab490f9 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -120,10 +120,10 @@ static int ttyport_open(struct serdev_controller *ctrl) return PTR_ERR(tty); serport->tty = tty; - if (tty->ops->open) - tty->ops->open(serport->tty, NULL); - else - tty_port_open(serport->port, tty, NULL); + if (!tty->ops->open) + goto err_unlock; + + tty->ops->open(serport->tty, NULL); /* Bring the UART into a known 8 bits no parity hw fc state */ ktermios = tty->termios; @@ -140,6 +140,12 @@ static int ttyport_open(struct serdev_controller *ctrl) tty_unlock(serport->tty); return 0; + +err_unlock: + tty_unlock(tty); + tty_release_struct(tty, serport->tty_idx); + + return -ENODEV; } static void ttyport_close(struct serdev_controller *ctrl) From 8ee1eada4f1a6671517dc51f6bc11174faebea02 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 9 Oct 2017 21:52:44 +1100 Subject: [PATCH 0763/3326] powerpc/perf/hv-24x7: Fix incorrect comparison in memord [ Upstream commit 05c14c03138532a3cb2aa29c2960445c8753343b ] In the hv-24x7 code there is a function memord() which tries to implement a sort function return -1, 0, 1. However one of the conditions is incorrect, such that it can never be true, because we will have already returned. I don't believe there is a bug in practice though, because the comparisons are an optimisation prior to calling memcmp(). Fix it by swapping the second comparision, so it can be true. Reported-by: David Binderman Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/perf/hv-24x7.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 9c88b82f62293a..72238eedc360fa 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -540,7 +540,7 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2) { if (s1 < s2) return 1; - if (s2 > s1) + if (s1 > s2) return -1; return memcmp(d1, d2, s1); From 9cd01922985ce5c00cf050f12daef304cc452761 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Tue, 17 Oct 2017 16:20:18 -0200 Subject: [PATCH 0764/3326] powerpc/xmon: Check before calling xive functions [ Upstream commit 402e172a2ce76210f2fe921cf419d12103851344 ] Currently xmon could call XIVE functions from OPAL even if the XIVE is disabled or does not exist in the system, as in POWER8 machines. This causes the following exception: 1:mon> dx cpu 0x1: Vector: 700 (Program Check) at [c000000423c93450] pc: c00000000009cfa4: opal_xive_dump+0x50/0x68 lr: c0000000000997b8: opal_return+0x0/0x50 This patch simply checks if XIVE is enabled before calling XIVE functions. Fixes: 243e25112d06 ("powerpc/xive: Native exploitation of the XIVE interrupt controller") Suggested-by: Guilherme G. 
Piccoli Signed-off-by: Breno Leitao Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/xmon/xmon.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 33351c6704b1d0..c008083fbc4f63 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2475,6 +2475,11 @@ static void dump_xives(void) unsigned long num; int c; + if (!xive_enabled()) { + printf("Xive disabled on this system\n"); + return; + } + c = inchar(); if (c == 'a') { dump_all_xives(); From 48185ffb6dc3eb64846039117d76148d3e3d7022 Mon Sep 17 00:00:00 2001 From: Matthias Brugger Date: Sat, 21 Oct 2017 10:17:47 +0200 Subject: [PATCH 0765/3326] soc: mediatek: pwrap: fix compiler errors [ Upstream commit fb2c1934f30577756e55e24e8870b45c78da3bc2 ] When compiling using sparse, we got the following error: drivers/soc/mediatek/mtk-pmic-wrap.c:686:25: error: dubious one-bit signed bitfield Changing the data type to unsigned fixes this. Signed-off-by: Matthias Brugger Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/soc/mediatek/mtk-pmic-wrap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index c2048382830f74..e3df1e96b1417a 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -522,7 +522,7 @@ struct pmic_wrapper_type { u32 int_en_all; u32 spi_w; u32 wdt_src; - int has_bridge:1; + unsigned int has_bridge:1; int (*init_reg_clock)(struct pmic_wrapper *wrp); int (*init_soc_specific)(struct pmic_wrapper *wrp); }; From fdfcb06c5944afb78007cfc25e527ff689b2a344 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 18 Oct 2017 17:02:03 -0700 Subject: [PATCH 0766/3326] ipv4: ipv4_default_advmss() should use route mtu [ Upstream commit 164a5e7ad531e181334a3d3f03d0d5ad20d6faea ] ipv4_default_advmss() incorrectly uses the device MTU instead of the route provided one. IPv6 has the proper behavior, lets harmonize the two protocols. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv4/route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 647cfc972bde0f..804bead564db8e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1254,7 +1254,7 @@ static void set_class_tag(struct rtable *rt, u32 tag) static unsigned int ipv4_default_advmss(const struct dst_entry *dst) { unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr); - unsigned int advmss = max_t(unsigned int, dst->dev->mtu - header_size, + unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size, ip_rt_min_advmss); return min(advmss, IPV4_MAX_PMTU - header_size); From b53af57679492180417d584be410b1134de15cd2 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 19 Oct 2017 07:00:34 +0800 Subject: [PATCH 0767/3326] KVM: nVMX: Fix EPT switching advertising MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 575b3a2cb439b03fd603ea77c73c76f3ed237596 ] I can use vmxcap tool to observe "EPTP Switching yes" even if EPT is not exposed to L1. EPT switching is advertised unconditionally since it is emulated, however, it can be treated as an extended feature for EPT and it should not be advertised if EPT itself is not exposed. This patch fixes it. 
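The mtk-pmic-wrap change a few patches above deserves a short aside: sparse's "dubious one-bit signed bitfield" warning exists because a signed 1-bit field can only represent 0 and -1, so storing 1 in it and later comparing against 1 behaves surprisingly. A small standalone C program (illustrative only; the structs here are not the driver's) makes the difference visible:

    #include <stdio.h>

    struct flags_signed   { int has_bridge:1; };          /* dubious */
    struct flags_unsigned { unsigned int has_bridge:1; }; /* what the fix uses */

    int main(void)
    {
            struct flags_signed s;
            struct flags_unsigned u;

            s.has_bridge = 1;  /* stored as -1 on common ABIs (signedness of plain int bitfields is implementation-defined) */
            u.has_bridge = 1;  /* stored as 1 */

            printf("signed:   value=%d, ==1? %d\n", s.has_bridge, s.has_bridge == 1);
            printf("unsigned: value=%u, ==1? %d\n", u.has_bridge, u.has_bridge == 1);
            return 0;
    }

On common compilers the signed variant prints -1 and the equality test fails, which is exactly the kind of silent surprise the one-word type change avoids.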
Reviewed-by: David Hildenbrand Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Jim Mattson Signed-off-by: Wanpeng Li Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f366e6d3a5e1f5..bc5921c1e2f23d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2845,8 +2845,9 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) * Advertise EPTP switching unconditionally * since we emulate it */ - vmx->nested.nested_vmx_vmfunc_controls = - VMX_VMFUNC_EPTP_SWITCHING; + if (enable_ept) + vmx->nested.nested_vmx_vmfunc_controls = + VMX_VMFUNC_EPTP_SWITCHING; } /* From 93fc8284472124140a7946c4e5f555e06808f4a2 Mon Sep 17 00:00:00 2001 From: nixiaoming Date: Fri, 15 Sep 2017 17:45:56 +0800 Subject: [PATCH 0768/3326] tty fix oops when rmmod 8250 [ Upstream commit c79dde629d2027ca80329c62854a7635e623d527 ] After rmmod 8250.ko tty_kref_put starts kwork (release_one_tty) to release proc interface oops when accessing driver->driver_name in proc_tty_unregister_driver Use jprobe, found driver->driver_name point to 8250.ko static static struct uart_driver serial8250_reg .driver_name= serial, Use name in proc_dir_entry instead of driver->driver_name to fix oops test on linux 4.1.12: BUG: unable to handle kernel paging request at ffffffffa01979de IP: [] strchr+0x0/0x30 PGD 1a0d067 PUD 1a0e063 PMD 851c1f067 PTE 0 Oops: 0000 [#1] PREEMPT SMP Modules linked in: ... ... [last unloaded: 8250] CPU: 7 PID: 116 Comm: kworker/7:1 Tainted: G O 4.1.12 #1 Hardware name: Insyde RiverForest/Type2 - Board Product Name1, BIOS NE5KV904 12/21/2015 Workqueue: events release_one_tty task: ffff88085b684960 ti: ffff880852884000 task.ti: ffff880852884000 RIP: 0010:[] [] strchr+0x0/0x30 RSP: 0018:ffff880852887c90 EFLAGS: 00010282 RAX: ffffffff81a5eca0 RBX: ffffffffa01979de RCX: 0000000000000004 RDX: ffff880852887d10 RSI: 000000000000002f RDI: ffffffffa01979de RBP: ffff880852887cd8 R08: 0000000000000000 R09: ffff88085f5d94d0 R10: 0000000000000195 R11: 0000000000000000 R12: ffffffffa01979de R13: ffff880852887d00 R14: ffffffffa01979de R15: ffff88085f02e840 FS: 0000000000000000(0000) GS:ffff88085f5c0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffffffa01979de CR3: 0000000001a0c000 CR4: 00000000001406e0 Stack: ffffffff812349b1 ffff880852887cb8 ffff880852887d10 ffff88085f5cd6c2 ffff880852800a80 ffffffffa01979de ffff880852800a84 0000000000000010 ffff88085bb28bd8 ffff880852887d38 ffffffff812354f0 ffff880852887d08 Call Trace: [] ? __xlate_proc_name+0x71/0xd0 [] remove_proc_entry+0x40/0x180 [] ? _raw_spin_lock_irqsave+0x41/0x60 [] ? destruct_tty_driver+0x60/0xe0 [] proc_tty_unregister_driver+0x28/0x40 [] destruct_tty_driver+0x88/0xe0 [] tty_driver_kref_put+0x1d/0x20 [] release_one_tty+0x5a/0xd0 [] process_one_work+0x139/0x420 [] worker_thread+0x121/0x450 [] ? process_scheduled_works+0x40/0x40 [] kthread+0xec/0x110 [] ? tg_rt_schedulable+0x210/0x220 [] ? kthread_freezable_should_stop+0x80/0x80 [] ret_from_fork+0x42/0x70 [] ? 
kthread_freezable_should_stop+0x80/0x80 Signed-off-by: nixiaoming Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/proc/proc_tty.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c index 2da657848cfc61..d0cf1c50bb6c22 100644 --- a/fs/proc/proc_tty.c +++ b/fs/proc/proc_tty.c @@ -15,6 +15,7 @@ #include #include #include +#include "internal.h" /* * The /proc/tty directory inodes... @@ -165,7 +166,7 @@ void proc_tty_unregister_driver(struct tty_driver *driver) if (!ent) return; - remove_proc_entry(driver->driver_name, proc_tty_driver); + remove_proc_entry(ent->name, proc_tty_driver); driver->proc_entry = NULL; } From 4684da79d20edf232e33442775506e600fc4fe6f Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Wed, 18 Oct 2017 12:21:55 -0600 Subject: [PATCH 0769/3326] dev/dax: fix uninitialized variable build warning [ Upstream commit 0a3ff78699d1817e711441715d22665475466036 ] Fix this build warning: warning: 'phys' may be used uninitialized in this function [-Wuninitialized] As reported here: https://lkml.org/lkml/2017/10/16/152 http://kisskb.ellerman.id.au/kisskb/buildresult/13181373/log/ Signed-off-by: Ross Zwisler Signed-off-by: Dan Williams Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/dax/device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 375b99bca002c9..7b0bf825c4e73c 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -222,7 +222,8 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size) { struct resource *res; - phys_addr_t phys; + /* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */ + phys_addr_t uninitialized_var(phys); int i; for (i = 0; i < dev_dax->num_resources; i++) { From 57db94d4b72479c6c1e31d18e83a581d5d6d0ee9 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 11 Oct 2017 11:57:15 +0200 Subject: [PATCH 0770/3326] pinctrl: adi2: Fix Kconfig build problem [ Upstream commit 1c363531dd814dc4fe10865722bf6b0f72ce4673 ] The build robot is complaining on Blackfin: drivers/pinctrl/pinctrl-adi2.c: In function 'port_setup': >> drivers/pinctrl/pinctrl-adi2.c:221:21: error: dereferencing pointer to incomplete type 'struct gpio_port_t' writew(readw(®s->port_fer) & ~BIT(offset), ^~ drivers/pinctrl/pinctrl-adi2.c: In function 'adi_gpio_ack_irq': >> drivers/pinctrl/pinctrl-adi2.c:266:18: error: dereferencing pointer to incomplete type 'struct bfin_pint_regs' if (readl(®s->invert_set) & pintbit) ^~ It seems the driver need to include and to compile. The Blackfin architecture was re-defining the Kconfig PINCTRL symbol which is not OK, so replaced this with PINCTRL_BLACKFIN_ADI2 which selects PINCTRL and PINCTRL_ADI2 just like most arches do. Further, the old GPIO driver symbol GPIO_ADI was possible to select at the same time as selecting PINCTRL. This was not working because the arch-local header contains an explicit #ifndef PINCTRL clause making compilation break if you combine them. The same is true for DEBUG_MMRS. Make sure the ADI2 pinctrl driver is not selected at the same time as the old GPIO implementation. (This should be converted to use gpiolib or pincontrol and move to drivers/...) Also make sure the old GPIO_ADI driver or DEBUG_MMRS is not selected at the same time as the new PINCTRL implementation, and only make PINCTRL_ADI2 selectable for the Blackfin families that actually have it. 
This way it is still possible to add e.g. I2C-based pin control expanders on the Blackfin. Cc: Steven Miao Cc: Huanhuan Feng Signed-off-by: Linus Walleij Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/blackfin/Kconfig | 7 +++++-- arch/blackfin/Kconfig.debug | 1 + drivers/pinctrl/Kconfig | 3 ++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index af5369422032b1..d9c2866ba618ce 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -321,11 +321,14 @@ config BF53x config GPIO_ADI def_bool y + depends on !PINCTRL depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561) -config PINCTRL +config PINCTRL_BLACKFIN_ADI2 def_bool y - depends on BF54x || BF60x + depends on (BF54x || BF60x) + select PINCTRL + select PINCTRL_ADI2 config MEM_MT48LC64M4A2FB_7E bool diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug index 4ddd1b73ee3e5e..c8d957274cc202 100644 --- a/arch/blackfin/Kconfig.debug +++ b/arch/blackfin/Kconfig.debug @@ -18,6 +18,7 @@ config DEBUG_VERBOSE config DEBUG_MMRS tristate "Generate Blackfin MMR tree" + depends on !PINCTRL select DEBUG_FS help Create a tree of Blackfin MMRs via the debugfs tree. If diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 82cd8b08d71f52..a73c794bed035b 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -33,7 +33,8 @@ config DEBUG_PINCTRL config PINCTRL_ADI2 bool "ADI pin controller driver" - depends on BLACKFIN + depends on (BF54x || BF60x) + depends on !GPIO_ADI select PINMUX select IRQ_DOMAIN help From 8151940acc356ba61e0992f464fca38118860c31 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 17 Oct 2017 16:18:36 +1100 Subject: [PATCH 0771/3326] raid5: Set R5_Expanded on parity devices as well as data. [ Upstream commit 235b6003fb28f0dd8e7ed8fbdb088bb548291766 ] When reshaping a fully degraded raid5/raid6 to a larger number of devices, the new device(s) are not in-sync and so that can make the newly grown stripe appear to be "failed". To avoid this, we set the R5_Expanded flag to say "Even though this device is not fully in-sync, this block is safe so don't treat the device as failed for this stripe". This flag is set for data devices, but not for parity devices. Consequently, if you have a RAID6 with two devices that are partly recovered and a spare, and start a reshape to include the spare, then when the reshape gets past the point where the recovery was up to, it will think the stripes are failed and will get into an infinite loop, failing to make progress. So when constructing parity on an EXPAND_READY stripe, set R5_Expanded.
Reported-by: Curt Signed-off-by: NeilBrown Signed-off-by: Shaohua Li Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/raid5.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7aed69a4f65529..c406f16f529579 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1818,8 +1818,11 @@ static void ops_complete_reconstruct(void *stripe_head_ref) struct r5dev *dev = &sh->dev[i]; if (dev->written || i == pd_idx || i == qd_idx) { - if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) + if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) { set_bit(R5_UPTODATE, &dev->flags); + if (test_bit(STRIPE_EXPAND_READY, &sh->state)) + set_bit(R5_Expanded, &dev->flags); + } if (fua) set_bit(R5_WantFUA, &dev->flags); if (sync) From 84607a2958762c94741fc96e947e6a134cd5f5c3 Mon Sep 17 00:00:00 2001 From: Kurt Garloff Date: Tue, 17 Oct 2017 09:10:45 +0200 Subject: [PATCH 0772/3326] scsi: scsi_devinfo: Add REPORTLUN2 to EMC SYMMETRIX blacklist entry [ Upstream commit 909cf3e16a5274fe2127cf3cea5c8dba77b2c412 ] All EMC SYMMETRIX support REPORT_LUNS, even if configured to report SCSI-2 for whatever reason. Signed-off-by: Kurt Garloff Signed-off-by: Hannes Reinecke Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/scsi_devinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 6bf43d94cdc0ca..b19b00adacb24e 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -161,7 +161,7 @@ static struct { {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */ {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, - {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN}, + {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2}, {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN}, {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN}, From 9cb27c88b080274e5f94ae7a7c4da7275925ab33 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 16 Oct 2017 08:45:15 +0300 Subject: [PATCH 0773/3326] IB/core: Fix use workqueue without WQ_MEM_RECLAIM [ Upstream commit 39baf10310e6669564a485b55267fae70a4e44ae ] The IB/core provides address resolution service and invokes callback handler when address resolve request completes of requester in worker thread context. Such caller might allocate or free memory in callback handler depending on the completion status to make further progress or to terminate a connection. Most ULPs resolve route which involves allocating route entry and path record elements in callback event handler. It has been noticed that WQ_MEM_RECLAIM flag should not be used for workers that tend to allocate memory in this [1] thread discussion. In order to mitigate this situation, WQ_MEM_RECLAIM flag was dropped for other such WQs in this [2] patch. Similar problem might arise with address resolution path, though its not yet noticed. The ib_addr workqueue is not memory reclaim path due to its nature of invoking callback that might allocate memory or don't free any memory under memory pressure. 
[1] https://www.spinics.net/lists/linux-rdma/msg53239.html [2] https://www.spinics.net/lists/linux-rdma/msg53416.html Fixes: f54816261c2b ("IB/addr: Remove deprecated create_singlethread_workqueue") Fixes: 5fff41e1f89d ("IB/core: Fix race condition in resolving IP to MAC") Signed-off-by: Parav Pandit Reviewed-by: Daniel Jurgens Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/addr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 12523f630b6147..d2f74721b3ba65 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -852,7 +852,7 @@ static struct notifier_block nb = { int addr_init(void) { - addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM); + addr_wq = alloc_ordered_workqueue("ib_addr", 0); if (!addr_wq) return -ENOMEM; From b57259ca055f7f2c7237dcc0e5aac446b2fa6da9 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 16 Oct 2017 08:45:16 +0300 Subject: [PATCH 0774/3326] IB/core: Fix calculation of maximum RoCE MTU [ Upstream commit 99260132fde7bddc6e0132ce53da94d1c9ccabcb ] The original code only took into consideration the largest header possible after the IB_BTH_BYTES. This was incorrect, as the largest possible header size is the largest possible combination of headers we might run into. The new code accounts for all possible headers in the largest possible combination and subtracts that from the MTU to make sure that all packets will fit on the wire. Link: https://www.spinics.net/lists/linux-rdma/msg54558.html Fixes: 3c86aa70bf67 ("RDMA/cm: Add RDMA CM support for IBoE devices") Signed-off-by: Parav Pandit Reviewed-by: Daniel Jurgens Reported-by: Roland Dreier Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/rdma/ib_addr.h | 7 ++++--- include/rdma/ib_pack.h | 19 +++++++++++-------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index ec5008cf5d51d2..8815989301ab0c 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -245,10 +245,11 @@ static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_g static inline enum ib_mtu iboe_get_mtu(int mtu) { /* - * reduce IB headers from effective IBoE MTU. 28 stands for - * atomic header which is the biggest possible header after BTH + * Reduce IB headers from effective IBoE MTU. 
*/ - mtu = mtu - IB_GRH_BYTES - IB_BTH_BYTES - 28; + mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES + + IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES + + IB_ICRC_BYTES); if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096)) return IB_MTU_4096; diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h index 36655899ee028d..7ea1382ad0e59b 100644 --- a/include/rdma/ib_pack.h +++ b/include/rdma/ib_pack.h @@ -37,14 +37,17 @@ #include enum { - IB_LRH_BYTES = 8, - IB_ETH_BYTES = 14, - IB_VLAN_BYTES = 4, - IB_GRH_BYTES = 40, - IB_IP4_BYTES = 20, - IB_UDP_BYTES = 8, - IB_BTH_BYTES = 12, - IB_DETH_BYTES = 8 + IB_LRH_BYTES = 8, + IB_ETH_BYTES = 14, + IB_VLAN_BYTES = 4, + IB_GRH_BYTES = 40, + IB_IP4_BYTES = 20, + IB_UDP_BYTES = 8, + IB_BTH_BYTES = 12, + IB_DETH_BYTES = 8, + IB_EXT_ATOMICETH_BYTES = 28, + IB_EXT_XRC_BYTES = 4, + IB_ICRC_BYTES = 4 }; struct ib_field { From 4e2836b43102633dc9bffbc170be43e1d147914c Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 9 Oct 2017 16:45:55 +0800 Subject: [PATCH 0775/3326] vt6655: Fix a possible sleep-in-atomic bug in vt6655_suspend [ Upstream commit 42c8eb3f6e15367981b274cb79ee4657e2c6949d ] The driver may sleep under a spinlock, and the function call path is: vt6655_suspend (acquire the spinlock) pci_set_power_state __pci_start_power_transition (drivers/pci/pci.c) msleep --> may sleep To fix it, pci_set_power_state is called without having a spinlock. This bug is found by my static analysis tool and my code review. Signed-off-by: Jia-Ju Bai Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/vt6655/device_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 9fcf2e223f719d..1123b4f1e1d669 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1693,10 +1693,11 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state) MACbShutdown(priv); pci_disable_device(pcid); - pci_set_power_state(pcid, pci_choose_state(pcid, state)); spin_unlock_irqrestore(&priv->lock, flags); + pci_set_power_state(pcid, pci_choose_state(pcid, state)); + return 0; } From f18b2039d904f344dae779451592814e2c646686 Mon Sep 17 00:00:00 2001 From: Don Hiatt Date: Mon, 9 Oct 2017 12:38:12 -0700 Subject: [PATCH 0776/3326] IB/hfi1: Mask out A bit from psn trace [ Upstream commit d0a2f454713a42447ee4007582c0e43c47bcf230 ] The trace logic prior to the fixes below used to mask the A bit from the psn. It now mistakenly displays the A bit, which is already displayed separately. Fix by adding the appropriate mask to the psn tracing. 
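Going back to the RoCE MTU patch above: the point is that the reserved overhead has to cover the worst-case combination of headers (GRH + UDP + BTH + XRC extension + AtomicEth + ICRC), not just the single largest extension after the BTH. A rough standalone arithmetic check, using the byte counts quoted in the ib_pack.h hunk (the MTU value is made up for illustration):

    #include <stdio.h>

    enum {
            IB_GRH_BYTES           = 40,
            IB_UDP_BYTES           = 8,
            IB_BTH_BYTES           = 12,
            IB_EXT_XRC_BYTES       = 4,
            IB_EXT_ATOMICETH_BYTES = 28,
            IB_ICRC_BYTES          = 4,
    };

    int main(void)
    {
            int mtu = 4180; /* hypothetical netdev MTU on the RoCE port */

            int old_overhead = IB_GRH_BYTES + IB_BTH_BYTES + 28; /* 80: what the old code assumed */
            int new_overhead = IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +
                               IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES +
                               IB_ICRC_BYTES;                    /* 96 */

            printf("old payload space: %d\n", mtu - old_overhead); /* 4100 -> would report IB_MTU_4096 */
            printf("new payload space: %d\n", mtu - new_overhead); /* 4084 -> reports IB_MTU_2048 */
            return 0;
    }

Near the 4096-byte boundary the extra 16 bytes of accounting is exactly what keeps iboe_get_mtu() from advertising an IB MTU that cannot actually fit on the wire.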
Fixes: 228d2af1b723 ("IB/hfi1: Separate input/output header tracing") Fixes: 863cf89d472f ("IB/hfi1: Add 16B trace support") Reviewed-by: Mike Marciniszyn Signed-off-by: Don Hiatt Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/hfi1/trace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index 9938bb983ce6f1..9749ec9dd9f2a6 100644 --- a/drivers/infiniband/hw/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c @@ -154,7 +154,7 @@ void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr, *opcode = ib_bth_get_opcode(ohdr); *tver = ib_bth_get_tver(ohdr); *pkey = ib_bth_get_pkey(ohdr); - *psn = ib_bth_get_psn(ohdr); + *psn = mask_psn(ib_bth_get_psn(ohdr)); *qpn = ib_bth_get_qpn(ohdr); } @@ -169,7 +169,7 @@ void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr, *pad = ib_bth_get_pad(ohdr); *se = ib_bth_get_se(ohdr); *tver = ib_bth_get_tver(ohdr); - *psn = ib_bth_get_psn(ohdr); + *psn = mask_psn(ib_bth_get_psn(ohdr)); *qpn = ib_bth_get_qpn(ohdr); } From f898f36664a5d64a55c1257e55dfacaae6a86bb7 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sun, 8 Oct 2017 19:54:45 +0800 Subject: [PATCH 0777/3326] rtl8188eu: Fix a possible sleep-in-atomic bug in rtw_createbss_cmd [ Upstream commit 2bf9806d4228f7a6195f8e03eda0479d2a93b411 ] The driver may sleep under a spinlock, and the function call path is: rtw_surveydone_event_callback(acquire the spinlock) rtw_createbss_cmd kzalloc(GFP_KERNEL) --> may sleep To fix it, GFP_KERNEL is replaced with GFP_ATOMIC. This bug is found by my static analysis tool and my code review. Signed-off-by: Jia-Ju Bai Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8188eu/core/rtw_cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index 9461bce883ea90..430b8eb5897654 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -333,7 +333,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter) else RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid)); - pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (!pcmd) { res = _FAIL; goto exit; From f2d81d0f030afc7a873bb3454eaf59f384543baf Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sun, 8 Oct 2017 19:54:07 +0800 Subject: [PATCH 0778/3326] rtl8188eu: Fix a possible sleep-in-atomic bug in rtw_disassoc_cmd [ Upstream commit 08880f8e08cbd814e870e9d3ab9530abc1bce226 ] The driver may sleep under a spinlock, and the function call path is: rtw_set_802_11_bssid(acquire the spinlock) rtw_disassoc_cmd kzalloc(GFP_KERNEL) --> may sleep To fix it, GFP_KERNEL is replaced with GFP_ATOMIC. This bug is found by my static analysis tool and my code review. 
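The two rtl8188eu fixes above, like the vt6655 one before them, are the same class of bug: something that can sleep is called while a spinlock is held. A hypothetical kernel-style sketch of the rule (made-up structures, not taken from any of these drivers):

    /* Sketch only: allocation context under a spinlock. */
    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo_cmd {
            struct list_head node;
            int code;
    };

    struct demo_dev {
            spinlock_t lock;
            struct list_head cmds;
    };

    static int demo_queue_cmd(struct demo_dev *dev, int code)
    {
            struct demo_cmd *cmd;
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);
            /* No sleeping in here: GFP_KERNEL may block, so use GFP_ATOMIC,
             * or allocate before taking the lock. */
            cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
            if (cmd) {
                    cmd->code = code;
                    list_add_tail(&cmd->node, &dev->cmds);
            }
            spin_unlock_irqrestore(&dev->lock, flags);

            return cmd ? 0 : -ENOMEM;
    }

Where the call site allows it, allocating with GFP_KERNEL before taking the lock is usually the nicer fix, since GFP_ATOMIC allocations are more likely to fail under memory pressure; switching the flag, as these patches do, is the minimal change.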
Signed-off-by: Jia-Ju Bai Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8188eu/core/rtw_cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index 430b8eb5897654..be8542676adf9e 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -508,7 +508,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu if (enqueue) { /* need enqueue, prepare cmd_obj and enqueue */ - cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL); + cmdobj = kzalloc(sizeof(*cmdobj), GFP_ATOMIC); if (!cmdobj) { res = _FAIL; kfree(param); From c97e41076a298dbc4e910c33048e553658388eed Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 17 Oct 2017 16:54:52 +0100 Subject: [PATCH 0779/3326] ipmi_si: fix memory leak on new_smi [ Upstream commit c0a32fe13cd323ca9420500b16fd69589c9ba91e ] The error exit path omits kfree'ing the allocated new_smi, causing a memory leak. Fix this by kfree'ing new_smi. Detected by CoverityScan, CID#14582571 ("Resource Leak") Fixes: 7e030d6dff71 ("ipmi: Prefer ACPI system interfaces over SMBIOS ones") Signed-off-by: Colin Ian King Signed-off-by: Corey Minyard Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/char/ipmi/ipmi_si_intf.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index c04aa11f0e2145..e1cbb78c680640 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -3469,6 +3469,7 @@ static int add_smi(struct smi_info *new_smi) ipmi_addr_src_to_str(new_smi->addr_source), si_to_str[new_smi->si_type]); rv = -EBUSY; + kfree(new_smi); goto out_err; } } From fc8f4ca137d9525b6f7db4b1d3e8f07a087518ec Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 17 Oct 2017 12:11:46 +0000 Subject: [PATCH 0780/3326] nullb: fix error return code in null_init() [ Upstream commit 30c516d750396c5f3ec9cb04c9e025c25e91495e ] Fix to return error code -ENOMEM from the null_alloc_dev() error handling case instead of 0, as done elsewhere in this function. Fixes: 2984c8684f96 ("nullb: factor disk parameters") Signed-off-by: Wei Yongjun Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/block/null_blk.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 8042c26ea9e6eb..4d55af5c6e5b56 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -1985,8 +1985,10 @@ static int __init null_init(void) for (i = 0; i < nr_devices; i++) { dev = null_alloc_dev(); - if (!dev) + if (!dev) { + ret = -ENOMEM; goto err_dev; + } ret = null_add_dev(dev); if (ret) { null_free_dev(dev); From 47b5dbdd983e0319cd5d135bbfe7e564f4bbaabc Mon Sep 17 00:00:00 2001 From: weiping zhang Date: Thu, 12 Oct 2017 14:57:06 +0800 Subject: [PATCH 0781/3326] scsi: sd: change manage_start_stop to bool in sysfs interface [ Upstream commit 623401ee33e42cee64d333877892be8db02951eb ] /sys/class/scsi_disk/0:2:0:0/manage_start_stop can be changed to 0 unexpectly by writing an invalid string. Signed-off-by: weiping zhang Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/sd.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d841743b2107c9..b801212eefb574 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -231,11 +231,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; + bool v; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - sdp->manage_start_stop = simple_strtoul(buf, NULL, 10); + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->manage_start_stop = v; return count; } From dd2581c675e4311abc68826c25ec2563bccb3337 Mon Sep 17 00:00:00 2001 From: weiping zhang Date: Thu, 12 Oct 2017 14:56:44 +0800 Subject: [PATCH 0782/3326] scsi: sd: change allow_restart to bool in sysfs interface [ Upstream commit 658e9a6dc1126f21fa417cd213e1cdbff8be0ba2 ] /sys/class/scsi_disk/0:2:0:0/allow_restart can be changed to 0 unexpectedly by writing an invalid string such as the following: echo asdf > /sys/class/scsi_disk/0:2:0:0/allow_restart Signed-off-by: weiping zhang Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/sd.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b801212eefb574..72db0f7d221a7f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -257,6 +257,7 @@ static ssize_t allow_restart_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + bool v; struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -266,7 +267,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr, if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) return -EINVAL; - sdp->allow_restart = simple_strtoul(buf, NULL, 10); + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->allow_restart = v; return count; } From b197f67ccfeb86d180a152796b5e3b7de1f611ef Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 4 Oct 2017 10:50:37 +0300 Subject: [PATCH 0783/3326] scsi: bfa: integer overflow in debugfs [ Upstream commit 3e351275655d3c84dc28abf170def9786db5176d ] We could allocate less memory than intended because we do: bfad->regdata = kzalloc(len << 2, GFP_KERNEL); The shift can overflow leading to a crash. This is debugfs code so the impact is very small. I fixed the network version of this in March with commit 13e2d5187f6b ("bna: integer overflow bug in debugfs"). Fixes: ab2a9ba189e8 ("[SCSI] bfa: add debugfs support") Signed-off-by: Dan Carpenter Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/bfa/bfad_debugfs.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 8dcd8c70c7ee08..05f523971348a8 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -255,7 +255,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, struct bfad_s *bfad = port->bfad; struct bfa_s *bfa = &bfad->bfa; struct bfa_ioc_s *ioc = &bfa->ioc; - int addr, len, rc, i; + int addr, rc, i; + u32 len; u32 *regbuf; void __iomem *rb, *reg_addr; unsigned long flags; @@ -266,7 +267,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, return PTR_ERR(kern_buf); rc = sscanf(kern_buf, "%x:%x", &addr, &len); - if (rc < 2) { + if (rc < 2 || len > (UINT_MAX >> 2)) { printk(KERN_INFO "bfad[%d]: %s failed to read user buf\n", bfad->inst_no, __func__); From 339aba679813ff1fabde362147919d3f6e536568 Mon Sep 17 00:00:00 2001 From: Artur Paszkiewicz Date: Fri, 29 Sep 2017 22:54:19 +0200 Subject: [PATCH 0784/3326] raid5-ppl: check recovery_offset when performing ppl recovery [ Upstream commit 07719ff767dcd8cc42050f185d332052f3816546 ] If starting an array that is undergoing rebuild, make ppl recovery honor the recovery_offset of a member disk and don't read data that is not yet in-sync. Signed-off-by: Artur Paszkiewicz Signed-off-by: Shaohua Li Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/raid5-ppl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index cd026c88f7efa7..702b7600888690 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -758,7 +758,8 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, (unsigned long long)sector); rdev = conf->disks[dd_idx].rdev; - if (!rdev) { + if (!rdev || (!test_bit(In_sync, &rdev->flags) && + sector >= rdev->recovery_offset)) { pr_debug("%s:%*s data member disk %d missing\n", __func__, indent, "", dd_idx); update_parity = false; From 46788d19e137fc48d93f0a29eade2e02e05317f6 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Fri, 29 Sep 2017 09:16:43 +0800 Subject: [PATCH 0785/3326] md-cluster: fix wrong condition check in raid1_write_request [ Upstream commit 385f4d7f946b08f36f68b0a28e95a319925b6b62 ] The check used here is to avoid conflict between write and resync, however we used the wrong logic, it should be the inverse of the checking inside "if". 
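The bfa debugfs fix a little further up is a textbook user-controlled shift overflow: len comes from sscanf() of a user buffer and is then used as len << 2. A standalone C illustration of why the UINT_MAX >> 2 bound is needed (the value below is made up):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int len = 0x40000001u;   /* hostile register count from user space */
            unsigned int nbytes = len << 2;   /* wraps modulo 2^32: becomes 4 */

            printf("len=0x%x -> len << 2 = %u bytes (wrapped)\n", len, nbytes);

            /* The fix: refuse anything that cannot be shifted without wrapping. */
            if (len > (UINT_MAX >> 2))
                    printf("rejected: 0x%x exceeds UINT_MAX >> 2\n", len);
            return 0;
    }

With the old signed int the same shift would have been undefined behaviour rather than a mere wrap, so moving len to u32 and bounding it addresses both problems at once.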
Fixes: 589a1c4 ("Suspend writes in RAID1 if within range") Signed-off-by: Guoqing Jiang Signed-off-by: Shaohua Li Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/raid1.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e4e8f9e565b71f..5a8216b50e3811 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1309,12 +1309,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, sigset_t full, old; prepare_to_wait(&conf->wait_barrier, &w, TASK_INTERRUPTIBLE); - if (bio_end_sector(bio) <= mddev->suspend_lo || - bio->bi_iter.bi_sector >= mddev->suspend_hi || - (mddev_is_clustered(mddev) && + if ((bio_end_sector(bio) <= mddev->suspend_lo || + bio->bi_iter.bi_sector >= mddev->suspend_hi) && + (!mddev_is_clustered(mddev) || !md_cluster_ops->area_resyncing(mddev, WRITE, - bio->bi_iter.bi_sector, - bio_end_sector(bio)))) + bio->bi_iter.bi_sector, + bio_end_sector(bio)))) break; sigfillset(&full); sigprocmask(SIG_BLOCK, &full, &old); From 42364042b7dd4ba7fed36c1db0a157c7c48282ca Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 9 Oct 2017 12:03:26 -0400 Subject: [PATCH 0786/3326] xprtrdma: Don't defer fencing an async RPC's chunks [ Upstream commit 8f66b1a529047a972cb9602a919c53a95f3d7a2b ] In current kernels, waiting in xprt_release appears to be safe to do. I had erroneously believed that for ASYNC RPCs, waiting of any kind in xprt_release->xprt_rdma_free would result in deadlock. I've done injection testing and consulted with Trond to confirm that waiting in the RPC release path is safe. For the very few times where RPC resources haven't yet been released earlier by the reply handler, it is safe to wait synchronously in xprt_rdma_free for invalidation rather than defering it to MR recovery. Note: When the QP is error state, posting a LocalInvalidate should flush and mark the MR as bad. There is no way the remote HCA can access that MR via a QP in error state, so it is effectively already inaccessible and thus safe for the Upper Layer to access. The next time the MR is used it should be recognized and cleaned up properly by frwr_op_map. Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sunrpc/xprtrdma/transport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index c84e2b644e133e..8cf5ccfe180d35 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -686,7 +686,7 @@ xprt_rdma_free(struct rpc_task *task) dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply); if (!list_empty(&req->rl_registered)) - ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task)); + ia->ri_ops->ro_unmap_sync(r_xprt, &req->rl_registered); rpcrdma_unmap_sges(ia, req); rpcrdma_buffer_put(req); } From d9490e7ca55bafac97fa65a3d34244386469eb50 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 16 Oct 2017 11:38:11 +0200 Subject: [PATCH 0787/3326] udf: Avoid overflow when session starts at large offset [ Upstream commit abdc0eb06964fe1d2fea6dd1391b734d0590365d ] When session starts beyond offset 2^31 the arithmetics in udf_check_vsd() would overflow. Make sure the computation is done in large enough type. 
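The udf fix just above is the classic "widen before you shift" pattern: s_session is a 32-bit quantity, so shifting it by the block-size bits has to happen in a 64-bit type or the high bits are lost before the result ever reaches the loff_t. A small standalone C check (the numbers are invented to show the wrap):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t session = 0x00200000;  /* session start, counted in 2 KiB blocks */
            int blocksize_bits = 11;

            int64_t bad  = session << blocksize_bits;            /* computed in 32 bits: wraps to 0 */
            int64_t good = ((int64_t)session) << blocksize_bits; /* 4294967296 */

            printf("bad  = %" PRId64 "\n", bad);
            printf("good = %" PRId64 "\n", good);
            return 0;
    }

The cast in the patch does exactly what the good line does: promote first, shift second.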
Reported-by: Cezary Sliwa Signed-off-by: Jan Kara Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/udf/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/udf/super.c b/fs/udf/super.c index 99cb81d0077f94..08bf097507f6dc 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -703,7 +703,7 @@ static loff_t udf_check_vsd(struct super_block *sb) else sectorsize = sb->s_blocksize; - sector += (sbi->s_session << sb->s_blocksize_bits); + sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits); udf_debug("Starting at sector %u (%ld byte sectors)\n", (unsigned int)(sector >> sb->s_blocksize_bits), From f7af60377ec05e2452a73c349b4fa2a4196480c8 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 13 Oct 2017 13:40:24 -0700 Subject: [PATCH 0788/3326] macvlan: Only deliver one copy of the frame to the macvlan interface [ Upstream commit dd6b9c2c332b40f142740d1b11fb77c653ff98ea ] This patch intoduces a slight adjustment for macvlan to address the fact that in source mode I was seeing two copies of any packet addressed to the macvlan interface being delivered where there should have been only one. The issue appears to be that one copy was delivered based on the source MAC address and then the second copy was being delivered based on the destination MAC address. To fix it I am just treating a unicast address match as though it is not a match since source based macvlan isn't supposed to be matching based on the destination MAC anyway. Fixes: 79cf79abce71 ("macvlan: add source mode") Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/macvlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d2aea961e0f410..fb1c9e095d0cab 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -480,7 +480,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); - if (vlan == NULL) + if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE) return RX_HANDLER_PASS; dev = vlan->dev; From 9fc290e529b4948b2b18fa943d8bd5c3a5d5b6e1 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 11 Oct 2017 10:48:43 -0700 Subject: [PATCH 0789/3326] IB/core: Fix endianness annotation in rdma_is_multicast_addr() [ Upstream commit 1c3aea2bc8f0b2e5b57375ead40457ff75a3a2ec ] Since ipv4_addr is a big endian 32-bit number, annotate it as such. 
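The rdma_is_multicast_addr() annotation above is about keeping byte order explicit: the low 32 bits of an IPv4-mapped IPv6 address are already in network (big-endian) order, so the variable holding them should be typed accordingly and tested with endian-aware helpers. A portable userspace analogue (standard socket API types rather than the kernel's __be32):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* IPv4 multicast is 224.0.0.0/4: the top nibble of the address is 0xe. */
    static int is_ipv4_multicast(uint32_t be_addr)
    {
            return (ntohl(be_addr) & 0xf0000000u) == 0xe0000000u;
    }

    int main(void)
    {
            struct in_addr mcast, ucast;

            inet_pton(AF_INET, "239.1.2.3", &mcast);   /* stored in big-endian order */
            inet_pton(AF_INET, "192.168.1.1", &ucast);

            printf("239.1.2.3   multicast? %d\n", is_ipv4_multicast(mcast.s_addr)); /* 1 */
            printf("192.168.1.1 multicast? %d\n", is_ipv4_multicast(ucast.s_addr)); /* 0 */
            return 0;
    }

The kernel patch gets the same clarity by reading s6_addr32[3] directly into a __be32 and letting ipv4_is_multicast() do the endian-aware test, so sparse can now catch mixed-endianness misuse.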
Fixes: commit be1d325a3358 ("IB/core: Set RoCEv2 MGID according to spec") Signed-off-by: Bart Van Assche Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/rdma/ib_addr.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 8815989301ab0c..b2a10c76230470 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -306,12 +306,12 @@ static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac) static inline int rdma_is_multicast_addr(struct in6_addr *addr) { - u32 ipv4_addr; + __be32 ipv4_addr; if (addr->s6_addr[0] == 0xff) return 1; - memcpy(&ipv4_addr, addr->s6_addr + 12, 4); + ipv4_addr = addr->s6_addr32[3]; return (ipv6_addr_v4mapped(addr) && ipv4_is_multicast(ipv4_addr)); } From 02ef1dd301c2f1ca0ffe58f75bf2f0f4bd291270 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 11 Oct 2017 10:48:45 -0700 Subject: [PATCH 0790/3326] RDMA/cma: Avoid triggering undefined behavior [ Upstream commit c0b64f58e8d49570aa9ee55d880f92c20ff0166b ] According to the C standard the behavior of computations with integer operands is as follows: * A computation involving unsigned operands can never overflow, because a result that cannot be represented by the resulting unsigned integer type is reduced modulo the number that is one greater than the largest value that can be represented by the resulting type. * The behavior for signed integer underflow and overflow is undefined. Hence only use unsigned integers when checking for integer overflow. This patch is what I came up with after having analyzed the following smatch warnings: drivers/infiniband/core/cma.c:3448: cma_resolve_ib_udp() warn: signed overflow undefined. 'offset + conn_param->private_data_len < conn_param->private_data_len' drivers/infiniband/core/cma.c:3505: cma_connect_ib() warn: signed overflow undefined. 'offset + conn_param->private_data_len < conn_param->private_data_len' Signed-off-by: Bart Van Assche Acked-by: Sean Hefty Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/cma.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 852c8fec808859..fa79c7076ccd1b 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1540,7 +1540,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, return id_priv; } -static inline int cma_user_data_offset(struct rdma_id_private *id_priv) +static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv) { return cma_family(id_priv) == AF_IB ? 
0 : sizeof(struct cma_hdr); } @@ -1942,7 +1942,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) struct rdma_id_private *listen_id, *conn_id = NULL; struct rdma_cm_event event; struct net_device *net_dev; - int offset, ret; + u8 offset; + int ret; listen_id = cma_id_from_event(cm_id, ib_event, &net_dev); if (IS_ERR(listen_id)) @@ -3440,7 +3441,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, struct ib_cm_sidr_req_param req; struct ib_cm_id *id; void *private_data; - int offset, ret; + u8 offset; + int ret; memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); @@ -3497,7 +3499,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv, struct rdma_route *route; void *private_data; struct ib_cm_id *id; - int offset, ret; + u8 offset; + int ret; memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); From 023bff1b335860493dce3769c942b0172ea0fb5a Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Tue, 10 Oct 2017 10:36:41 +0300 Subject: [PATCH 0791/3326] IB/ipoib: Grab rtnl lock on heavy flush when calling ndo_open/stop [ Upstream commit b4b678b06f6eef18bff44a338c01870234db0bc9 ] When ndo_open and ndo_stop are called RTNL lock should be held. In this specific case ipoib_ib_dev_open calls the offloaded ndo_open which re-sets the number of TX queue assuming RTNL lock is held. Since RTNL lock is not held, RTNL assert will fail. Signed-off-by: Alex Vesker Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 6cd61638b44142..c97384c914a42a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1203,10 +1203,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ipoib_ib_dev_down(dev); if (level == IPOIB_FLUSH_HEAVY) { + rtnl_lock(); if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) ipoib_ib_dev_stop(dev); - if (ipoib_ib_dev_open(dev) != 0) + + result = ipoib_ib_dev_open(dev); + rtnl_unlock(); + if (result) return; + if (netif_queue_stopped(dev)) netif_start_queue(dev); } From e6a9d261e95f5ba5cda38ca9c6b7e580656032c4 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Thu, 12 Oct 2017 16:12:37 +0200 Subject: [PATCH 0792/3326] icmp: don't fail on fragment reassembly time exceeded [ Upstream commit 258bbb1b0e594ad5f5652cb526b3c63e6a7fad3d ] The ICMP implementation currently replies to an ICMP time exceeded message (type 11) with an ICMP host unreachable message (type 3, code 1). However, time exceeded messages can either represent "time to live exceeded in transit" (code 0) or "fragment reassembly time exceeded" (code 1). Unconditionally replying to "fragment reassembly time exceeded" with host unreachable messages might cause unjustified connection resets which are now easily triggered as UFO has been removed, because, in turn, sending large buffers triggers IP fragmentation. 
The issue can be easily reproduced by running a lot of UDP streams which is likely to trigger IP fragmentation: # start netserver in the test namespace ip netns add test ip netns exec test netserver # create a VETH pair ip link add name veth0 type veth peer name veth0 netns test ip link set veth0 up ip -n test link set veth0 up for i in $(seq 20 29); do # assign addresses to both ends ip addr add dev veth0 192.168.$i.1/24 ip -n test addr add dev veth0 192.168.$i.2/24 # start the traffic netperf -L 192.168.$i.1 -H 192.168.$i.2 -t UDP_STREAM -l 0 & done # wait send_data: data send error: No route to host (errno 113) netperf: send_omni: send_data failed: No route to host We need to differentiate instead: if fragment reassembly time exceeded is reported, we need to silently drop the packet, if time to live exceeded is reported, maintain the current behaviour. In both cases increment the related error count "icmpInTimeExcds". While at it, fix a typo in a comment, and convert the if statement into a switch to mate it more readable. Signed-off-by: Matteo Croce Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv4/icmp.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 681e33998e03b6..3c1570d3e22fa8 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -782,7 +782,7 @@ static bool icmp_tag_validation(int proto) } /* - * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, ICMP_QUENCH, and + * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and * ICMP_PARAMETERPROB. */ @@ -810,7 +810,8 @@ static bool icmp_unreach(struct sk_buff *skb) if (iph->ihl < 5) /* Mangled header, drop. */ goto out_err; - if (icmph->type == ICMP_DEST_UNREACH) { + switch (icmph->type) { + case ICMP_DEST_UNREACH: switch (icmph->code & 15) { case ICMP_NET_UNREACH: case ICMP_HOST_UNREACH: @@ -846,8 +847,16 @@ static bool icmp_unreach(struct sk_buff *skb) } if (icmph->code > NR_ICMP_UNREACH) goto out; - } else if (icmph->type == ICMP_PARAMETERPROB) + break; + case ICMP_PARAMETERPROB: info = ntohl(icmph->un.gateway) >> 24; + break; + case ICMP_TIME_EXCEEDED: + __ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS); + if (icmph->code == ICMP_EXC_FRAGTIME) + goto out; + break; + } /* * Throw it at our lower layers From 1b07f7511a773db3a165b0a2eca35df5ba6dd1d3 Mon Sep 17 00:00:00 2001 From: Hans Holmberg Date: Fri, 13 Oct 2017 14:46:34 +0200 Subject: [PATCH 0793/3326] lightnvm: pblk: prevent gc kicks when gc is not operational MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 3e3a5b8ebd5d3b1d68facc58b0674a2564653222 ] GC can be kicked after it has been shut down when closing the last line during exit, resulting in accesses to freed structures. Make sure that GC is not triggered while it is not operational. Also make sure that GC won't be re-activated during exit when running on another processor by using timer_del_sync. 
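The pblk GC fix above encodes a common shutdown ordering: first make sure nothing can re-arm the timer (clear the enabled flag), then use del_timer_sync(), which is what the diff calls, so that a handler already running on another CPU has finished before the surrounding structures are torn down. A hypothetical kernel-style sketch of that ordering (names invented, not pblk's):

    /* Sketch only: tearing down a self-rearming timer safely. */
    #include <linux/timer.h>

    struct demo_gc {
            struct timer_list timer;
            int enabled;    /* checked by whatever would re-arm the timer */
    };

    static void demo_gc_exit(struct demo_gc *gc)
    {
            /* 1) Stop new activations first. */
            gc->enabled = 0;

            /* 2) del_timer_sync() removes a pending timer and also waits for a
             *    handler currently running on another CPU, unlike del_timer(). */
            del_timer_sync(&gc->timer);

            /* 3) Only now is it safe to free whatever the timer handler used. */
    }

The same reasoning is why the patch clears gc->gc_enabled before making the del_timer_sync() call.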
Signed-off-by: Hans Holmberg Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/lightnvm/pblk-gc.c | 9 +++++---- drivers/lightnvm/pblk-init.c | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c index 6090d28f7995a5..d6bae085e1d275 100644 --- a/drivers/lightnvm/pblk-gc.c +++ b/drivers/lightnvm/pblk-gc.c @@ -486,10 +486,10 @@ void pblk_gc_should_start(struct pblk *pblk) { struct pblk_gc *gc = &pblk->gc; - if (gc->gc_enabled && !gc->gc_active) + if (gc->gc_enabled && !gc->gc_active) { pblk_gc_start(pblk); - - pblk_gc_kick(pblk); + pblk_gc_kick(pblk); + } } /* @@ -628,7 +628,8 @@ void pblk_gc_exit(struct pblk *pblk) flush_workqueue(gc->gc_reader_wq); flush_workqueue(gc->gc_line_reader_wq); - del_timer(&gc->gc_timer); + gc->gc_enabled = 0; + del_timer_sync(&gc->gc_timer); pblk_gc_stop(pblk, 1); if (gc->gc_ts) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 1b0f61233c216a..1a01f201e9e75b 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -923,6 +923,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, pblk->dev = dev; pblk->disk = tdisk; pblk->state = PBLK_STATE_RUNNING; + pblk->gc.gc_enabled = 0; spin_lock_init(&pblk->trans_lock); spin_lock_init(&pblk->lock); From e22f692fbabafe322db0c3821e02a5d6da350bef Mon Sep 17 00:00:00 2001 From: Rakesh Pandit Date: Fri, 13 Oct 2017 14:46:28 +0200 Subject: [PATCH 0794/3326] lightnvm: pblk: fix changing GC group list for a line MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 27b978725d895e704aab44b99242a0514485d798 ] pblk_line_gc_list seems to had a bug since the introduction of pblk in getting GC list for a line. In b20ba1bc7 while redesigning the GC algorithm, the naming for the GC thresholds was altered, but the values for high_thrs and mid_thrs were not. The result is that when moving to the GC lists, the mid threshold is never evaluated. Fixes: a4bd217b4("lightnvm: physical block device (pblk) target") Signed-off-by: Rakesh Pandit Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/lightnvm/pblk-init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 1a01f201e9e75b..8ccfb95a72ff34 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -681,8 +681,8 @@ static int pblk_lines_init(struct pblk *pblk) lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long); lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long); lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long); - lm->high_thrs = lm->sec_per_line / 2; - lm->mid_thrs = lm->sec_per_line / 4; + lm->mid_thrs = lm->sec_per_line / 2; + lm->high_thrs = lm->sec_per_line / 4; lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs; /* Calculate necessary pages for smeta. 
See comment over struct From 8594a9a79c390fc99c8d8af21b3208396c94b231 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Gonz=C3=A1lez?= Date: Fri, 13 Oct 2017 14:46:02 +0200 Subject: [PATCH 0795/3326] lightnvm: pblk: use right flag for GC allocation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 7d327a9ed6c4dca341ebf99012e0a6b80a3050e6 ] The data buffer for the GC path allocates virtual memory through vmalloc. When this change was introduced, a flag signaling kmalloc'ed memory was wrongly introduced. Use the right flag when creating a bio from this buffer. Fixes: de54e703a422 ("lightnvm: pblk: use vmalloc for GC data buffer") Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/lightnvm/pblk-read.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index d682e89e649351..ee8efb55b33076 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -499,7 +499,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data, data_len = (*secs_to_gc) * geo->sec_size; bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len, - PBLK_KMALLOC_META, GFP_KERNEL); + PBLK_VMALLOC_META, GFP_KERNEL); if (IS_ERR(bio)) { pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio)); goto err_free_dma; @@ -519,7 +519,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data, if (ret) { bio_endio(bio); pr_err("pblk: GC read request failed\n"); - goto err_free_dma; + goto err_free_bio; } if (!wait_for_completion_io_timeout(&wait, @@ -541,10 +541,13 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data, atomic_long_sub(*secs_to_gc, &pblk->inflight_reads); #endif + bio_put(bio); out: nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list); return NVM_IO_OK; +err_free_bio: + bio_put(bio); err_free_dma: nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list); return NVM_IO_ERR; From 83ef2175ba013ab5ec9b1ec50944ba60a6af888d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Gonz=C3=A1lez?= Date: Fri, 13 Oct 2017 14:46:01 +0200 Subject: [PATCH 0796/3326] lightnvm: pblk: initialize debug stat counter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a1121176ff757e3c073490a69608ea0b18a00ec1 ] Initialize the stat counter for garbage collected reads. 
Fixes: a4bd217b43268 ("lightnvm: physical block device (pblk) target") Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/lightnvm/pblk-init.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 8ccfb95a72ff34..39de6121f46ea2 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -945,6 +945,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, atomic_long_set(&pblk->recov_writes, 0); atomic_long_set(&pblk->recov_writes, 0); atomic_long_set(&pblk->recov_gc_writes, 0); + atomic_long_set(&pblk->recov_gc_reads, 0); #endif atomic_long_set(&pblk->read_failed, 0); From 136e415e7a6a7d3d8666560b2147145c7636a455 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Gonz=C3=A1lez?= Date: Fri, 13 Oct 2017 14:46:06 +0200 Subject: [PATCH 0797/3326] lightnvm: pblk: fix min size for page mempool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit bd432417681a224d9fa4a9d43be7d4edc82135b2 ] pblk uses an internal page mempool for allocating pages on internal bios. The main two users of this memory pool are partial reads (reads with some sectors in cache and some on media) and padded writes, which need to add dummy pages to an existing bio already containing valid data (and with a large enough bioset allocated). In both cases, the maximum number of pages per bio is defined by the maximum number of physical sectors supported by the underlying device. This patch fixes a bad mempool allocation, where the min_nr of elements on the pool was fixed (to 16), which is lower than the maximum number of sectors supported by NVMe (as of the time for this patch). Instead, use the maximum number of allowed sectors reported by the device. 
Reported-by: Jens Axboe Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/lightnvm/pblk-core.c | 6 +++--- drivers/lightnvm/pblk-init.c | 15 ++++++++------- drivers/lightnvm/pblk-read.c | 2 +- drivers/lightnvm/pblk.h | 2 +- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 81501644fb158b..bb3e9d5d427609 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -193,7 +193,7 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off, bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE); for (i = off; i < nr_pages + off; i++) { bv = bio->bi_io_vec[i]; - mempool_free(bv.bv_page, pblk->page_pool); + mempool_free(bv.bv_page, pblk->page_bio_pool); } } @@ -205,14 +205,14 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags, int i, ret; for (i = 0; i < nr_pages; i++) { - page = mempool_alloc(pblk->page_pool, flags); + page = mempool_alloc(pblk->page_bio_pool, flags); if (!page) goto err; ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0); if (ret != PBLK_EXPOSED_PAGE_SIZE) { pr_err("pblk: could not add page to bio\n"); - mempool_free(page, pblk->page_pool); + mempool_free(page, pblk->page_bio_pool); goto err; } } diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 39de6121f46ea2..1b75675ee67b82 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -132,7 +132,6 @@ static int pblk_rwb_init(struct pblk *pblk) } /* Minimum pages needed within a lun */ -#define PAGE_POOL_SIZE 16 #define ADDR_POOL_SIZE 64 static int pblk_set_ppaf(struct pblk *pblk) @@ -247,14 +246,16 @@ static int pblk_core_init(struct pblk *pblk) if (pblk_init_global_caches(pblk)) return -ENOMEM; - pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0); - if (!pblk->page_pool) + /* internal bios can be at most the sectors signaled by the device. 
*/ + pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev), + 0); + if (!pblk->page_bio_pool) return -ENOMEM; pblk->line_ws_pool = mempool_create_slab_pool(PBLK_WS_POOL_SIZE, pblk_blk_ws_cache); if (!pblk->line_ws_pool) - goto free_page_pool; + goto free_page_bio_pool; pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache); if (!pblk->rec_pool) @@ -309,8 +310,8 @@ static int pblk_core_init(struct pblk *pblk) mempool_destroy(pblk->rec_pool); free_blk_ws_pool: mempool_destroy(pblk->line_ws_pool); -free_page_pool: - mempool_destroy(pblk->page_pool); +free_page_bio_pool: + mempool_destroy(pblk->page_bio_pool); return -ENOMEM; } @@ -322,7 +323,7 @@ static void pblk_core_free(struct pblk *pblk) if (pblk->bb_wq) destroy_workqueue(pblk->bb_wq); - mempool_destroy(pblk->page_pool); + mempool_destroy(pblk->page_bio_pool); mempool_destroy(pblk->line_ws_pool); mempool_destroy(pblk->rec_pool); mempool_destroy(pblk->g_rq_pool); diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index ee8efb55b33076..402c732f0970ae 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -238,7 +238,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd, kunmap_atomic(src_p); kunmap_atomic(dst_p); - mempool_free(src_bv.bv_page, pblk->page_pool); + mempool_free(src_bv.bv_page, pblk->page_bio_pool); hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1); } while (hole < nr_secs); diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 67e623bd5c2df1..053164deb0721f 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -618,7 +618,7 @@ struct pblk { struct list_head compl_list; - mempool_t *page_pool; + mempool_t *page_bio_pool; mempool_t *line_ws_pool; mempool_t *rec_pool; mempool_t *g_rq_pool; From 99ab42f783da58540c80c91a2af8e8c764c46ca8 Mon Sep 17 00:00:00 2001 From: Rakesh Pandit Date: Fri, 13 Oct 2017 14:45:56 +0200 Subject: [PATCH 0798/3326] lightnvm: pblk: protect line bitmap while submitting meta io MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e57903fd972a398b7140d0bc055714e13a0e58c5 ] It seems pblk_dealloc_page would race against pblk_alloc_pages for line bitmap for sector allocation.The chances are very low but might as well protect the bitmap properly. 
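The invariant behind the one-line fix below can be stated as: every path that consults map_bitmap together with cur_sec must do so under the per-line lock. A reduced sketch of the allocation side of that invariant follows; it uses a stand-in struct instead of the real struct pblk_line and is an illustration, not the driver's own code.

  #include <linux/bitops.h>
  #include <linux/spinlock.h>

  /* Stand-in for the relevant pblk_line fields (illustrative only). */
  struct line_map {
      spinlock_t lock;
      unsigned long *map_bitmap;
      unsigned int cur_sec;
      unsigned int sec_per_line;
  };

  /* Without the lock, a concurrent deallocator can run between
   * find_next_zero_bit() and the set_bit() calls and leave cur_sec and the
   * bitmap disagreeing about which sectors are in use. */
  static unsigned long line_reserve_secs(struct line_map *line, int nr_secs)
  {
      unsigned long sec;
      int i;

      spin_lock(&line->lock);
      sec = find_next_zero_bit(line->map_bitmap, line->sec_per_line,
                               line->cur_sec);
      for (i = 0; i < nr_secs; i++)
          set_bit(sec + i, line->map_bitmap);
      line->cur_sec = sec + nr_secs;
      spin_unlock(&line->lock);

      return sec;
  }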
Signed-off-by: Rakesh Pandit Reviewed-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/lightnvm/pblk-core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index bb3e9d5d427609..3f0ddc0d7393fd 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -486,12 +486,14 @@ void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs) u64 addr; int i; + spin_lock(&line->lock); addr = find_next_zero_bit(line->map_bitmap, pblk->lm.sec_per_line, line->cur_sec); line->cur_sec = addr - nr_secs; for (i = 0; i < nr_secs; i++, line->cur_sec--) WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap)); + spin_unlock(&line->lock); } u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs) From e37eb54a000c767e7f39b806fb048c4be75ed786 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Wed, 27 Sep 2017 09:13:34 +0800 Subject: [PATCH 0799/3326] ath9k: fix tx99 potential info leak MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit ee0a47186e2fa9aa1c56cadcea470ca0ba8c8692 ] When the user sets count to zero the string buffer would remain completely uninitialized which causes the kernel to parse its own stack data, potentially leading to an info leak. In addition to that, the string might be not terminated properly when the user data does not contain a 0-terminator. Signed-off-by: Miaoqing Pan Reviewed-by: Christoph Böhmwalder Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath9k/tx99.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index 49ed1afb913ca7..fe3a8263b22419 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -179,6 +179,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, ssize_t len; int r; + if (count < 1) + return -EINVAL; + if (sc->cur_chan->nvifs > 1) return -EOPNOTSUPP; @@ -186,6 +189,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, if (copy_from_user(buf, user_buf, len)) return -EFAULT; + buf[len] = '\0'; + if (strtobool(buf, &start)) return -EINVAL; From 5c212b59ded7fc9382e1c55c100f82709cf542ca Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Wed, 4 Oct 2017 12:22:55 +0300 Subject: [PATCH 0800/3326] ath10k: fix core PCI suspend when WoWLAN is supported but disabled [ Upstream commit 96378bd2c6cda5f04d0f6da2cd35d4670a982c38 ] For devices where the FW supports WoWLAN but user-space has not configured it, we don't do any PCI-specific suspend/resume operations, because mac80211 doesn't call drv_suspend() when !wowlan. This has particularly bad effects for some platforms, because we don't stop the power-save timer, and if this timer goes off after the PCI controller has suspended the link, Bad Things will happen. Commit 32faa3f0ee50 ("ath10k: add the PCI PM core suspend/resume ops") got some of this right, in that it understood there was a problem on non-WoWLAN firmware. But it forgot the $subject case. Fix this by moving all the PCI driver suspend/resume logic exclusively into the driver PM hooks. This shouldn't affect WoWLAN support much (this just gets executed later on). 
I would just as well kill the entirety of ath10k_hif_suspend(), as it's not even implemented on the USB or SDIO drivers. I expect that we don't need the callback, except to return "supported" (i.e., 0) or "not supported" (i.e., -EOPNOTSUPP). Fixes: 32faa3f0ee50 ("ath10k: add the PCI PM core suspend/resume ops") Fixes: 77258d409ce4 ("ath10k: enable pci soc powersaving") Signed-off-by: Brian Norris Cc: Ryan Hsu Cc: Kalle Valo Cc: Michal Kazior Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath10k/pci.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 195dafb9813144..b18a9b690df4c1 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2580,6 +2580,12 @@ void ath10k_pci_hif_power_down(struct ath10k *ar) #ifdef CONFIG_PM static int ath10k_pci_hif_suspend(struct ath10k *ar) +{ + /* Nothing to do; the important stuff is in the driver suspend. */ + return 0; +} + +static int ath10k_pci_suspend(struct ath10k *ar) { /* The grace timer can still be counting down and ar->ps_awake be true. * It is known that the device may be asleep after resuming regardless @@ -2592,6 +2598,12 @@ static int ath10k_pci_hif_suspend(struct ath10k *ar) } static int ath10k_pci_hif_resume(struct ath10k *ar) +{ + /* Nothing to do; the important stuff is in the driver resume. */ + return 0; +} + +static int ath10k_pci_resume(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; @@ -3401,11 +3413,7 @@ static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev) struct ath10k *ar = dev_get_drvdata(dev); int ret; - if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, - ar->running_fw->fw_file.fw_features)) - return 0; - - ret = ath10k_hif_suspend(ar); + ret = ath10k_pci_suspend(ar); if (ret) ath10k_warn(ar, "failed to suspend hif: %d\n", ret); @@ -3417,11 +3425,7 @@ static __maybe_unused int ath10k_pci_pm_resume(struct device *dev) struct ath10k *ar = dev_get_drvdata(dev); int ret; - if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, - ar->running_fw->fw_file.fw_features)) - return 0; - - ret = ath10k_hif_resume(ar); + ret = ath10k_pci_resume(ar); if (ret) ath10k_warn(ar, "failed to resume hif: %d\n", ret); From d094690cbed906ca96752cb94abed50fbf1cfaa3 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Thu, 19 Oct 2017 11:45:19 -0700 Subject: [PATCH 0801/3326] ath10k: fix build errors with !CONFIG_PM [ Upstream commit 20665a9076d48e9abd9a2db13d307f58f7ef6647 ] Build errors have been reported with CONFIG_PM=n: drivers/net/wireless/ath/ath10k/pci.c:3416:8: error: implicit declaration of function 'ath10k_pci_suspend' [-Werror=implicit-function-declaration] drivers/net/wireless/ath/ath10k/pci.c:3428:8: error: implicit declaration of function 'ath10k_pci_resume' [-Werror=implicit-function-declaration] These are caused by the combination of the following two commits: 6af1de2e4ec4 ("ath10k: mark PM functions as __maybe_unused") 96378bd2c6cd ("ath10k: fix core PCI suspend when WoWLAN is supported but disabled") Both build fine on their own. But now that ath10k_pci_pm_{suspend,resume}() is compiled unconditionally, we should also compile ath10k_pci_{suspend,resume}() unconditionally. And drop the #ifdef around ath10k_pci_hif_{suspend,resume}() too; they are trivial (empty), so we're not saving much space by compiling them out. 
And the alternatives would be to sprinkle more __maybe_unused, or spread the #ifdef's further. Build tested with the following combinations: CONFIG_PM=y && CONFIG_PM_SLEEP=y CONFIG_PM=y && CONFIG_PM_SLEEP=n CONFIG_PM=n Fixes: 96378bd2c6cd ("ath10k: fix core PCI suspend when WoWLAN is supported but disabled") Fixes: 096ad2a15fd8 ("Merge branch 'ath-next'") Signed-off-by: Brian Norris Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath10k/pci.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index b18a9b690df4c1..d790ea20b95d92 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2577,8 +2577,6 @@ void ath10k_pci_hif_power_down(struct ath10k *ar) */ } -#ifdef CONFIG_PM - static int ath10k_pci_hif_suspend(struct ath10k *ar) { /* Nothing to do; the important stuff is in the driver suspend. */ @@ -2627,7 +2625,6 @@ static int ath10k_pci_resume(struct ath10k *ar) return ret; } -#endif static bool ath10k_pci_validate_cal(void *data, size_t size) { @@ -2782,10 +2779,8 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .power_down = ath10k_pci_hif_power_down, .read32 = ath10k_pci_read32, .write32 = ath10k_pci_write32, -#ifdef CONFIG_PM .suspend = ath10k_pci_hif_suspend, .resume = ath10k_pci_hif_resume, -#endif .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom, }; From bed6119aa21b28f096880342f9e2445f9ad47b25 Mon Sep 17 00:00:00 2001 From: Bin Liu Date: Tue, 5 Dec 2017 08:45:30 -0600 Subject: [PATCH 0802/3326] usb: musb: da8xx: fix babble condition handling commit bd3486ded7a0c313a6575343e6c2b21d14476645 upstream. When babble condition happens, the musb controller might automatically turns off VBUS. On DA8xx platform, the controller generates drvvbus interrupt for turning off VBUS along with the babble interrupt. In this case, we should handle the babble interrupt first and recover from the babble condition. This change ignores the drvvbus interrupt if babble interrupt is also generated at the same time, so the babble recovery routine works properly. Signed-off-by: Bin Liu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/da8xx.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index df88123274ca72..972bf42101893a 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -305,7 +305,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; portstate(musb->port1_status |= USB_PORT_STAT_POWER); del_timer(&otg_workaround); - } else { + } else if (!(musb->int_usb & MUSB_INTR_BABBLE)){ + /* + * When babble condition happens, drvvbus interrupt + * is also generated. Ignore this drvvbus interrupt + * and let babble interrupt handler recovers the + * controller; otherwise, the host-mode flag is lost + * due to the MUSB_DEV_MODE() call below and babble + * recovery logic will not called. 
+ */ musb->is_active = 0; MUSB_DEV_MODE(musb); otg->default_a = 0; From 7b3775017f4e6b87dfd2c7f63d1eaf057948f31d Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 20 Dec 2017 10:10:38 +0100 Subject: [PATCH 0803/3326] Linux 4.14.8 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 39d7af0165a8fd..97b5ae76ac8cdf 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 7 +SUBLEVEL = 8 EXTRAVERSION = NAME = Petit Gorille From c09061aec2e5245f547ea6eecf321e172e42b7fb Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Wed, 6 Sep 2017 17:18:08 +0200 Subject: [PATCH 0804/3326] x86/asm: Remove unnecessary \n\t in front of CC_SET() from asm templates commit 3c52b5c64326d9dcfee4e10611c53ec1b1b20675 upstream. There is no need for \n\t in front of CC_SET(), as the macro already includes these two. Signed-off-by: Uros Bizjak Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170906151808.5634-1-ubizjak@gmail.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/archrandom.h | 8 ++++---- arch/x86/include/asm/bitops.h | 10 +++++----- arch/x86/include/asm/percpu.h | 2 +- arch/x86/include/asm/rmwcc.h | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h index 5b0579abb39829..3ac991d81e74d9 100644 --- a/arch/x86/include/asm/archrandom.h +++ b/arch/x86/include/asm/archrandom.h @@ -45,7 +45,7 @@ static inline bool rdrand_long(unsigned long *v) bool ok; unsigned int retry = RDRAND_RETRY_LOOPS; do { - asm volatile(RDRAND_LONG "\n\t" + asm volatile(RDRAND_LONG CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); if (ok) @@ -59,7 +59,7 @@ static inline bool rdrand_int(unsigned int *v) bool ok; unsigned int retry = RDRAND_RETRY_LOOPS; do { - asm volatile(RDRAND_INT "\n\t" + asm volatile(RDRAND_INT CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); if (ok) @@ -71,7 +71,7 @@ static inline bool rdrand_int(unsigned int *v) static inline bool rdseed_long(unsigned long *v) { bool ok; - asm volatile(RDSEED_LONG "\n\t" + asm volatile(RDSEED_LONG CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); return ok; @@ -80,7 +80,7 @@ static inline bool rdseed_long(unsigned long *v) static inline bool rdseed_int(unsigned int *v) { bool ok; - asm volatile(RDSEED_INT "\n\t" + asm volatile(RDSEED_INT CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); return ok; diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 2bcf4731495959..3fa039855b8f70 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -143,7 +143,7 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) { bool negative; - asm volatile(LOCK_PREFIX "andb %2,%1\n\t" + asm volatile(LOCK_PREFIX "andb %2,%1" CC_SET(s) : CC_OUT(s) (negative), ADDR : "ir" ((char) ~(1 << nr)) : "memory"); @@ -246,7 +246,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * { bool oldbit; - asm("bts %2,%1\n\t" + asm("bts %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr)); @@ -286,7 +286,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long { bool oldbit; - asm volatile("btr %2,%1\n\t" + asm volatile("btr %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr)); @@ -298,7 +298,7 @@ static __always_inline 
bool __test_and_change_bit(long nr, volatile unsigned lon { bool oldbit; - asm volatile("btc %2,%1\n\t" + asm volatile("btc %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr) : "memory"); @@ -329,7 +329,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l { bool oldbit; - asm volatile("bt %2,%1\n\t" + asm volatile("bt %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long *)addr), "Ir" (nr)); diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 377f1ffd18be67..ba3c523aaf1618 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -526,7 +526,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr, { bool oldbit; - asm volatile("bt "__percpu_arg(2)",%1\n\t" + asm volatile("bt "__percpu_arg(2)",%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long __percpu *)addr), "Ir" (nr)); diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index d8f3a6ae9f6c98..f91c365e57c36d 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h @@ -29,7 +29,7 @@ cc_label: \ #define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \ do { \ bool c; \ - asm volatile (fullop ";" CC_SET(cc) \ + asm volatile (fullop CC_SET(cc) \ : [counter] "+m" (var), CC_OUT(cc) (c) \ : __VA_ARGS__ : clobbers); \ return c; \ From 42314edefac8a80efb3d00a9f83bf1b1871bd473 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 18 Sep 2017 21:43:30 -0500 Subject: [PATCH 0805/3326] objtool: Don't report end of section error after an empty unwind hint commit 00d96180dc38ef872ac471c2d3e14b067cbd895d upstream. If asm code specifies an UNWIND_HINT_EMPTY hint, don't warn if the section ends unexpectedly. This can happen with the xen-head.S code because the hypercall_page is "text" but it's all zeros. Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Jiri Slaby Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/ddafe199dd8797e40e3c2777373347eba1d65572.1505764066.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/check.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index c0e26ad1fa7e3d..9b341584eb1b56 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -1757,11 +1757,14 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, if (insn->dead_end) return 0; - insn = next_insn; - if (!insn) { + if (!next_insn) { + if (state.cfa.base == CFI_UNDEFINED) + return 0; WARN("%s: unexpected end of section", sec->name); return 1; } + + insn = next_insn; } return 0; From 9cf5a88b165e203d8c7d5284640ea6aa61baf78a Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 18 Sep 2017 21:43:31 -0500 Subject: [PATCH 0806/3326] x86/head: Remove confusing comment commit 17270717e80de33a884ad328fea5f407d87f6d6a upstream. This comment is actively wrong and confusing. It refers to the registers' stack offsets after the pt_regs has been constructed on the stack, but this code is *before* that. At this point the stack just has the standard iret frame, for which no comment should be needed. 
Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Jiri Slaby Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/a3c267b770fc56c9b86df9c11c552848248aace2.1505764066.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/head_64.S | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 6dde3f3fc1f8e7..41d6146dbd5a25 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -271,10 +271,6 @@ bad_address: __INIT ENTRY(early_idt_handler_array) - # 104(%rsp) %rflags - # 96(%rsp) %cs - # 88(%rsp) %rip - # 80(%rsp) error code i = 0 .rept NUM_EXCEPTION_VECTORS .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 From 98ce8eee6021e32ba8f606ed7de2a9c0f3807dcf Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 18 Sep 2017 21:43:32 -0500 Subject: [PATCH 0807/3326] x86/head: Remove unused 'bad_address' code commit a8b88e84d124bc92c4808e72b8b8c0e0bb538630 upstream. It's no longer possible for this code to be executed, so remove it. Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Jiri Slaby Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/32a46fe92d2083700599b36872b26e7dfd7b7965.1505764066.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/head_64.S | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 41d6146dbd5a25..fa800dbeb70737 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -266,9 +266,6 @@ ENDPROC(start_cpu0) .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS __FINITDATA -bad_address: - jmp bad_address - __INIT ENTRY(early_idt_handler_array) i = 0 From 8233afff9b45feca9020b2fc52f7967da5a13907 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 18 Sep 2017 21:43:33 -0500 Subject: [PATCH 0808/3326] x86/head: Fix head ELF function annotations commit 015a2ea5478680fc5216d56b7ff306f2a74efaf9 upstream. These functions aren't callable C-type functions, so don't annotate them as such. 
Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Jiri Slaby Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/36eb182738c28514f8bf95e403d89b6413a88883.1505764066.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/head_64.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index fa800dbeb70737..2d83ff6e5158f1 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -235,7 +235,7 @@ ENTRY(secondary_startup_64) pushq %rax # target address in negative space lretq .Lafter_lret: -ENDPROC(secondary_startup_64) +END(secondary_startup_64) #include "verify_cpu.S" @@ -278,7 +278,7 @@ ENTRY(early_idt_handler_array) i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr -ENDPROC(early_idt_handler_array) +END(early_idt_handler_array) early_idt_handler_common: /* @@ -321,7 +321,7 @@ early_idt_handler_common: 20: decl early_recursion_flag(%rip) jmp restore_regs_and_iret -ENDPROC(early_idt_handler_common) +END(early_idt_handler_common) __INITDATA From aad9d83f9dcbc896a034f2676d3d2b661b0ca5cf Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 18 Sep 2017 21:43:34 -0500 Subject: [PATCH 0809/3326] x86/boot: Annotate verify_cpu() as a callable function commit e93db75a0054b23a874a12c63376753544f3fe9e upstream. verify_cpu() is a callable function. Annotate it as such. Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Jiri Slaby Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/293024b8a080832075312f38c07ccc970fc70292.1505764066.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/verify_cpu.S | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S index 014ea59aa153e4..3d3c2f71f61719 100644 --- a/arch/x86/kernel/verify_cpu.S +++ b/arch/x86/kernel/verify_cpu.S @@ -33,7 +33,7 @@ #include #include -verify_cpu: +ENTRY(verify_cpu) pushf # Save caller passed flags push $0 # Kill any dangerous flags popf @@ -139,3 +139,4 @@ verify_cpu: popf # Restore caller passed flags xorl %eax, %eax ret +ENDPROC(verify_cpu) From 2c9863c1687b2e1398d943351ede4ad45ab77cbe Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 18 Sep 2017 21:43:35 -0500 Subject: [PATCH 0810/3326] x86/xen: Fix xen head ELF annotations commit 2582d3df95c76d3b686453baf90b64d57e87d1e8 upstream. Mark the ends of the startup_xen and hypercall_page code sections. 
Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Jiri Slaby Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/3a80a394d30af43d9cefa1a29628c45ed8420c97.1505764066.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/xen/xen-head.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index b5b8d7f4355744..dc1b8439617fff 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -34,7 +34,7 @@ ENTRY(startup_xen) mov $init_thread_union+THREAD_SIZE, %_ASM_SP jmp xen_start_kernel - +END(startup_xen) __FINIT #endif @@ -48,7 +48,7 @@ ENTRY(hypercall_page) .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32 #include #undef HYPERCALL - +END(hypercall_page) .popsection ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") From d074a1075f6a123e3f9c5fdbac7eea4c1006e03b Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 18 Sep 2017 21:43:36 -0500 Subject: [PATCH 0811/3326] x86/xen: Add unwind hint annotations commit abbe1cac6214d81d2f4e149aba64a8760703144e upstream. Add unwind hint annotations to the xen head code so the ORC unwinder can read head_64.o. hypercall_page needs empty annotations at 32-byte intervals to match the 'xen_hypercall_*' ELF functions at those locations. Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Jiri Slaby Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/70ed2eb516fe9266be766d953f93c2571bca88cc.1505764066.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/xen/xen-head.S | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index dc1b8439617fff..497cc55a0c16cc 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -20,6 +21,7 @@ #ifdef CONFIG_XEN_PV __INIT ENTRY(startup_xen) + UNWIND_HINT_EMPTY cld /* Clear .bss */ @@ -41,7 +43,10 @@ END(startup_xen) .pushsection .text .balign PAGE_SIZE ENTRY(hypercall_page) - .skip PAGE_SIZE + .rept (PAGE_SIZE / 32) + UNWIND_HINT_EMPTY + .skip 32 + .endr #define HYPERCALL(n) \ .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \ From 5d5e60c80fd8613b8a76876b3ffd357f7a0bf7e7 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 18 Sep 2017 21:43:37 -0500 Subject: [PATCH 0812/3326] x86/head: Add unwind hint annotations commit 2704fbb672d0d9a19414907fda7949283dcef6a1 upstream. Jiri Slaby reported an ORC issue when unwinding from an idle task. The stack was: ffffffff811083c2 do_idle+0x142/0x1e0 ffffffff8110861d cpu_startup_entry+0x5d/0x60 ffffffff82715f58 start_kernel+0x3ff/0x407 ffffffff827153e8 x86_64_start_kernel+0x14e/0x15d ffffffff810001bf secondary_startup_64+0x9f/0xa0 The ORC unwinder errored out at secondary_startup_64 because the head code isn't annotated yet so there wasn't a corresponding ORC entry. Fix that and any other head-related unwinding issues by adding unwind hints to the head code. 
Reported-by: Jiri Slaby Tested-by: Jiri Slaby Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/78ef000a2f68f545d6eef44ee912edceaad82ccf.1505764066.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/Makefile | 1 - arch/x86/kernel/head_64.S | 14 ++++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 5f70044340ff1c..fbb0cbd4f2aa84 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -27,7 +27,6 @@ KASAN_SANITIZE_dumpstack.o := n KASAN_SANITIZE_dumpstack_$(BITS).o := n KASAN_SANITIZE_stacktrace.o := n -OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y OBJECT_FILES_NON_STANDARD_test_nx.o := y diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 2d83ff6e5158f1..6733cac7cba5b8 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -50,6 +50,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map) .code64 .globl startup_64 startup_64: + UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded an identity mapped page table @@ -89,6 +90,7 @@ startup_64: addq $(early_top_pgt - __START_KERNEL_map), %rax jmp 1f ENTRY(secondary_startup_64) + UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded a mapped page table. @@ -133,6 +135,7 @@ ENTRY(secondary_startup_64) movq $1f, %rax jmp *%rax 1: + UNWIND_HINT_EMPTY /* Check if nx is implemented */ movl $0x80000001, %eax @@ -247,6 +250,7 @@ END(secondary_startup_64) */ ENTRY(start_cpu0) movq initial_stack(%rip), %rsp + UNWIND_HINT_EMPTY jmp .Ljump_to_C_code ENDPROC(start_cpu0) #endif @@ -271,13 +275,18 @@ ENTRY(early_idt_handler_array) i = 0 .rept NUM_EXCEPTION_VECTORS .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 - pushq $0 # Dummy error code, to make stack frame uniform + UNWIND_HINT_IRET_REGS + pushq $0 # Dummy error code, to make stack frame uniform + .else + UNWIND_HINT_IRET_REGS offset=8 .endif pushq $i # 72(%rsp) Vector number jmp early_idt_handler_common + UNWIND_HINT_IRET_REGS i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr + UNWIND_HINT_IRET_REGS offset=16 END(early_idt_handler_array) early_idt_handler_common: @@ -306,6 +315,7 @@ early_idt_handler_common: pushq %r13 /* pt_regs->r13 */ pushq %r14 /* pt_regs->r14 */ pushq %r15 /* pt_regs->r15 */ + UNWIND_HINT_REGS cmpq $14,%rsi /* Page fault? */ jnz 10f @@ -428,7 +438,7 @@ ENTRY(phys_base) EXPORT_SYMBOL(phys_base) #include "../../x86/xen/xen-head.S" - + __PAGE_ALIGNED_BSS NEXT_PAGE(empty_zero_page) .skip PAGE_SIZE From 2cb7165b4dcf80b03d7cc86fedd1a65a455823b3 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 25 Sep 2017 02:06:19 -0600 Subject: [PATCH 0813/3326] ACPI / APEI: adjust a local variable type in ghes_ioremap_pfn_irq() commit 095f613c6b386a1704b73a549e9ba66c1d5381ae upstream. Match up with what 7edda0886b ("acpi: apei: handle SEA notification type for ARMv8") did for ghes_ioremap_pfn_nmi(). Signed-off-by: Jan Beulich Reviewed-by: Borislav Petkov Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/apei/ghes.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 3c3a37b8503bd4..69ef0b6bf25dfc 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -174,7 +174,8 @@ static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) { - unsigned long vaddr, paddr; + unsigned long vaddr; + phys_addr_t paddr; pgprot_t prot; vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); From ab7fc55ef231511ae40adf4c51cec89035dad4db Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 12 Oct 2017 09:24:30 +0200 Subject: [PATCH 0814/3326] x86/unwinder: Make CONFIG_UNWINDER_ORC=y the default in the 64-bit defconfig commit 1e4078f0bba46ad61b69548abe6a6faf63b89380 upstream. Increase testing coverage by turning on the primary x86 unwinder for the 64-bit defconfig. Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/configs/x86_64_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 4a4b16e56d354f..eb65c248708d4f 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -299,6 +299,7 @@ CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_DEBUG_RODATA_TEST is not set CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_OPTIMIZE_INLINING=y +CONFIG_ORC_UNWINDER=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y From a11adc6a2e0b429adab534c44f68c057d12ecd4d Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 12 Oct 2017 18:06:19 -0400 Subject: [PATCH 0815/3326] x86/fpu/debug: Remove unused 'x86_fpu_state' and 'x86_fpu_deactivate_state' tracepoints commit 127a1bea40f7f2a36bc7207ea4d51bb6b4e936fa upstream. Commit: d1898b733619 ("x86/fpu: Add tracepoints to dump FPU state at key points") ... added the 'x86_fpu_state' and 'x86_fpu_deactivate_state' trace points, but never used them. Today they are still not used. As they take up and waste memory, remove them. Signed-off-by: Steven Rostedt (VMware) Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20171012180619.670b68b6@gandalf.local.home Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/trace/fpu.h | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h index fa60398bbc3ace..069c04be150760 100644 --- a/arch/x86/include/asm/trace/fpu.h +++ b/arch/x86/include/asm/trace/fpu.h @@ -34,11 +34,6 @@ DECLARE_EVENT_CLASS(x86_fpu, ) ); -DEFINE_EVENT(x86_fpu, x86_fpu_state, - TP_PROTO(struct fpu *fpu), - TP_ARGS(fpu) -); - DEFINE_EVENT(x86_fpu, x86_fpu_before_save, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) @@ -74,11 +69,6 @@ DEFINE_EVENT(x86_fpu, x86_fpu_activate_state, TP_ARGS(fpu) ); -DEFINE_EVENT(x86_fpu, x86_fpu_deactivate_state, - TP_PROTO(struct fpu *fpu), - TP_ARGS(fpu) -); - DEFINE_EVENT(x86_fpu, x86_fpu_init_state, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) From 8af220c9e240a47660686161e6d04043ad09c563 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 13 Oct 2017 15:02:00 -0500 Subject: [PATCH 0816/3326] x86/unwind: Rename unwinder config options to 'CONFIG_UNWINDER_*' commit 11af847446ed0d131cf24d16a7ef3d5ea7a49554 upstream. 
Rename the unwinder config options from: CONFIG_ORC_UNWINDER CONFIG_FRAME_POINTER_UNWINDER CONFIG_GUESS_UNWINDER to: CONFIG_UNWINDER_ORC CONFIG_UNWINDER_FRAME_POINTER CONFIG_UNWINDER_GUESS ... in order to give them a more logical config namespace. Suggested-by: Ingo Molnar Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/73972fc7e2762e91912c6b9584582703d6f1b8cc.1507924831.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/orc-unwinder.txt | 2 +- Makefile | 4 ++-- arch/x86/Kconfig | 2 +- arch/x86/Kconfig.debug | 10 +++++----- arch/x86/configs/tiny.config | 4 ++-- arch/x86/configs/x86_64_defconfig | 2 +- arch/x86/include/asm/module.h | 2 +- arch/x86/include/asm/unwind.h | 8 ++++---- arch/x86/kernel/Makefile | 6 +++--- include/asm-generic/vmlinux.lds.h | 2 +- lib/Kconfig.debug | 2 +- scripts/Makefile.build | 2 +- 12 files changed, 23 insertions(+), 23 deletions(-) diff --git a/Documentation/x86/orc-unwinder.txt b/Documentation/x86/orc-unwinder.txt index af0c9a4c65a6da..cd4b29be29af1e 100644 --- a/Documentation/x86/orc-unwinder.txt +++ b/Documentation/x86/orc-unwinder.txt @@ -4,7 +4,7 @@ ORC unwinder Overview -------- -The kernel CONFIG_ORC_UNWINDER option enables the ORC unwinder, which is +The kernel CONFIG_UNWINDER_ORC option enables the ORC unwinder, which is similar in concept to a DWARF unwinder. The difference is that the format of the ORC data is much simpler than DWARF, which in turn allows the ORC unwinder to be much simpler and faster. diff --git a/Makefile b/Makefile index 97b5ae76ac8cdf..41632ea2505513 100644 --- a/Makefile +++ b/Makefile @@ -935,8 +935,8 @@ ifdef CONFIG_STACK_VALIDATION ifeq ($(has_libelf),1) objtool_target := tools/objtool FORCE else - ifdef CONFIG_ORC_UNWINDER - $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") + ifdef CONFIG_UNWINDER_ORC + $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") else $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") endif diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9bceea6a5852ed..6aad9f070b9d33 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -171,7 +171,7 @@ config X86 select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER_UNWINDER && STACK_VALIDATION + select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION select HAVE_STACK_VALIDATION if X86_64 select HAVE_SYSCALL_TRACEPOINTS select HAVE_UNSTABLE_SCHED_CLOCK diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 90b123056f4b38..728b44a5697830 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -359,13 +359,13 @@ config PUNIT_ATOM_DEBUG choice prompt "Choose kernel unwinder" - default FRAME_POINTER_UNWINDER + default UNWINDER_FRAME_POINTER ---help--- This determines which method will be used for unwinding kernel stack traces for panics, oopses, bugs, warnings, perf, /proc//stack, livepatch, lockdep, and more. 
-config FRAME_POINTER_UNWINDER +config UNWINDER_FRAME_POINTER bool "Frame pointer unwinder" select FRAME_POINTER ---help--- @@ -380,7 +380,7 @@ config FRAME_POINTER_UNWINDER consistency model, as this is currently the only way to get a reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE). -config ORC_UNWINDER +config UNWINDER_ORC bool "ORC unwinder" depends on X86_64 select STACK_VALIDATION @@ -396,7 +396,7 @@ config ORC_UNWINDER Enabling this option will increase the kernel's runtime memory usage by roughly 2-4MB, depending on your kernel config. -config GUESS_UNWINDER +config UNWINDER_GUESS bool "Guess unwinder" depends on EXPERT ---help--- @@ -411,7 +411,7 @@ config GUESS_UNWINDER endchoice config FRAME_POINTER - depends on !ORC_UNWINDER && !GUESS_UNWINDER + depends on !UNWINDER_ORC && !UNWINDER_GUESS bool endmenu diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config index 550cd5012b7354..66c9e2aab16cc3 100644 --- a/arch/x86/configs/tiny.config +++ b/arch/x86/configs/tiny.config @@ -1,5 +1,5 @@ CONFIG_NOHIGHMEM=y # CONFIG_HIGHMEM4G is not set # CONFIG_HIGHMEM64G is not set -CONFIG_GUESS_UNWINDER=y -# CONFIG_FRAME_POINTER_UNWINDER is not set +CONFIG_UNWINDER_GUESS=y +# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index eb65c248708d4f..e32fc1f274d854 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -299,7 +299,7 @@ CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_DEBUG_RODATA_TEST is not set CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_OPTIMIZE_INLINING=y -CONFIG_ORC_UNWINDER=y +CONFIG_UNWINDER_ORC=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index 8546fafa21a913..7948a17febb4b5 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h @@ -6,7 +6,7 @@ #include struct mod_arch_specific { -#ifdef CONFIG_ORC_UNWINDER +#ifdef CONFIG_UNWINDER_ORC unsigned int num_orcs; int *orc_unwind_ip; struct orc_entry *orc_unwind; diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 87adc0d38c4aa9..e9cc6fe1fc6f95 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -13,11 +13,11 @@ struct unwind_state { struct task_struct *task; int graph_idx; bool error; -#if defined(CONFIG_ORC_UNWINDER) +#if defined(CONFIG_UNWINDER_ORC) bool signal, full_regs; unsigned long sp, bp, ip; struct pt_regs *regs; -#elif defined(CONFIG_FRAME_POINTER_UNWINDER) +#elif defined(CONFIG_UNWINDER_FRAME_POINTER) bool got_irq; unsigned long *bp, *orig_sp, ip; struct pt_regs *regs; @@ -51,7 +51,7 @@ void unwind_start(struct unwind_state *state, struct task_struct *task, __unwind_start(state, task, regs, first_frame); } -#if defined(CONFIG_ORC_UNWINDER) || defined(CONFIG_FRAME_POINTER_UNWINDER) +#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER) static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) { if (unwind_done(state)) @@ -66,7 +66,7 @@ static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) } #endif -#ifdef CONFIG_ORC_UNWINDER +#ifdef CONFIG_UNWINDER_ORC void unwind_init(void); void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size); diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index fbb0cbd4f2aa84..d12da41f72daf0 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -127,9 +127,9 
@@ obj-$(CONFIG_PERF_EVENTS) += perf_regs.o obj-$(CONFIG_TRACING) += tracepoint.o obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o -obj-$(CONFIG_ORC_UNWINDER) += unwind_orc.o -obj-$(CONFIG_FRAME_POINTER_UNWINDER) += unwind_frame.o -obj-$(CONFIG_GUESS_UNWINDER) += unwind_guess.o +obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o +obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o +obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o ### # 64 bit specific files diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index e549bff87c5b4e..353f52fdc35eb8 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -688,7 +688,7 @@ #define BUG_TABLE #endif -#ifdef CONFIG_ORC_UNWINDER +#ifdef CONFIG_UNWINDER_ORC #define ORC_UNWIND_TABLE \ . = ALIGN(4); \ .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index dfdad67d8f6cce..ff21b4dbb3922f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -376,7 +376,7 @@ config STACK_VALIDATION that runtime stack traces are more reliable. This is also a prerequisite for generation of ORC unwind data, which - is needed for CONFIG_ORC_UNWINDER. + is needed for CONFIG_UNWINDER_ORC. For more information, see tools/objtool/Documentation/stack-validation.txt. diff --git a/scripts/Makefile.build b/scripts/Makefile.build index bb831d49bcfd5e..e63af4e19382af 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -259,7 +259,7 @@ ifneq ($(SKIP_STACK_VALIDATION),1) __objtool_obj := $(objtree)/tools/objtool/objtool -objtool_args = $(if $(CONFIG_ORC_UNWINDER),orc generate,check) +objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check) ifndef CONFIG_FRAME_POINTER objtool_args += --no-fp From b40a923903d0a535676b8d7b22bfe17260c3d35a Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 13 Oct 2017 15:02:01 -0500 Subject: [PATCH 0817/3326] x86/unwind: Make CONFIG_UNWINDER_ORC=y the default in kconfig for 64-bit commit fc72ae40e30327aa24eb88a24b9c7058f938bd36 upstream. The ORC unwinder has been stable in testing so far. Give it much wider testing by making it the default in kconfig for x86_64. It's not yet supported for 32-bit, so leave frame pointers as the default there. Suggested-by: Ingo Molnar Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/9b1237bbe7244ed9cdf8db2dcb1253e37e1c341e.1507924831.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/Kconfig.debug | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 728b44a5697830..6293a8768a9123 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -359,27 +359,13 @@ config PUNIT_ATOM_DEBUG choice prompt "Choose kernel unwinder" - default UNWINDER_FRAME_POINTER + default UNWINDER_ORC if X86_64 + default UNWINDER_FRAME_POINTER if X86_32 ---help--- This determines which method will be used for unwinding kernel stack traces for panics, oopses, bugs, warnings, perf, /proc//stack, livepatch, lockdep, and more. -config UNWINDER_FRAME_POINTER - bool "Frame pointer unwinder" - select FRAME_POINTER - ---help--- - This option enables the frame pointer unwinder for unwinding kernel - stack traces. - - The unwinder itself is fast and it uses less RAM than the ORC - unwinder, but the kernel text size will grow by ~3% and the kernel's - overall performance will degrade by roughly 5-10%. 
- - This option is recommended if you want to use the livepatch - consistency model, as this is currently the only way to get a - reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE). - config UNWINDER_ORC bool "ORC unwinder" depends on X86_64 @@ -396,6 +382,21 @@ config UNWINDER_ORC Enabling this option will increase the kernel's runtime memory usage by roughly 2-4MB, depending on your kernel config. +config UNWINDER_FRAME_POINTER + bool "Frame pointer unwinder" + select FRAME_POINTER + ---help--- + This option enables the frame pointer unwinder for unwinding kernel + stack traces. + + The unwinder itself is fast and it uses less RAM than the ORC + unwinder, but the kernel text size will grow by ~3% and the kernel's + overall performance will degrade by roughly 5-10%. + + This option is recommended if you want to use the livepatch + consistency model, as this is currently the only way to get a + reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE). + config UNWINDER_GUESS bool "Guess unwinder" depends on EXPERT From eb3addb22727034dd733fcc07a3b4248fe523b72 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 13 Oct 2017 14:56:41 -0700 Subject: [PATCH 0818/3326] bitops: Add clear/set_bit32() to linux/bitops.h commit cbe96375025e14fc76f9ed42ee5225120d7210f8 upstream. Add two simple wrappers around set_bit/clear_bit() that accept the common case of an u32 array. This avoids writing casts in all callers. Signed-off-by: Andi Kleen Reviewed-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20171013215645.23166-2-andi@firstfloor.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- include/linux/bitops.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index d03c5dd6185daa..8a7e9924df57c1 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -228,6 +228,32 @@ static inline unsigned long __ffs64(u64 word) return __ffs((unsigned long)word); } +/* + * clear_bit32 - Clear a bit in memory for u32 array + * @nr: Bit to clear + * @addr: u32 * address of bitmap + * + * Same as clear_bit, but avoids needing casts for u32 arrays. + */ + +static __always_inline void clear_bit32(long nr, volatile u32 *addr) +{ + clear_bit(nr, (volatile unsigned long *)addr); +} + +/* + * set_bit32 - Set a bit in memory for u32 array + * @nr: Bit to clear + * @addr: u32 * address of bitmap + * + * Same as set_bit, but avoids needing casts for u32 arrays. + */ + +static __always_inline void set_bit32(long nr, volatile u32 *addr) +{ + set_bit(nr, (volatile unsigned long *)addr); +} + #ifdef __KERNEL__ #ifndef set_mask_bits From d602a3465c77190ef75be052c76ccbdb6c72ed6e Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 13 Oct 2017 14:56:42 -0700 Subject: [PATCH 0819/3326] x86/cpuid: Add generic table for CPUID dependencies commit 0b00de857a648dafe7020878c7a27cf776f5edf4 upstream. Some CPUID features depend on other features. Currently it's possible to to clear dependent features, but not clear the base features, which can cause various interesting problems. This patch implements a generic table to describe dependencies between CPUID features, to be used by all code that clears CPUID. Some subsystems (like XSAVE) had an own implementation of this, but it's better to do it all in a single place for everyone. Then clear_cpu_cap and setup_clear_cpu_cap always look up this table and clear all dependencies too. 
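As a usage note, the u32 wrappers added in the bitops patch above are what let this code operate on the u32-based capability arrays without casts; the dependency code below manipulates its bitmaps the same way. A tiny hedged sketch with a hypothetical caps[] array shows the difference:

  #include <linux/bitops.h>

  static u32 caps[12];    /* hypothetical bitmap laid out like x86_capability[] */

  static void caps_example(void)
  {
      /* Without the wrappers, a cast is needed for a u32 array: */
      clear_bit(5, (unsigned long *)caps);

      /* With clear_bit32()/set_bit32(), the cast goes away: */
      clear_bit32(5, caps);
      set_bit32(7, caps);
  }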
This is intended to be a practical table: only for features that make sense to clear. If someone for example clears FPU, or other features that are essentially part of the required base feature set, not much is going to work. Handling that is right now out of scope. We're only handling features which can be usefully cleared. Signed-off-by: Andi Kleen Reviewed-by: Thomas Gleixner Cc: Jonathan McDowell Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20171013215645.23166-3-andi@firstfloor.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeature.h | 9 +-- arch/x86/include/asm/cpufeatures.h | 5 ++ arch/x86/kernel/cpu/Makefile | 1 + arch/x86/kernel/cpu/cpuid-deps.c | 113 +++++++++++++++++++++++++++++ 4 files changed, 123 insertions(+), 5 deletions(-) create mode 100644 arch/x86/kernel/cpu/cpuid-deps.c diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 0dfa68438e80e7..bf6a76202a779e 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -126,11 +126,10 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) -#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability)) -#define setup_clear_cpu_cap(bit) do { \ - clear_cpu_cap(&boot_cpu_data, bit); \ - set_bit(bit, (unsigned long *)cpu_caps_cleared); \ -} while (0) + +extern void setup_clear_cpu_cap(unsigned int bit); +extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); + #define setup_force_cpu_cap(bit) do { \ set_cpu_cap(&boot_cpu_data, bit); \ set_bit(bit, (unsigned long *)cpu_caps_set); \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 793690fbda3625..9a0a5b128a30ac 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -22,6 +22,11 @@ * this feature bit is not displayed in /proc/cpuinfo at all. */ +/* + * When adding new features here that depend on other features, + * please update the table in kernel/cpu/cpuid-deps.c + */ + /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ #define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ #define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index c60922a6638573..90cb82dbba577c 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -23,6 +23,7 @@ obj-y += rdrand.o obj-y += match.o obj-y += bugs.o obj-$(CONFIG_CPU_FREQ) += aperfmperf.o +obj-y += cpuid-deps.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c new file mode 100644 index 00000000000000..e48eb731312042 --- /dev/null +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -0,0 +1,113 @@ +/* Declare dependencies between CPUIDs */ +#include +#include +#include +#include + +struct cpuid_dep { + unsigned int feature; + unsigned int depends; +}; + +/* + * Table of CPUID features that depend on others. + * + * This only includes dependencies that can be usefully disabled, not + * features part of the base set (like FPU). + * + * Note this all is not __init / __initdata because it can be + * called from cpu hotplug. It shouldn't do anything in this case, + * but it's difficult to tell that to the init reference checker. 
+ */ +const static struct cpuid_dep cpuid_deps[] = { + { X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE }, + { X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE }, + { X86_FEATURE_XSAVES, X86_FEATURE_XSAVE }, + { X86_FEATURE_AVX, X86_FEATURE_XSAVE }, + { X86_FEATURE_PKU, X86_FEATURE_XSAVE }, + { X86_FEATURE_MPX, X86_FEATURE_XSAVE }, + { X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE }, + { X86_FEATURE_FXSR_OPT, X86_FEATURE_FXSR }, + { X86_FEATURE_XMM, X86_FEATURE_FXSR }, + { X86_FEATURE_XMM2, X86_FEATURE_XMM }, + { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM4_1, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM4_2, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, + { X86_FEATURE_PCLMULQDQ, X86_FEATURE_XMM2 }, + { X86_FEATURE_SSSE3, X86_FEATURE_XMM2, }, + { X86_FEATURE_F16C, X86_FEATURE_XMM2, }, + { X86_FEATURE_AES, X86_FEATURE_XMM2 }, + { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, + { X86_FEATURE_FMA, X86_FEATURE_AVX }, + { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, + { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, + { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512PF, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512ER, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512CD, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512DQ, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, + {} +}; + +static inline void __clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit) +{ + clear_bit32(bit, c->x86_capability); +} + +static inline void __setup_clear_cpu_cap(unsigned int bit) +{ + clear_cpu_cap(&boot_cpu_data, bit); + set_bit32(bit, cpu_caps_cleared); +} + +static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature) +{ + if (!c) + __setup_clear_cpu_cap(feature); + else + __clear_cpu_cap(c, feature); +} + +static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) +{ + bool changed; + DECLARE_BITMAP(disable, NCAPINTS * sizeof(u32) * 8); + const struct cpuid_dep *d; + + clear_feature(c, feature); + + /* Collect all features to disable, handling dependencies */ + memset(disable, 0, sizeof(disable)); + __set_bit(feature, disable); + + /* Loop until we get a stable state. */ + do { + changed = false; + for (d = cpuid_deps; d->feature; d++) { + if (!test_bit(d->depends, disable)) + continue; + if (__test_and_set_bit(d->feature, disable)) + continue; + + changed = true; + clear_feature(c, d->feature); + } + } while (changed); +} + +void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) +{ + do_clear_cpu_cap(c, feature); +} + +void setup_clear_cpu_cap(unsigned int feature) +{ + do_clear_cpu_cap(NULL, feature); +} From f01d7efac7929e3630e25a84e101228ea907bbea Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 13 Oct 2017 14:56:43 -0700 Subject: [PATCH 0820/3326] x86/fpu: Parse clearcpuid= as early XSAVE argument commit 0c2a3913d6f50503f7c59d83a6219e39508cc898 upstream. With a followon patch we want to make clearcpuid affect the XSAVE configuration. But xsave is currently initialized before arguments are parsed. Move the clearcpuid= parsing into the special early xsave argument parsing code. Since clearcpuid= contains a = we need to keep the old __setup around as a dummy, otherwise it would end up as a environment variable in init's environment. 
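Pulled together from the two hunks below, the pattern is roughly: do the real parsing in the early FPU parameter path, and keep a dummy __setup() handler that merely claims the option. The sketch is condensed from the patch rather than being a literal copy, and the helper name parse_clearcpuid_early() is illustrative.

  /* Early path: runs before XSAVE setup, so the cleared bit takes effect. */
  static void __init parse_clearcpuid_early(void)
  {
      char arg[32];
      char *argptr = arg;
      int bit;

      if (cmdline_find_option(boot_command_line, "clearcpuid",
                              arg, sizeof(arg)) &&
          get_option(&argptr, &bit) &&
          bit >= 0 && bit < NCAPINTS * 32)
          setup_clear_cpu_cap(bit);
  }

  /* Dummy handler: "clearcpuid=" was already handled above; returning 1
   * keeps it from ending up in init's environment. */
  static __init int setup_clearcpuid(char *arg)
  {
      return 1;
  }
  __setup("clearcpuid=", setup_clearcpuid);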
Signed-off-by: Andi Kleen Reviewed-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20171013215645.23166-4-andi@firstfloor.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/common.c | 16 +++++++--------- arch/x86/kernel/fpu/init.c | 11 +++++++++++ 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c9176bae7fd8cd..03bb004bb15e2a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1301,18 +1301,16 @@ void print_cpu_info(struct cpuinfo_x86 *c) pr_cont(")\n"); } -static __init int setup_disablecpuid(char *arg) +/* + * clearcpuid= was already parsed in fpu__init_parse_early_param. + * But we need to keep a dummy __setup around otherwise it would + * show up as an environment variable for init. + */ +static __init int setup_clearcpuid(char *arg) { - int bit; - - if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32) - setup_clear_cpu_cap(bit); - else - return 0; - return 1; } -__setup("clearcpuid=", setup_disablecpuid); +__setup("clearcpuid=", setup_clearcpuid); #ifdef CONFIG_X86_64 DEFINE_PER_CPU_FIRST(union irq_stack_union, diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 7affb7e3d9a5b9..6abd83572b0163 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -249,6 +249,10 @@ static void __init fpu__init_system_ctx_switch(void) */ static void __init fpu__init_parse_early_param(void) { + char arg[32]; + char *argptr = arg; + int bit; + if (cmdline_find_option_bool(boot_command_line, "no387")) setup_clear_cpu_cap(X86_FEATURE_FPU); @@ -266,6 +270,13 @@ static void __init fpu__init_parse_early_param(void) if (cmdline_find_option_bool(boot_command_line, "noxsaves")) setup_clear_cpu_cap(X86_FEATURE_XSAVES); + + if (cmdline_find_option(boot_command_line, "clearcpuid", arg, + sizeof(arg)) && + get_option(&argptr, &bit) && + bit >= 0 && + bit < NCAPINTS * 32) + setup_clear_cpu_cap(bit); } /* From 0e7127aa76e05b51f767dbec74b236a1fa5eab06 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 13 Oct 2017 14:56:44 -0700 Subject: [PATCH 0821/3326] x86/fpu: Make XSAVE check the base CPUID features before enabling commit ccb18db2ab9d923df07e7495123fe5fb02329713 upstream. Before enabling XSAVE, not only check the XSAVE specific CPUID bits, but also the base CPUID features of the respective XSAVE feature. This allows to disable individual XSAVE states using the existing clearcpuid= option, which can be useful for performance testing and debugging, and also in general avoids inconsistencies. 
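The check reduces to masking off each XSAVE state whose base CPUID feature has been cleared. A short sketch of that filter, written against the xsave_cpuid_features[] table the patch introduces (the helper name here is illustrative), looks like this:

  /* Drop any xstate component whose base CPUID feature is disabled
   * (for example via clearcpuid=). */
  static void __init filter_xfeatures_by_cpuid(u64 *xfeatures_mask)
  {
      int i;

      for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
          if (!boot_cpu_has(xsave_cpuid_features[i]))
              *xfeatures_mask &= ~BIT_ULL(i);
      }
  }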
Signed-off-by: Andi Kleen Reviewed-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20171013215645.23166-5-andi@firstfloor.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/fpu/xstate.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index f1d5476c902209..fb581292975b12 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -15,6 +15,7 @@ #include #include +#include /* * Although we spell it out in here, the Processor Trace @@ -36,6 +37,19 @@ static const char *xfeature_names[] = "unknown xstate feature" , }; +static short xsave_cpuid_features[] __initdata = { + X86_FEATURE_FPU, + X86_FEATURE_XMM, + X86_FEATURE_AVX, + X86_FEATURE_MPX, + X86_FEATURE_MPX, + X86_FEATURE_AVX512F, + X86_FEATURE_AVX512F, + X86_FEATURE_AVX512F, + X86_FEATURE_INTEL_PT, + X86_FEATURE_PKU, +}; + /* * Mask of xstate features supported by the CPU and the kernel: */ @@ -726,6 +740,7 @@ void __init fpu__init_system_xstate(void) unsigned int eax, ebx, ecx, edx; static int on_boot_cpu __initdata = 1; int err; + int i; WARN_ON_FPU(!on_boot_cpu); on_boot_cpu = 0; @@ -759,6 +774,14 @@ void __init fpu__init_system_xstate(void) goto out_disable; } + /* + * Clear XSAVE features that are disabled in the normal CPUID. + */ + for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) { + if (!boot_cpu_has(xsave_cpuid_features[i])) + xfeatures_mask &= ~BIT(i); + } + xfeatures_mask &= fpu__get_supported_xfeatures_mask(); /* Enable xstate instructions to be able to continue with initialization: */ From 110ad51cc874a943792e16258e52ff3360cb4f96 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 13 Oct 2017 14:56:45 -0700 Subject: [PATCH 0822/3326] x86/fpu: Remove the explicit clearing of XSAVE dependent features commit 73e3a7d2a7c3be29a5a22b85026f6cfa5664267f upstream. Clearing a CPU feature with setup_clear_cpu_cap() clears all features which depend on it. Expressing feature dependencies in one place is easier to maintain than keeping functions like fpu__xstate_clear_all_cpu_caps() up to date. The features which depend on XSAVE have their dependency expressed in the dependency table, so its sufficient to clear X86_FEATURE_XSAVE. Remove the explicit clearing of XSAVE dependent features. 
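On the interplay between the last two patches: the masking loop added to fpu__init_system_xstate() pairs each XSAVE state component with the base CPUID feature that uses it and drops the component from xfeatures_mask when that feature is absent, while the dependency table makes clearing X86_FEATURE_XSAVE cascade to all XSAVE-derived features. A small stand-alone sketch of just the masking step, with invented component numbers and a stub in place of boot_cpu_has():

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative xstate component numbers (bit positions in the mask). */
    enum { XC_FP, XC_SSE, XC_YMM, XC_NR };

    /* Stand-in for looking up xsave_cpuid_features[i] via boot_cpu_has(). */
    static bool base_feature_present(int component)
    {
        switch (component) {
        case XC_FP:  return true;       /* FPU present */
        case XC_SSE: return true;       /* SSE present */
        case XC_YMM: return false;      /* pretend AVX was cleared */
        default:     return false;
        }
    }

    int main(void)
    {
        uint64_t xfeatures_mask = (1ull << XC_FP) | (1ull << XC_SSE) |
                                  (1ull << XC_YMM);
        int i;

        /* Drop any xstate whose base instruction-set feature is absent. */
        for (i = 0; i < XC_NR; i++)
            if (!base_feature_present(i))
                xfeatures_mask &= ~(1ull << i);

        printf("resulting mask: 0x%llx\n",
               (unsigned long long)xfeatures_mask);
        return 0;
    }

This is the mechanism that keeps, for example, the YMM state from being enabled once AVX has been cleared, which is what lets the patch above drop the long list of explicit setup_clear_cpu_cap() calls.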
Signed-off-by: Andi Kleen Reviewed-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20171013215645.23166-6-andi@firstfloor.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/fpu/xstate.c | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index fb581292975b12..87a57b7642d367 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -73,26 +73,6 @@ unsigned int fpu_user_xstate_size; void fpu__xstate_clear_all_cpu_caps(void) { setup_clear_cpu_cap(X86_FEATURE_XSAVE); - setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); - setup_clear_cpu_cap(X86_FEATURE_XSAVEC); - setup_clear_cpu_cap(X86_FEATURE_XSAVES); - setup_clear_cpu_cap(X86_FEATURE_AVX); - setup_clear_cpu_cap(X86_FEATURE_AVX2); - setup_clear_cpu_cap(X86_FEATURE_AVX512F); - setup_clear_cpu_cap(X86_FEATURE_AVX512IFMA); - setup_clear_cpu_cap(X86_FEATURE_AVX512PF); - setup_clear_cpu_cap(X86_FEATURE_AVX512ER); - setup_clear_cpu_cap(X86_FEATURE_AVX512CD); - setup_clear_cpu_cap(X86_FEATURE_AVX512DQ); - setup_clear_cpu_cap(X86_FEATURE_AVX512BW); - setup_clear_cpu_cap(X86_FEATURE_AVX512VL); - setup_clear_cpu_cap(X86_FEATURE_MPX); - setup_clear_cpu_cap(X86_FEATURE_XGETBV1); - setup_clear_cpu_cap(X86_FEATURE_AVX512VBMI); - setup_clear_cpu_cap(X86_FEATURE_PKU); - setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW); - setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS); - setup_clear_cpu_cap(X86_FEATURE_AVX512_VPOPCNTDQ); } /* From 3b57d66c8e53390b0035c578063db22c73ebce1a Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 16:22:31 -0700 Subject: [PATCH 0823/3326] x86/platform/UV: Convert timers to use timer_setup() commit 376f3bcebdc999cc737d9052109cc33b573b3a8b upstream. In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Signed-off-by: Kees Cook Signed-off-by: Thomas Gleixner Cc: Dimitri Sivanich Cc: Russ Anderson Cc: Mike Travis Link: https://lkml.kernel.org/r/20171016232231.GA100493@beast Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/apic/x2apic_uv_x.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 0d57bb9079c998..c0b694810ff45a 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -920,9 +920,8 @@ static __init void uv_rtc_init(void) /* * percpu heartbeat timer */ -static void uv_heartbeat(unsigned long ignored) +static void uv_heartbeat(struct timer_list *timer) { - struct timer_list *timer = &uv_scir_info->timer; unsigned char bits = uv_scir_info->state; /* Flip heartbeat bit: */ @@ -947,7 +946,7 @@ static int uv_heartbeat_enable(unsigned int cpu) struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer; uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY); - setup_pinned_timer(timer, uv_heartbeat, cpu); + timer_setup(timer, uv_heartbeat, TIMER_PINNED); timer->expires = jiffies + SCIR_CPU_HB_INTERVAL; add_timer_on(timer, cpu); uv_cpu_scir_info(cpu)->enabled = 1; From 61d1ad3631150386f3c142daff97913c1770e0fa Mon Sep 17 00:00:00 2001 From: Kamalesh Babulal Date: Sat, 14 Oct 2017 20:17:54 +0530 Subject: [PATCH 0824/3326] objtool: Print top level commands on incorrect usage commit 6a93bb7e4a7d6670677d5b0eb980936eb9cc5d2e upstream. 
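Back on the UV heartbeat conversion a little further up: with timer_setup()/from_timer() the callback receives the timer_list pointer itself and recovers its containing structure via container_of(), instead of being handed an opaque unsigned long. A minimal module-style sketch of that pattern follows; the structure and names are invented for illustration and are not taken from the UV code, and it assumes the post-conversion timer API (timer_setup(), from_timer(), mod_timer(), del_timer_sync()).

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct beat {
        struct timer_list timer;
        unsigned int count;
    };

    static struct beat b;

    static void beat_fn(struct timer_list *t)
    {
        /* Recover the containing object from the timer pointer. */
        struct beat *bp = from_timer(bp, t, timer);

        bp->count++;
        mod_timer(&bp->timer, jiffies + HZ);    /* re-arm, roughly once a second */
    }

    static int __init beat_init(void)
    {
        timer_setup(&b.timer, beat_fn, 0);
        mod_timer(&b.timer, jiffies + HZ);
        return 0;
    }

    static void __exit beat_exit(void)
    {
        del_timer_sync(&b.timer);
        pr_info("beats: %u\n", b.count);
    }

    module_init(beat_init);
    module_exit(beat_exit);
    MODULE_LICENSE("GPL");

The design point is that the timer no longer needs a separate data cookie: the timer_list is embedded in the object it services, so from_timer() is just container_of() in disguise.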
Print top-level objtool commands, along with the error on incorrect command line usage. Objtool command line parser exit's with code 129, for incorrect usage. Convert the cmd_usage() exit code also, to maintain consistency across objtool. After the patch: $ ./objtool -j Unknown option: -j usage: objtool COMMAND [ARGS] Commands: check Perform stack metadata validation on an object file orc Generate in-place ORC unwind tables for an object file $ echo $? 129 Signed-off-by: Kamalesh Babulal Acked-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1507992474-16142-1-git-send-email-kamalesh@linux.vnet.ibm.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/objtool.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c index 31e0f914384006..07f32991982840 100644 --- a/tools/objtool/objtool.c +++ b/tools/objtool/objtool.c @@ -70,7 +70,7 @@ static void cmd_usage(void) printf("\n"); - exit(1); + exit(129); } static void handle_options(int *argc, const char ***argv) @@ -86,9 +86,7 @@ static void handle_options(int *argc, const char ***argv) break; } else { fprintf(stderr, "Unknown option: %s\n", cmd); - fprintf(stderr, "\n Usage: %s\n", - objtool_usage_string); - exit(1); + cmd_usage(); } (*argv)++; From 36295155d8d4c3f9fb425faac9120559c051b861 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 18 Oct 2017 19:39:35 +0200 Subject: [PATCH 0825/3326] x86/cpuid: Prevent out of bound access in do_clear_cpu_cap() commit 57b8b1a1856adaa849d02d547411a553a531022b upstream. do_clear_cpu_cap() allocates a bitmap to keep track of disabled feature dependencies. That bitmap is sized NCAPINTS * BITS_PER_INIT. The possible 'features' which can be handed in are larger than this, because after the capabilities the bug 'feature' bits occupy another 32bit. Not really obvious... So clearing any of the misfeature bits, as 32bit does for the F00F bug, accesses that bitmap out of bounds thereby corrupting the stack. Size the bitmap proper and add a sanity check to catch accidental out of bound access. Fixes: 0b00de857a64 ("x86/cpuid: Add generic table for CPUID dependencies") Reported-by: kernel test robot Signed-off-by: Thomas Gleixner Cc: Andi Kleen Cc: Borislav Petkov Link: https://lkml.kernel.org/r/20171018022023.GA12058@yexl-desktop Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/cpuid-deps.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index e48eb731312042..c1d49842a411fb 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -75,11 +75,17 @@ static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature) __clear_cpu_cap(c, feature); } +/* Take the capabilities and the BUG bits into account */ +#define MAX_FEATURE_BITS ((NCAPINTS + NBUGINTS) * sizeof(u32) * 8) + static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) { - bool changed; - DECLARE_BITMAP(disable, NCAPINTS * sizeof(u32) * 8); + DECLARE_BITMAP(disable, MAX_FEATURE_BITS); const struct cpuid_dep *d; + bool changed; + + if (WARN_ON(feature >= MAX_FEATURE_BITS)) + return; clear_feature(c, feature); From 4afaf6ea65acb07e151470580d89e6a2c0268610 Mon Sep 17 00:00:00 2001 From: "Kirill A. 
Shutemov" Date: Fri, 29 Sep 2017 17:08:16 +0300 Subject: [PATCH 0826/3326] mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y commit 83e3c48729d9ebb7af5a31a504f3fd6aff0348c4 upstream. Size of the mem_section[] array depends on the size of the physical address space. In preparation for boot-time switching between paging modes on x86-64 we need to make the allocation of mem_section[] dynamic, because otherwise we waste a lot of RAM: with CONFIG_NODE_SHIFT=10, mem_section[] size is 32kB for 4-level paging and 2MB for 5-level paging mode. The patch allocates the array on the first call to sparse_memory_present_with_active_regions(). Signed-off-by: Kirill A. Shutemov Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Cyrill Gorcunov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20170929140821.37654-2-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- include/linux/mmzone.h | 6 +++++- mm/page_alloc.c | 10 ++++++++++ mm/sparse.c | 17 +++++++++++------ 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 18b06983131a35..f0938257ee6d38 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1152,13 +1152,17 @@ struct mem_section { #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) #ifdef CONFIG_SPARSEMEM_EXTREME -extern struct mem_section *mem_section[NR_SECTION_ROOTS]; +extern struct mem_section **mem_section; #else extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; #endif static inline struct mem_section *__nr_to_section(unsigned long nr) { +#ifdef CONFIG_SPARSEMEM_EXTREME + if (!mem_section) + return NULL; +#endif if (!mem_section[SECTION_NR_TO_ROOT(nr)]) return NULL; return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d51c2087c498c6..02cf30bd01cd9d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5651,6 +5651,16 @@ void __init sparse_memory_present_with_active_regions(int nid) unsigned long start_pfn, end_pfn; int i, this_nid; +#ifdef CONFIG_SPARSEMEM_EXTREME + if (!mem_section) { + unsigned long size, align; + + size = sizeof(struct mem_section) * NR_SECTION_ROOTS; + align = 1 << (INTERNODE_CACHE_SHIFT); + mem_section = memblock_virt_alloc(size, align); + } +#endif + for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) memory_present(this_nid, start_pfn, end_pfn); } diff --git a/mm/sparse.c b/mm/sparse.c index 4900707ae146cf..044138852bafce 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -23,8 +23,7 @@ * 1) mem_section - memory sections, mem_map's for valid memory */ #ifdef CONFIG_SPARSEMEM_EXTREME -struct mem_section *mem_section[NR_SECTION_ROOTS] - ____cacheline_internodealigned_in_smp; +struct mem_section **mem_section; #else struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT] ____cacheline_internodealigned_in_smp; @@ -101,7 +100,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid) int __section_nr(struct mem_section* ms) { unsigned long root_nr; - struct mem_section* root; + struct mem_section *root = NULL; for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) { root = __nr_to_section(root_nr * SECTIONS_PER_ROOT); @@ -112,7 +111,7 @@ int __section_nr(struct mem_section* ms) break; } - VM_BUG_ON(root_nr == NR_SECTION_ROOTS); + VM_BUG_ON(!root); return (root_nr * SECTIONS_PER_ROOT) + (ms - root); } @@ -330,11 +329,17 @@ 
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, static void __init check_usemap_section_nr(int nid, unsigned long *usemap) { unsigned long usemap_snr, pgdat_snr; - static unsigned long old_usemap_snr = NR_MEM_SECTIONS; - static unsigned long old_pgdat_snr = NR_MEM_SECTIONS; + static unsigned long old_usemap_snr; + static unsigned long old_pgdat_snr; struct pglist_data *pgdat = NODE_DATA(nid); int usemap_nid; + /* First call */ + if (!old_usemap_snr) { + old_usemap_snr = NR_MEM_SECTIONS; + old_pgdat_snr = NR_MEM_SECTIONS; + } + usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT); pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT); if (usemap_snr == pgdat_snr) From 873f59b8bd88c897fc1545efdf5742fc63f7a252 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Fri, 29 Sep 2017 17:08:18 +0300 Subject: [PATCH 0827/3326] x86/kasan: Use the same shadow offset for 4- and 5-level paging commit 12a8cc7fcf54a8575f094be1e99032ec38aa045c upstream. We are going to support boot-time switching between 4- and 5-level paging. For KASAN it means we cannot have different KASAN_SHADOW_OFFSET for different paging modes: the constant is passed to gcc to generate code and cannot be changed at runtime. This patch changes KASAN code to use 0xdffffc0000000000 as shadow offset for both 4- and 5-level paging. For 5-level paging it means that shadow memory region is not aligned to PGD boundary anymore and we have to handle unaligned parts of the region properly. In addition, we have to exclude paravirt code from KASAN instrumentation as we now use set_pgd() before KASAN is fully ready. [kirill.shutemov@linux.intel.com: clenaup, changelog message] Signed-off-by: Andrey Ryabinin Signed-off-by: Kirill A. Shutemov Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Cyrill Gorcunov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20170929140821.37654-4-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/x86_64/mm.txt | 2 +- arch/x86/Kconfig | 1 - arch/x86/kernel/Makefile | 3 +- arch/x86/mm/kasan_init_64.c | 101 +++++++++++++++++++++++++------- 4 files changed, 83 insertions(+), 24 deletions(-) diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index b0798e281aa6a6..3448e675b4623c 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -34,7 +34,7 @@ ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) ... unused hole ... -ffd8000000000000 - fff7ffffffffffff (=53 bits) kasan shadow memory (8PB) +ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) ... unused hole ... ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6aad9f070b9d33..59d5b3fa88e601 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -303,7 +303,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC config KASAN_SHADOW_OFFSET hex depends on KASAN - default 0xdff8000000000000 if X86_5LEVEL default 0xdffffc0000000000 config HAVE_INTEL_TXT diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index d12da41f72daf0..295abaa58addef 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -25,7 +25,8 @@ endif KASAN_SANITIZE_head$(BITS).o := n KASAN_SANITIZE_dumpstack.o := n KASAN_SANITIZE_dumpstack_$(BITS).o := n -KASAN_SANITIZE_stacktrace.o := n +KASAN_SANITIZE_stacktrace.o := n +KASAN_SANITIZE_paravirt.o := n OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 8f5be3eb40ddb3..2b60dc6e64b1da 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -16,6 +16,8 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES]; +static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); + static int __init map_range(struct range *range) { unsigned long start; @@ -31,8 +33,10 @@ static void __init clear_pgds(unsigned long start, unsigned long end) { pgd_t *pgd; + /* See comment in kasan_init() */ + unsigned long pgd_end = end & PGDIR_MASK; - for (; start < end; start += PGDIR_SIZE) { + for (; start < pgd_end; start += PGDIR_SIZE) { pgd = pgd_offset_k(start); /* * With folded p4d, pgd_clear() is nop, use p4d_clear() @@ -43,29 +47,61 @@ static void __init clear_pgds(unsigned long start, else pgd_clear(pgd); } + + pgd = pgd_offset_k(start); + for (; start < end; start += P4D_SIZE) + p4d_clear(p4d_offset(pgd, start)); +} + +static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr) +{ + unsigned long p4d; + + if (!IS_ENABLED(CONFIG_X86_5LEVEL)) + return (p4d_t *)pgd; + + p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; + p4d += __START_KERNEL_map - phys_base; + return (p4d_t *)p4d + p4d_index(addr); +} + +static void __init kasan_early_p4d_populate(pgd_t *pgd, + unsigned long addr, + unsigned long end) +{ + pgd_t pgd_entry; + p4d_t *p4d, p4d_entry; + unsigned long next; + + if (pgd_none(*pgd)) { + pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d)); + set_pgd(pgd, pgd_entry); + } + + p4d = early_p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + + if (!p4d_none(*p4d)) + continue; + + p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud)); + set_p4d(p4d, p4d_entry); + } while (p4d++, addr = next, addr != end && p4d_none(*p4d)); } static void __init kasan_map_early_shadow(pgd_t *pgd) { - int i; - unsigned long start = KASAN_SHADOW_START; + /* See comment in kasan_init() */ + unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK; unsigned long end = KASAN_SHADOW_END; + unsigned long next; - for (i = pgd_index(start); start < end; i++) { - switch (CONFIG_PGTABLE_LEVELS) { - case 4: - pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) | - _KERNPG_TABLE); - break; - case 5: - pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) | - _KERNPG_TABLE); - break; - default: - BUILD_BUG(); - } - start += PGDIR_SIZE; - } + pgd += pgd_index(addr); + do { + next = pgd_addr_end(addr, end); + kasan_early_p4d_populate(pgd, addr, next); + } while (pgd++, addr = next, addr != end); } #ifdef CONFIG_KASAN_INLINE @@ -102,7 +138,7 @@ void __init kasan_early_init(void) for (i = 0; i < PTRS_PER_PUD; i++) kasan_zero_pud[i] = 
__pud(pud_val); - for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++) + for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++) kasan_zero_p4d[i] = __p4d(p4d_val); kasan_map_early_shadow(early_top_pgt); @@ -118,12 +154,35 @@ void __init kasan_init(void) #endif memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt)); + + /* + * We use the same shadow offset for 4- and 5-level paging to + * facilitate boot-time switching between paging modes. + * As result in 5-level paging mode KASAN_SHADOW_START and + * KASAN_SHADOW_END are not aligned to PGD boundary. + * + * KASAN_SHADOW_START doesn't share PGD with anything else. + * We claim whole PGD entry to make things easier. + * + * KASAN_SHADOW_END lands in the last PGD entry and it collides with + * bunch of things like kernel code, modules, EFI mapping, etc. + * We need to take extra steps to not overwrite them. + */ + if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + void *ptr; + + ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END)); + memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table)); + set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)], + __pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE)); + } + load_cr3(early_top_pgt); __flush_tlb_all(); - clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); + clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END); - kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, + kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK), kasan_mem_to_shadow((void *)PAGE_OFFSET)); for (i = 0; i < E820_MAX_ENTRIES; i++) { From 13bda9cfea5116f15a69725c15bc2614a6c803dc Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 29 Sep 2017 17:08:19 +0300 Subject: [PATCH 0828/3326] x86/xen: Provide pre-built page tables only for CONFIG_XEN_PV=y and CONFIG_XEN_PVH=y commit 4375c29985f155d7eb2346615d84e62d1b673682 upstream. Looks like we only need pre-built page tables in the CONFIG_XEN_PV=y and CONFIG_XEN_PVH=y cases. Let's not provide them for other configurations. Signed-off-by: Kirill A. Shutemov Reviewed-by: Juergen Gross Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Cyrill Gorcunov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20170929140821.37654-5-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/head_64.S | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 6733cac7cba5b8..9072d324d4303d 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -38,11 +38,12 @@ * */ -#define p4d_index(x) (((x) >> P4D_SHIFT) & (PTRS_PER_P4D-1)) #define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) +#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE) PGD_START_KERNEL = pgd_index(__START_KERNEL_map) +#endif L3_START_KERNEL = pud_index(__START_KERNEL_map) .text @@ -365,10 +366,7 @@ NEXT_PAGE(early_dynamic_pgts) .data -#ifndef CONFIG_XEN -NEXT_PAGE(init_top_pgt) - .fill 512,8,0 -#else +#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) NEXT_PAGE(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + PGD_PAGE_OFFSET*8, 0 @@ -385,6 +383,9 @@ NEXT_PAGE(level2_ident_pgt) * Don't set NX because code runs from these pages. 
*/ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) +#else +NEXT_PAGE(init_top_pgt) + .fill 512,8,0 #endif #ifdef CONFIG_X86_5LEVEL From af02cd973d86cf3f6bd6eea928ccc9e70a4132d0 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 29 Sep 2017 17:08:20 +0300 Subject: [PATCH 0829/3326] x86/xen: Drop 5-level paging support code from the XEN_PV code commit 773dd2fca581b0a80e5a33332cc8ee67e5a79cba upstream. It was decided 5-level paging is not going to be supported in XEN_PV. Let's drop the dead code from the XEN_PV code. Tested-by: Juergen Gross Signed-off-by: Kirill A. Shutemov Reviewed-by: Juergen Gross Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Cyrill Gorcunov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20170929140821.37654-6-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/xen/mmu_pv.c | 159 ++++++++++++++++-------------------------- 1 file changed, 60 insertions(+), 99 deletions(-) diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 71495f1a86d72f..2ccdaba31a0778 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -449,7 +449,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd) } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); -#if CONFIG_PGTABLE_LEVELS == 4 +#ifdef CONFIG_X86_64 __visible pudval_t xen_pud_val(pud_t pud) { return pte_mfn_to_pfn(pud.pud); @@ -538,7 +538,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val) xen_mc_issue(PARAVIRT_LAZY_MMU); } -#endif /* CONFIG_PGTABLE_LEVELS == 4 */ +#endif /* CONFIG_X86_64 */ static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd, int (*func)(struct mm_struct *mm, struct page *, enum pt_level), @@ -580,21 +580,17 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d, int (*func)(struct mm_struct *mm, struct page *, enum pt_level), bool last, unsigned long limit) { - int i, nr, flush = 0; + int flush = 0; + pud_t *pud; - nr = last ? 
p4d_index(limit) + 1 : PTRS_PER_P4D; - for (i = 0; i < nr; i++) { - pud_t *pud; - if (p4d_none(p4d[i])) - continue; + if (p4d_none(*p4d)) + return flush; - pud = pud_offset(&p4d[i], 0); - if (PTRS_PER_PUD > 1) - flush |= (*func)(mm, virt_to_page(pud), PT_PUD); - flush |= xen_pud_walk(mm, pud, func, - last && i == nr - 1, limit); - } + pud = pud_offset(p4d, 0); + if (PTRS_PER_PUD > 1) + flush |= (*func)(mm, virt_to_page(pud), PT_PUD); + flush |= xen_pud_walk(mm, pud, func, last, limit); return flush; } @@ -644,8 +640,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, continue; p4d = p4d_offset(&pgd[i], 0); - if (PTRS_PER_P4D > 1) - flush |= (*func)(mm, virt_to_page(p4d), PT_P4D); flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit); } @@ -1176,22 +1170,14 @@ static void __init xen_cleanmfnmap(unsigned long vaddr) { pgd_t *pgd; p4d_t *p4d; - unsigned int i; bool unpin; unpin = (vaddr == 2 * PGDIR_SIZE); vaddr &= PMD_MASK; pgd = pgd_offset_k(vaddr); p4d = p4d_offset(pgd, 0); - for (i = 0; i < PTRS_PER_P4D; i++) { - if (p4d_none(p4d[i])) - continue; - xen_cleanmfnmap_p4d(p4d + i, unpin); - } - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - set_pgd(pgd, __pgd(0)); - xen_cleanmfnmap_free_pgtbl(p4d, unpin); - } + if (!p4d_none(*p4d)) + xen_cleanmfnmap_p4d(p4d, unpin); } static void __init xen_pagetable_p2m_free(void) @@ -1692,7 +1678,7 @@ static void xen_release_pmd(unsigned long pfn) xen_release_ptpage(pfn, PT_PMD); } -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) { xen_alloc_ptpage(mm, pfn, PT_PUD); @@ -2029,13 +2015,12 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr) */ void __init xen_relocate_p2m(void) { - phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys; + phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys; unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end; - int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d; + int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud; pte_t *pt; pmd_t *pmd; pud_t *pud; - p4d_t *p4d = NULL; pgd_t *pgd; unsigned long *new_p2m; int save_pud; @@ -2045,11 +2030,7 @@ void __init xen_relocate_p2m(void) n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT; n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT; n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT; - if (PTRS_PER_P4D > 1) - n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT; - else - n_p4d = 0; - n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d; + n_frames = n_pte + n_pt + n_pmd + n_pud; new_area = xen_find_free_area(PFN_PHYS(n_frames)); if (!new_area) { @@ -2065,76 +2046,56 @@ void __init xen_relocate_p2m(void) * To avoid any possible virtual address collision, just use * 2 * PUD_SIZE for the new area. 
*/ - p4d_phys = new_area; - pud_phys = p4d_phys + PFN_PHYS(n_p4d); + pud_phys = new_area; pmd_phys = pud_phys + PFN_PHYS(n_pud); pt_phys = pmd_phys + PFN_PHYS(n_pmd); p2m_pfn = PFN_DOWN(pt_phys) + n_pt; pgd = __va(read_cr3_pa()); new_p2m = (unsigned long *)(2 * PGDIR_SIZE); - idx_p4d = 0; save_pud = n_pud; - do { - if (n_p4d > 0) { - p4d = early_memremap(p4d_phys, PAGE_SIZE); - clear_page(p4d); - n_pud = min(save_pud, PTRS_PER_P4D); - } - for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { - pud = early_memremap(pud_phys, PAGE_SIZE); - clear_page(pud); - for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD); - idx_pmd++) { - pmd = early_memremap(pmd_phys, PAGE_SIZE); - clear_page(pmd); - for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD); - idx_pt++) { - pt = early_memremap(pt_phys, PAGE_SIZE); - clear_page(pt); - for (idx_pte = 0; - idx_pte < min(n_pte, PTRS_PER_PTE); - idx_pte++) { - set_pte(pt + idx_pte, - pfn_pte(p2m_pfn, PAGE_KERNEL)); - p2m_pfn++; - } - n_pte -= PTRS_PER_PTE; - early_memunmap(pt, PAGE_SIZE); - make_lowmem_page_readonly(__va(pt_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, - PFN_DOWN(pt_phys)); - set_pmd(pmd + idx_pt, - __pmd(_PAGE_TABLE | pt_phys)); - pt_phys += PAGE_SIZE; + for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { + pud = early_memremap(pud_phys, PAGE_SIZE); + clear_page(pud); + for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD); + idx_pmd++) { + pmd = early_memremap(pmd_phys, PAGE_SIZE); + clear_page(pmd); + for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD); + idx_pt++) { + pt = early_memremap(pt_phys, PAGE_SIZE); + clear_page(pt); + for (idx_pte = 0; + idx_pte < min(n_pte, PTRS_PER_PTE); + idx_pte++) { + set_pte(pt + idx_pte, + pfn_pte(p2m_pfn, PAGE_KERNEL)); + p2m_pfn++; } - n_pt -= PTRS_PER_PMD; - early_memunmap(pmd, PAGE_SIZE); - make_lowmem_page_readonly(__va(pmd_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, - PFN_DOWN(pmd_phys)); - set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); - pmd_phys += PAGE_SIZE; + n_pte -= PTRS_PER_PTE; + early_memunmap(pt, PAGE_SIZE); + make_lowmem_page_readonly(__va(pt_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, + PFN_DOWN(pt_phys)); + set_pmd(pmd + idx_pt, + __pmd(_PAGE_TABLE | pt_phys)); + pt_phys += PAGE_SIZE; } - n_pmd -= PTRS_PER_PUD; - early_memunmap(pud, PAGE_SIZE); - make_lowmem_page_readonly(__va(pud_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys)); - if (n_p4d > 0) - set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys)); - else - set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); - pud_phys += PAGE_SIZE; - } - if (n_p4d > 0) { - save_pud -= PTRS_PER_P4D; - early_memunmap(p4d, PAGE_SIZE); - make_lowmem_page_readonly(__va(p4d_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys)); - set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys)); - p4d_phys += PAGE_SIZE; + n_pt -= PTRS_PER_PMD; + early_memunmap(pmd, PAGE_SIZE); + make_lowmem_page_readonly(__va(pmd_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, + PFN_DOWN(pmd_phys)); + set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); + pmd_phys += PAGE_SIZE; } - } while (++idx_p4d < n_p4d); + n_pmd -= PTRS_PER_PUD; + early_memunmap(pud, PAGE_SIZE); + make_lowmem_page_readonly(__va(pud_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys)); + set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); + pud_phys += PAGE_SIZE; + } /* Now copy the old p2m info to the new area. 
*/ memcpy(new_p2m, xen_p2m_addr, size); @@ -2361,7 +2322,7 @@ static void __init xen_post_allocator_init(void) pv_mmu_ops.set_pte = xen_set_pte; pv_mmu_ops.set_pmd = xen_set_pmd; pv_mmu_ops.set_pud = xen_set_pud; -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 pv_mmu_ops.set_p4d = xen_set_p4d; #endif @@ -2371,7 +2332,7 @@ static void __init xen_post_allocator_init(void) pv_mmu_ops.alloc_pmd = xen_alloc_pmd; pv_mmu_ops.release_pte = xen_release_pte; pv_mmu_ops.release_pmd = xen_release_pmd; -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 pv_mmu_ops.alloc_pud = xen_alloc_pud; pv_mmu_ops.release_pud = xen_release_pud; #endif @@ -2435,14 +2396,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 .pud_val = PV_CALLEE_SAVE(xen_pud_val), .make_pud = PV_CALLEE_SAVE(xen_make_pud), .set_p4d = xen_set_p4d_hyper, .alloc_pud = xen_alloc_pmd_init, .release_pud = xen_release_pmd_init, -#endif /* CONFIG_PGTABLE_LEVELS == 4 */ +#endif /* CONFIG_X86_64 */ .activate_mm = xen_activate_mm, .dup_mmap = xen_dup_mmap, From 09807080a97cedc160bc42efd3e8c3a3b72a29c7 Mon Sep 17 00:00:00 2001 From: Dongjiu Geng Date: Tue, 17 Oct 2017 16:02:20 +0800 Subject: [PATCH 0830/3326] ACPI / APEI: remove the unused dead-code for SEA/NMI notification type commit c49870e89f4d2c21c76ebe90568246bb0f3572b7 upstream. For the SEA notification, the two functions ghes_sea_add() and ghes_sea_remove() are only called when CONFIG_ACPI_APEI_SEA is defined. If not, it will return errors in the ghes_probe() and not continue. If the probe is failed, the ghes_sea_remove() also has no chance to be called. Hence, remove the unnecessary handling when CONFIG_ACPI_APEI_SEA is not defined. For the NMI notification, it has the same issue as SEA notification, so also remove the unused dead-code for it. Signed-off-by: Dongjiu Geng Tested-by: Tyler Baicar Reviewed-by: Borislav Petkov Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/apei/ghes.c | 33 +++++---------------------------- 1 file changed, 5 insertions(+), 28 deletions(-) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 69ef0b6bf25dfc..cb7aceae35537a 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -852,17 +852,8 @@ static void ghes_sea_remove(struct ghes *ghes) synchronize_rcu(); } #else /* CONFIG_ACPI_APEI_SEA */ -static inline void ghes_sea_add(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to add SEA notification which is not supported\n", - ghes->generic->header.source_id); -} - -static inline void ghes_sea_remove(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to remove SEA notification which is not supported\n", - ghes->generic->header.source_id); -} +static inline void ghes_sea_add(struct ghes *ghes) { } +static inline void ghes_sea_remove(struct ghes *ghes) { } #endif /* CONFIG_ACPI_APEI_SEA */ #ifdef CONFIG_HAVE_ACPI_APEI_NMI @@ -1064,23 +1055,9 @@ static void ghes_nmi_init_cxt(void) init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq); } #else /* CONFIG_HAVE_ACPI_APEI_NMI */ -static inline void ghes_nmi_add(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n", - ghes->generic->header.source_id); - BUG(); -} - -static inline void ghes_nmi_remove(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n", - ghes->generic->header.source_id); - BUG(); -} - -static inline void ghes_nmi_init_cxt(void) -{ -} +static inline void ghes_nmi_add(struct ghes *ghes) { } +static inline void ghes_nmi_remove(struct ghes *ghes) { } +static inline void ghes_nmi_init_cxt(void) { } #endif /* CONFIG_HAVE_ACPI_APEI_NMI */ static int ghes_probe(struct platform_device *ghes_dev) From 20e9bfd7b8a3463c5ecc1d22b60be30a32607dc4 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 20 Oct 2017 11:21:35 -0500 Subject: [PATCH 0831/3326] x86/asm: Don't use the confusing '.ifeq' directive commit 82c62fa0c49aa305104013cee4468772799bb391 upstream. I find the '.ifeq ' directive to be confusing. Reading it quickly seems to suggest its opposite meaning, or that it's missing an argument. Improve readability by replacing all of its x86 uses with '.if == 0'. 
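A remark on the GHES cleanup just above: once ghes_probe() already refuses notification types the kernel was not built for, the !CONFIG_* variants can shrink to empty static inline stubs, which keeps every call site free of #ifdefs. The pattern in miniature, as a self-contained example with made-up names:

    #include <stdio.h>

    /* Toggle this to mimic the config option being enabled or disabled. */
    #define CONFIG_FOO 0

    #if CONFIG_FOO
    static void foo_add(int id)    { printf("adding foo source %d\n", id); }
    static void foo_remove(int id) { printf("removing foo source %d\n", id); }
    #else
    /* Feature compiled out: empty stubs keep the call sites #ifdef-free. */
    static inline void foo_add(int id)    { (void)id; }
    static inline void foo_remove(int id) { (void)id; }
    #endif

    int main(void)
    {
        foo_add(1);      /* no-op when CONFIG_FOO is 0 */
        foo_remove(1);
        return 0;
    }

Empty static inline stubs compile away entirely, so the disabled configuration pays nothing for keeping the callers uniform, and the error-reporting versions removed by the patch were unreachable anyway.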
Signed-off-by: Josh Poimboeuf Cc: Andrei Vagin Cc: Andy Lutomirski Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/757da028e802c7e98d23fbab8d234b1063e161cf.1508516398.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 2 +- arch/x86/kernel/head_32.S | 2 +- arch/x86/kernel/head_64.S | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 2e956afe272c09..5557e94c5f1c13 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -818,7 +818,7 @@ ENTRY(\sym) ASM_CLAC - .ifeq \has_error_code + .if \has_error_code == 0 pushq $-1 /* ORIG_RAX: no syscall to restart */ .endif diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index f1d528bb66a6ce..46c9f17b18e96b 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -402,7 +402,7 @@ ENTRY(early_idt_handler_array) # 24(%rsp) error code i = 0 .rept NUM_EXCEPTION_VECTORS - .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 + .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 pushl $0 # Dummy error code, to make stack frame uniform .endif pushl $i # 20(%esp) Vector number diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 9072d324d4303d..078eab728b4791 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -275,7 +275,7 @@ ENDPROC(start_cpu0) ENTRY(early_idt_handler_array) i = 0 .rept NUM_EXCEPTION_VECTORS - .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 + .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 UNWIND_HINT_IRET_REGS pushq $0 # Dummy error code, to make stack frame uniform .else From 3df257ddc5f48214e229b26f3e3d5f42e0f45380 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 27 Oct 2017 13:11:10 +0900 Subject: [PATCH 0832/3326] x86/build: Beautify build log of syscall headers commit af8e947079a7dab0480b5d6db6b093fd04b86fc9 upstream. This makes the build log look nicer. Before: SYSTBL arch/x86/entry/syscalls/../../include/generated/asm/syscalls_32.h SYSHDR arch/x86/entry/syscalls/../../include/generated/asm/unistd_32_ia32.h SYSHDR arch/x86/entry/syscalls/../../include/generated/asm/unistd_64_x32.h SYSTBL arch/x86/entry/syscalls/../../include/generated/asm/syscalls_64.h SYSHDR arch/x86/entry/syscalls/../../include/generated/uapi/asm/unistd_32.h SYSHDR arch/x86/entry/syscalls/../../include/generated/uapi/asm/unistd_64.h SYSHDR arch/x86/entry/syscalls/../../include/generated/uapi/asm/unistd_x32.h After: SYSTBL arch/x86/include/generated/asm/syscalls_32.h SYSHDR arch/x86/include/generated/asm/unistd_32_ia32.h SYSHDR arch/x86/include/generated/asm/unistd_64_x32.h SYSTBL arch/x86/include/generated/asm/syscalls_64.h SYSHDR arch/x86/include/generated/uapi/asm/unistd_32.h SYSHDR arch/x86/include/generated/uapi/asm/unistd_64.h SYSHDR arch/x86/include/generated/uapi/asm/unistd_x32.h Signed-off-by: Masahiro Yamada Acked-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: "H. 
Peter Anvin" Cc: linux-kbuild@vger.kernel.org Link: http://lkml.kernel.org/r/1509077470-2735-1-git-send-email-yamada.masahiro@socionext.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/syscalls/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile index 331f1dca508544..6fb9b57ed5ba05 100644 --- a/arch/x86/entry/syscalls/Makefile +++ b/arch/x86/entry/syscalls/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -out := $(obj)/../../include/generated/asm -uapi := $(obj)/../../include/generated/uapi/asm +out := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm # Create output directory if not already present _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \ From c60238f5712ba5a7d8391914f8290ae1bf5c08c0 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Sat, 28 Oct 2017 09:30:38 +0800 Subject: [PATCH 0833/3326] x86/mm/64: Rename the register_page_bootmem_memmap() 'size' parameter to 'nr_pages' commit 15670bfe19905b1dcbb63137f40d718b59d84479 upstream. register_page_bootmem_memmap()'s 3rd 'size' parameter is named in a somewhat misleading fashion - rename it to 'nr_pages' which makes the units of it much clearer. Meanwhile rename the existing local variable 'nr_pages' to 'nr_pmd_pages', a more expressive name, to avoid conflict with new function parameter 'nr_pages'. (Also clean up the unnecessary parentheses in which get_order() is called.) Signed-off-by: Baoquan He Acked-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: akpm@linux-foundation.org Link: http://lkml.kernel.org/r/1509154238-23250-1-git-send-email-bhe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/init_64.c | 10 +++++----- include/linux/mm.h | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 048fbe8fc27401..adcea90a2046e9 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1426,16 +1426,16 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE) void register_page_bootmem_memmap(unsigned long section_nr, - struct page *start_page, unsigned long size) + struct page *start_page, unsigned long nr_pages) { unsigned long addr = (unsigned long)start_page; - unsigned long end = (unsigned long)(start_page + size); + unsigned long end = (unsigned long)(start_page + nr_pages); unsigned long next; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; - unsigned int nr_pages; + unsigned int nr_pmd_pages; struct page *page; for (; addr < end; addr = next) { @@ -1482,9 +1482,9 @@ void register_page_bootmem_memmap(unsigned long section_nr, if (pmd_none(*pmd)) continue; - nr_pages = 1 << (get_order(PMD_SIZE)); + nr_pmd_pages = 1 << get_order(PMD_SIZE); page = pmd_page(*pmd); - while (nr_pages--) + while (nr_pmd_pages--) get_page_bootmem(section_nr, page++, SECTION_INFO); } diff --git a/include/linux/mm.h b/include/linux/mm.h index db647d428100c2..f50deada0f5c53 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2510,7 +2510,7 @@ void vmemmap_populate_print_last(void); void vmemmap_free(unsigned long start, unsigned long end); #endif void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, - unsigned long size); + unsigned long nr_pages); enum mf_flags { MF_COUNT_INCREASED = 1 << 0, From 
12dc3fa30178a4635e435ed2b4e543f76c10dc93 Mon Sep 17 00:00:00 2001 From: Gayatri Kammela Date: Mon, 30 Oct 2017 18:20:29 -0700 Subject: [PATCH 0834/3326] x86/cpufeatures: Enable new SSE/AVX/AVX512 CPU features commit c128dbfa0f879f8ce7b79054037889b0b2240728 upstream. Add a few new SSE/AVX/AVX512 instruction groups/features for enumeration in /proc/cpuinfo: AVX512_VBMI2, GFNI, VAES, VPCLMULQDQ, AVX512_VNNI, AVX512_BITALG. CPUID.(EAX=7,ECX=0):ECX[bit 6] AVX512_VBMI2 CPUID.(EAX=7,ECX=0):ECX[bit 8] GFNI CPUID.(EAX=7,ECX=0):ECX[bit 9] VAES CPUID.(EAX=7,ECX=0):ECX[bit 10] VPCLMULQDQ CPUID.(EAX=7,ECX=0):ECX[bit 11] AVX512_VNNI CPUID.(EAX=7,ECX=0):ECX[bit 12] AVX512_BITALG Detailed information of CPUID bits for these features can be found in the Intel Architecture Instruction Set Extensions and Future Features Programming Interface document (refer to Table 1-1. and Table 1-2.). A copy of this document is available at https://bugzilla.kernel.org/show_bug.cgi?id=197239 Signed-off-by: Gayatri Kammela Acked-by: Thomas Gleixner Cc: Andi Kleen Cc: Fenghua Yu Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Ravi Shankar Cc: Ricardo Neri Cc: Yang Zhong Cc: bp@alien8.de Link: http://lkml.kernel.org/r/1509412829-23380-1-git-send-email-gayatri.kammela@intel.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 6 ++++++ arch/x86/kernel/cpu/cpuid-deps.c | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 9a0a5b128a30ac..74370734663c43 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -300,6 +300,12 @@ #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ +#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ +#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ +#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ +#define X86_FEATURE_VPCLMULQDQ (16*32+ 10) /* Carry-Less Multiplication Double Quadword */ +#define X86_FEATURE_AVX512_VNNI (16*32+ 11) /* Vector Neural Network Instructions */ +#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB */ #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index c1d49842a411fb..c21f22d836ad74 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -50,6 +50,12 @@ const static struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, + { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL }, + { X86_FEATURE_VAES, X86_FEATURE_AVX512VL }, + { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, From 4ed772a7dee902953066869264934c36c7689097 Mon Sep 17 
00:00:00 2001 From: Ricardo Neri Date: Fri, 27 Oct 2017 13:25:28 -0700 Subject: [PATCH 0835/3326] x86/mm: Relocate page fault error codes to traps.h commit 1067f030994c69ca1fba8c607437c8895dcf8509 upstream. Up to this point, only fault.c used the definitions of the page fault error codes. Thus, it made sense to keep them within such file. Other portions of code might be interested in those definitions too. For instance, the User- Mode Instruction Prevention emulation code will use such definitions to emulate a page fault when it is unable to successfully copy the results of the emulated instructions to user space. While relocating the error code enumeration, the prefix X86_ is used to make it consistent with the rest of the definitions in traps.h. Of course, code using the enumeration had to be updated as well. No functional changes were performed. Signed-off-by: Ricardo Neri Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Reviewed-by: Andy Lutomirski Cc: "Michael S. Tsirkin" Cc: Peter Zijlstra Cc: Dave Hansen Cc: ricardo.neri@intel.com Cc: Paul Gortmaker Cc: Huang Rui Cc: Shuah Khan Cc: Jonathan Corbet Cc: Jiri Slaby Cc: "Ravi V. Shankar" Cc: Chris Metcalf Cc: Brian Gerst Cc: Josh Poimboeuf Cc: Chen Yucong Cc: Vlastimil Babka Cc: Masami Hiramatsu Cc: Paolo Bonzini Cc: Andrew Morton Cc: "Kirill A. Shutemov" Link: https://lkml.kernel.org/r/1509135945-13762-2-git-send-email-ricardo.neri-calderon@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/traps.h | 18 ++++++++ arch/x86/mm/fault.c | 88 ++++++++++++++---------------------- 2 files changed, 52 insertions(+), 54 deletions(-) diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index b0cced97a6ce9b..a230dd1896f036 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -145,4 +145,22 @@ enum { X86_TRAP_IRET = 32, /* 32, IRET Exception */ }; +/* + * Page fault error code bits: + * + * bit 0 == 0: no page found 1: protection fault + * bit 1 == 0: read access 1: write access + * bit 2 == 0: kernel-mode access 1: user-mode access + * bit 3 == 1: use of reserved bit detected + * bit 4 == 1: fault was an instruction fetch + * bit 5 == 1: protection keys block access + */ +enum x86_pf_error_code { + X86_PF_PROT = 1 << 0, + X86_PF_WRITE = 1 << 1, + X86_PF_USER = 1 << 2, + X86_PF_RSVD = 1 << 3, + X86_PF_INSTR = 1 << 4, + X86_PF_PK = 1 << 5, +}; #endif /* _ASM_X86_TRAPS_H */ diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b0ff378650a9c7..3109ba6c6edeed 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -29,26 +29,6 @@ #define CREATE_TRACE_POINTS #include -/* - * Page fault error code bits: - * - * bit 0 == 0: no page found 1: protection fault - * bit 1 == 0: read access 1: write access - * bit 2 == 0: kernel-mode access 1: user-mode access - * bit 3 == 1: use of reserved bit detected - * bit 4 == 1: fault was an instruction fetch - * bit 5 == 1: protection keys block access - */ -enum x86_pf_error_code { - - PF_PROT = 1 << 0, - PF_WRITE = 1 << 1, - PF_USER = 1 << 2, - PF_RSVD = 1 << 3, - PF_INSTR = 1 << 4, - PF_PK = 1 << 5, -}; - /* * Returns 0 if mmiotrace is disabled, or if the fault is not * handled by mmiotrace: @@ -150,7 +130,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * If it was a exec (instruction fetch) fault on NX page, then * do not ignore the fault: */ - if (error_code & PF_INSTR) + if (error_code & X86_PF_INSTR) return 0; instr = (void *)convert_ip_to_linear(current, regs); @@ -180,7 +160,7 @@ 
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * siginfo so userspace can discover which protection key was set * on the PTE. * - * If we get here, we know that the hardware signaled a PF_PK + * If we get here, we know that the hardware signaled a X86_PF_PK * fault and that there was a VMA once we got in the fault * handler. It does *not* guarantee that the VMA we find here * was the one that we faulted on. @@ -205,7 +185,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey) /* * force_sig_info_fault() is called from a number of * contexts, some of which have a VMA and some of which - * do not. The PF_PK handing happens after we have a + * do not. The X86_PF_PK handing happens after we have a * valid VMA, so we should never reach this without a * valid VMA. */ @@ -698,7 +678,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, if (!oops_may_print()) return; - if (error_code & PF_INSTR) { + if (error_code & X86_PF_INSTR) { unsigned int level; pgd_t *pgd; pte_t *pte; @@ -780,7 +760,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, */ if (current->thread.sig_on_uaccess_err && signal) { tsk->thread.trap_nr = X86_TRAP_PF; - tsk->thread.error_code = error_code | PF_USER; + tsk->thread.error_code = error_code | X86_PF_USER; tsk->thread.cr2 = address; /* XXX: hwpoison faults will set the wrong code. */ @@ -898,7 +878,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, struct task_struct *tsk = current; /* User mode accesses just cause a SIGSEGV */ - if (error_code & PF_USER) { + if (error_code & X86_PF_USER) { /* * It's possible to have interrupts off here: */ @@ -919,7 +899,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, * Instruction fetch faults in the vsyscall page might need * emulation. */ - if (unlikely((error_code & PF_INSTR) && + if (unlikely((error_code & X86_PF_INSTR) && ((address & ~0xfff) == VSYSCALL_ADDR))) { if (emulate_vsyscall(regs, address)) return; @@ -932,7 +912,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, * are always protection faults. */ if (address >= TASK_SIZE_MAX) - error_code |= PF_PROT; + error_code |= X86_PF_PROT; if (likely(show_unhandled_signals)) show_signal_msg(regs, error_code, address, tsk); @@ -993,11 +973,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code, if (!boot_cpu_has(X86_FEATURE_OSPKE)) return false; - if (error_code & PF_PK) + if (error_code & X86_PF_PK) return true; /* this checks permission keys on the VMA: */ - if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), - (error_code & PF_INSTR), foreign)) + if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), + (error_code & X86_PF_INSTR), foreign)) return true; return false; } @@ -1025,7 +1005,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, int code = BUS_ADRERR; /* Kernel mode? Handle exceptions or die: */ - if (!(error_code & PF_USER)) { + if (!(error_code & X86_PF_USER)) { no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); return; } @@ -1053,14 +1033,14 @@ static noinline void mm_fault_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, u32 *pkey, unsigned int fault) { - if (fatal_signal_pending(current) && !(error_code & PF_USER)) { + if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) { no_context(regs, error_code, address, 0, 0); return; } if (fault & VM_FAULT_OOM) { /* Kernel mode? 
Handle exceptions or die: */ - if (!(error_code & PF_USER)) { + if (!(error_code & X86_PF_USER)) { no_context(regs, error_code, address, SIGSEGV, SEGV_MAPERR); return; @@ -1085,16 +1065,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, static int spurious_fault_check(unsigned long error_code, pte_t *pte) { - if ((error_code & PF_WRITE) && !pte_write(*pte)) + if ((error_code & X86_PF_WRITE) && !pte_write(*pte)) return 0; - if ((error_code & PF_INSTR) && !pte_exec(*pte)) + if ((error_code & X86_PF_INSTR) && !pte_exec(*pte)) return 0; /* * Note: We do not do lazy flushing on protection key - * changes, so no spurious fault will ever set PF_PK. + * changes, so no spurious fault will ever set X86_PF_PK. */ - if ((error_code & PF_PK)) + if ((error_code & X86_PF_PK)) return 1; return 1; @@ -1140,8 +1120,8 @@ spurious_fault(unsigned long error_code, unsigned long address) * change, so user accesses are not expected to cause spurious * faults. */ - if (error_code != (PF_WRITE | PF_PROT) - && error_code != (PF_INSTR | PF_PROT)) + if (error_code != (X86_PF_WRITE | X86_PF_PROT) && + error_code != (X86_PF_INSTR | X86_PF_PROT)) return 0; pgd = init_mm.pgd + pgd_index(address); @@ -1201,19 +1181,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) * always an unconditional error and can never result in * a follow-up action to resolve the fault, like a COW. */ - if (error_code & PF_PK) + if (error_code & X86_PF_PK) return 1; /* * Make sure to check the VMA so that we do not perform - * faults just to hit a PF_PK as soon as we fill in a + * faults just to hit a X86_PF_PK as soon as we fill in a * page. */ - if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), - (error_code & PF_INSTR), foreign)) + if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), + (error_code & X86_PF_INSTR), foreign)) return 1; - if (error_code & PF_WRITE) { + if (error_code & X86_PF_WRITE) { /* write, present and write, not present: */ if (unlikely(!(vma->vm_flags & VM_WRITE))) return 1; @@ -1221,7 +1201,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) } /* read, present: */ - if (unlikely(error_code & PF_PROT)) + if (unlikely(error_code & X86_PF_PROT)) return 1; /* read, not present: */ @@ -1244,7 +1224,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs) if (!static_cpu_has(X86_FEATURE_SMAP)) return false; - if (error_code & PF_USER) + if (error_code & X86_PF_USER) return false; if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC)) @@ -1297,7 +1277,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * protection error (error_code & 9) == 0. 
*/ if (unlikely(fault_in_kernel_space(address))) { - if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { + if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) { if (vmalloc_fault(address) >= 0) return; @@ -1325,7 +1305,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, if (unlikely(kprobes_fault(regs))) return; - if (unlikely(error_code & PF_RSVD)) + if (unlikely(error_code & X86_PF_RSVD)) pgtable_bad(regs, error_code, address); if (unlikely(smap_violation(error_code, regs))) { @@ -1351,7 +1331,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, */ if (user_mode(regs)) { local_irq_enable(); - error_code |= PF_USER; + error_code |= X86_PF_USER; flags |= FAULT_FLAG_USER; } else { if (regs->flags & X86_EFLAGS_IF) @@ -1360,9 +1340,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); - if (error_code & PF_WRITE) + if (error_code & X86_PF_WRITE) flags |= FAULT_FLAG_WRITE; - if (error_code & PF_INSTR) + if (error_code & X86_PF_INSTR) flags |= FAULT_FLAG_INSTRUCTION; /* @@ -1382,7 +1362,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * space check, thus avoiding the deadlock: */ if (unlikely(!down_read_trylock(&mm->mmap_sem))) { - if ((error_code & PF_USER) == 0 && + if (!(error_code & X86_PF_USER) && !search_exception_tables(regs->ip)) { bad_area_nosemaphore(regs, error_code, address, NULL); return; @@ -1409,7 +1389,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, bad_area(regs, error_code, address); return; } - if (error_code & PF_USER) { + if (error_code & X86_PF_USER) { /* * Accessing the stack below %sp is always a bug. * The large cushion allows instructions like enter From 985cba48423594b8e8792770a65543d7ee689eba Mon Sep 17 00:00:00 2001 From: Ricardo Neri Date: Fri, 27 Oct 2017 13:25:29 -0700 Subject: [PATCH 0836/3326] x86/boot: Relocate definition of the initial state of CR0 commit b0ce5b8c95c83a7b98c679b117e3d6ae6f97154b upstream. Both head_32.S and head_64.S utilize the same value to initialize the control register CR0. Also, other parts of the kernel might want to access this initial definition (e.g., emulation code for User-Mode Instruction Prevention uses this state to provide a sane dummy value for CR0 when emulating the smsw instruction). Thus, relocate this definition to a header file from which it can be conveniently accessed. Suggested-by: Borislav Petkov Signed-off-by: Ricardo Neri Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Reviewed-by: Andy Lutomirski Cc: "Michael S. Tsirkin" Cc: Peter Zijlstra Cc: Dave Hansen Cc: ricardo.neri@intel.com Cc: linux-mm@kvack.org Cc: Paul Gortmaker Cc: Huang Rui Cc: Shuah Khan Cc: linux-arch@vger.kernel.org Cc: Jonathan Corbet Cc: Jiri Slaby Cc: "Ravi V. 
Shankar" Cc: Denys Vlasenko Cc: Chris Metcalf Cc: Brian Gerst Cc: Josh Poimboeuf Cc: Chen Yucong Cc: Vlastimil Babka Cc: Dave Hansen Cc: Andy Lutomirski Cc: Masami Hiramatsu Cc: Paolo Bonzini Cc: Andrew Morton Cc: Linus Torvalds Link: https://lkml.kernel.org/r/1509135945-13762-3-git-send-email-ricardo.neri-calderon@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/uapi/asm/processor-flags.h | 3 +++ arch/x86/kernel/head_32.S | 3 --- arch/x86/kernel/head_64.S | 3 --- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h index 6f335539966500..53b4ca55ebb66e 100644 --- a/arch/x86/include/uapi/asm/processor-flags.h +++ b/arch/x86/include/uapi/asm/processor-flags.h @@ -152,5 +152,8 @@ #define CX86_ARR_BASE 0xc4 #define CX86_RCR_BASE 0xdc +#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ + X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ + X86_CR0_PG) #endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */ diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 46c9f17b18e96b..c29020907886a3 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -212,9 +212,6 @@ ENTRY(startup_32_smp) #endif .Ldefault_entry: -#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ - X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ - X86_CR0_PG) movl $(CR0_STATE & ~X86_CR0_PG),%eax movl %eax,%cr0 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 078eab728b4791..1f747734318dab 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -154,9 +154,6 @@ ENTRY(secondary_startup_64) 1: wrmsr /* Make changes effective */ /* Setup cr0 */ -#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ - X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ - X86_CR0_PG) movl $CR0_STATE, %eax /* Make changes effective */ movq %rax, %cr0 From ab3e5dfff36f4af4df09d86384376ebd188d7ae9 Mon Sep 17 00:00:00 2001 From: Ricardo Neri Date: Fri, 27 Oct 2017 13:25:30 -0700 Subject: [PATCH 0837/3326] ptrace,x86: Make user_64bit_mode() available to 32-bit builds commit e27c310af5c05cf876d9cad006928076c27f54d4 upstream. In its current form, user_64bit_mode() can only be used when CONFIG_X86_64 is selected. This implies that code built with CONFIG_X86_64=n cannot use it. If a piece of code needs to be built for both CONFIG_X86_64=y and CONFIG_X86_64=n and wants to use this function, it needs to wrap it in an #ifdef/#endif; potentially, in multiple places. This can be easily avoided with a single #ifdef/#endif pair within user_64bit_mode() itself. Suggested-by: Borislav Petkov Signed-off-by: Ricardo Neri Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: "Michael S. Tsirkin" Cc: Peter Zijlstra Cc: Dave Hansen Cc: ricardo.neri@intel.com Cc: Adrian Hunter Cc: Paul Gortmaker Cc: Huang Rui Cc: Qiaowei Ren Cc: Shuah Khan Cc: Kees Cook Cc: Jonathan Corbet Cc: Jiri Slaby Cc: Dmitry Vyukov Cc: "Ravi V. 
Shankar" Cc: Chris Metcalf Cc: Brian Gerst Cc: Arnaldo Carvalho de Melo Cc: Andy Lutomirski Cc: Colin Ian King Cc: Chen Yucong Cc: Adam Buchbinder Cc: Vlastimil Babka Cc: Lorenzo Stoakes Cc: Masami Hiramatsu Cc: Paolo Bonzini Cc: Andrew Morton Cc: Thomas Garnier Link: https://lkml.kernel.org/r/1509135945-13762-4-git-send-email-ricardo.neri-calderon@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/ptrace.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index c0e3c45cf6aba2..14131dd06b290a 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -136,9 +136,9 @@ static inline int v8086_mode(struct pt_regs *regs) #endif } -#ifdef CONFIG_X86_64 static inline bool user_64bit_mode(struct pt_regs *regs) { +#ifdef CONFIG_X86_64 #ifndef CONFIG_PARAVIRT /* * On non-paravirt systems, this is the only long mode CPL 3 @@ -149,8 +149,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs) /* Headers are too twisted for this to go in paravirt.h. */ return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; #endif +#else /* !CONFIG_X86_64 */ + return false; +#endif } +#ifdef CONFIG_X86_64 #define current_user_stack_pointer() current_pt_regs()->sp #define compat_user_stack_pointer() current_pt_regs()->sp #endif From b7ee7fcca8a525c9bc636fab23779f18d3de9e80 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:58:58 -0700 Subject: [PATCH 0838/3326] x86/entry/64: Remove the restore_c_regs_and_iret label commit 9da78ba6b47b46428cfdfc0851511ab29c869798 upstream. The only user was the 64-bit opportunistic SYSRET failure path, and that path didn't really need it. This change makes the opportunistic SYSRET code a bit more straightforward and gets rid of the label. Signed-off-by: Andy Lutomirski Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/be3006a7ad3326e3458cf1cc55d416252cbe1986.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 5557e94c5f1c13..33eb2127eac8bf 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -246,7 +246,6 @@ entry_SYSCALL64_slow_path: call do_syscall_64 /* returns with IRQs disabled */ return_from_SYSCALL_64: - RESTORE_EXTRA_REGS TRACE_IRQS_IRETQ /* we're about to change IF */ /* @@ -315,6 +314,7 @@ return_from_SYSCALL_64: */ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ + RESTORE_EXTRA_REGS RESTORE_C_REGS_EXCEPT_RCX_R11 movq RSP(%rsp), %rsp UNWIND_HINT_EMPTY @@ -322,7 +322,7 @@ syscall_return_via_sysret: opportunistic_sysret_failed: SWAPGS - jmp restore_c_regs_and_iret + jmp restore_regs_and_iret END(entry_SYSCALL_64) ENTRY(stub_ptregs_64) @@ -639,7 +639,6 @@ retint_kernel: */ GLOBAL(restore_regs_and_iret) RESTORE_EXTRA_REGS -restore_c_regs_and_iret: RESTORE_C_REGS REMOVE_PT_GPREGS_FROM_STACK 8 INTERRUPT_RETURN From 65236abc42b068506720f4a5168e5e63636138dd Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:58:59 -0700 Subject: [PATCH 0839/3326] x86/entry/64: Split the IRET-to-user and IRET-to-kernel paths commit 26c4ef9c49d8a0341f6d97ce2cfdd55d1236ed29 upstream. These code paths will diverge soon. 
Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/dccf8c7b3750199b4b30383c812d4e2931811509.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 34 +++++++++++++++++++++++--------- arch/x86/entry/entry_64_compat.S | 2 +- arch/x86/kernel/head_64.S | 2 +- 3 files changed, 27 insertions(+), 11 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 33eb2127eac8bf..9e262c8ccc2d97 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -322,7 +322,7 @@ syscall_return_via_sysret: opportunistic_sysret_failed: SWAPGS - jmp restore_regs_and_iret + jmp restore_regs_and_return_to_usermode END(entry_SYSCALL_64) ENTRY(stub_ptregs_64) @@ -424,7 +424,7 @@ ENTRY(ret_from_fork) call syscall_return_slowpath /* returns with IRQs disabled */ TRACE_IRQS_ON /* user mode is traced as IRQS on */ SWAPGS - jmp restore_regs_and_iret + jmp restore_regs_and_return_to_usermode 1: /* kernel thread */ @@ -613,7 +613,20 @@ GLOBAL(retint_user) call prepare_exit_to_usermode TRACE_IRQS_IRETQ SWAPGS - jmp restore_regs_and_iret + +GLOBAL(restore_regs_and_return_to_usermode) +#ifdef CONFIG_DEBUG_ENTRY + /* Assert that pt_regs indicates user mode. */ + testl $3, CS(%rsp) + jnz 1f + ud2 +1: +#endif + RESTORE_EXTRA_REGS + RESTORE_C_REGS + REMOVE_PT_GPREGS_FROM_STACK 8 + INTERRUPT_RETURN + /* Returning to kernel space */ retint_kernel: @@ -633,11 +646,14 @@ retint_kernel: */ TRACE_IRQS_IRETQ -/* - * At this label, code paths which return to kernel and to user, - * which come from interrupts/exception and from syscalls, merge. - */ -GLOBAL(restore_regs_and_iret) +GLOBAL(restore_regs_and_return_to_kernel) +#ifdef CONFIG_DEBUG_ENTRY + /* Assert that pt_regs indicates kernel mode. */ + testl $3, CS(%rsp) + jz 1f + ud2 +1: +#endif RESTORE_EXTRA_REGS RESTORE_C_REGS REMOVE_PT_GPREGS_FROM_STACK 8 @@ -1328,7 +1344,7 @@ ENTRY(nmi) * work, because we don't want to enable interrupts. */ SWAPGS - jmp restore_regs_and_iret + jmp restore_regs_and_return_to_usermode .Lnmi_from_kernel: /* diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index b5c7a56ed256d4..2fd682212d1d6c 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -338,7 +338,7 @@ ENTRY(entry_INT80_compat) /* Go back to user mode. */ TRACE_IRQS_ON SWAPGS - jmp restore_regs_and_iret + jmp restore_regs_and_return_to_usermode END(entry_INT80_compat) ENTRY(stub32_clone) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 1f747734318dab..7dca675fe78db6 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -328,7 +328,7 @@ early_idt_handler_common: 20: decl early_recursion_flag(%rip) - jmp restore_regs_and_iret + jmp restore_regs_and_return_to_kernel END(early_idt_handler_common) __INITDATA From 7d4bb32bc6fded10ff2874ae1d6d655b5511829c Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:00 -0700 Subject: [PATCH 0840/3326] x86/entry/64: Move SWAPGS into the common IRET-to-usermode path commit 8a055d7f411d41755ce30db5bb65b154777c4b78 upstream. All of the code paths that ended up doing IRET to usermode did SWAPGS immediately beforehand. Move the SWAPGS into the common code. 
Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/27fd6f45b7cd640de38fb9066fd0349bcd11f8e1.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 32 ++++++++++++++------------------ arch/x86/entry/entry_64_compat.S | 3 +-- 2 files changed, 15 insertions(+), 20 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 9e262c8ccc2d97..f67ac2647dbf54 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -250,12 +250,14 @@ return_from_SYSCALL_64: /* * Try to use SYSRET instead of IRET if we're returning to - * a completely clean 64-bit userspace context. + * a completely clean 64-bit userspace context. If we're not, + * go to the slow exit path. */ movq RCX(%rsp), %rcx movq RIP(%rsp), %r11 - cmpq %rcx, %r11 /* RCX == RIP */ - jne opportunistic_sysret_failed + + cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */ + jne swapgs_restore_regs_and_return_to_usermode /* * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP @@ -273,14 +275,14 @@ return_from_SYSCALL_64: /* If this changed %rcx, it was not canonical */ cmpq %rcx, %r11 - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */ - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode movq R11(%rsp), %r11 cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */ - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode /* * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot @@ -301,12 +303,12 @@ return_from_SYSCALL_64: * would never get past 'stuck_here'. */ testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 - jnz opportunistic_sysret_failed + jnz swapgs_restore_regs_and_return_to_usermode /* nothing to check for RSP */ cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */ - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode /* * We win! This label is here just for ease of understanding @@ -319,10 +321,6 @@ syscall_return_via_sysret: movq RSP(%rsp), %rsp UNWIND_HINT_EMPTY USERGS_SYSRET64 - -opportunistic_sysret_failed: - SWAPGS - jmp restore_regs_and_return_to_usermode END(entry_SYSCALL_64) ENTRY(stub_ptregs_64) @@ -423,8 +421,7 @@ ENTRY(ret_from_fork) movq %rsp, %rdi call syscall_return_slowpath /* returns with IRQs disabled */ TRACE_IRQS_ON /* user mode is traced as IRQS on */ - SWAPGS - jmp restore_regs_and_return_to_usermode + jmp swapgs_restore_regs_and_return_to_usermode 1: /* kernel thread */ @@ -612,9 +609,8 @@ GLOBAL(retint_user) mov %rsp,%rdi call prepare_exit_to_usermode TRACE_IRQS_IRETQ - SWAPGS -GLOBAL(restore_regs_and_return_to_usermode) +GLOBAL(swapgs_restore_regs_and_return_to_usermode) #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates user mode. */ testl $3, CS(%rsp) @@ -622,6 +618,7 @@ GLOBAL(restore_regs_and_return_to_usermode) ud2 1: #endif + SWAPGS RESTORE_EXTRA_REGS RESTORE_C_REGS REMOVE_PT_GPREGS_FROM_STACK 8 @@ -1343,8 +1340,7 @@ ENTRY(nmi) * Return back to user mode. We must *not* do the normal exit * work, because we don't want to enable interrupts. 
*/ - SWAPGS - jmp restore_regs_and_return_to_usermode + jmp swapgs_restore_regs_and_return_to_usermode .Lnmi_from_kernel: /* diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 2fd682212d1d6c..568e130d932cd2 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -337,8 +337,7 @@ ENTRY(entry_INT80_compat) /* Go back to user mode. */ TRACE_IRQS_ON - SWAPGS - jmp restore_regs_and_return_to_usermode + jmp swapgs_restore_regs_and_return_to_usermode END(entry_INT80_compat) ENTRY(stub32_clone) From 37bae8ecdb518a676c5e12520f393d0069e57dfe Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:01 -0700 Subject: [PATCH 0841/3326] x86/entry/64: Simplify reg restore code in the standard IRET paths commit e872045bfd9c465a8555bab4b8567d56a4d2d3bb upstream. The old code restored all the registers with movq instead of pop. In theory, this was done because some CPUs have higher movq throughput, but any gain there would be tiny and is almost certainly outweighed by the higher text size. This saves 96 bytes of text. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/ad82520a207ccd851b04ba613f4f752b33ac05f7.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 21 +++++++++++++++++++++ arch/x86/entry/entry_64.S | 12 ++++++------ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 6e160031cfea1d..67d1df061d1abc 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -152,6 +152,27 @@ For 32-bit we have the following conventions - kernel is built with UNWIND_HINT_REGS offset=\offset extra=0 .endm + .macro POP_EXTRA_REGS + popq %r15 + popq %r14 + popq %r13 + popq %r12 + popq %rbp + popq %rbx + .endm + + .macro POP_C_REGS + popq %r11 + popq %r10 + popq %r9 + popq %r8 + popq %rax + popq %rcx + popq %rdx + popq %rsi + popq %rdi + .endm + .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1 .if \rstor_r11 movq 6*8(%rsp), %r11 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index f67ac2647dbf54..77a67ed991fc50 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -619,9 +619,9 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) 1: #endif SWAPGS - RESTORE_EXTRA_REGS - RESTORE_C_REGS - REMOVE_PT_GPREGS_FROM_STACK 8 + POP_EXTRA_REGS + POP_C_REGS + addq $8, %rsp /* skip regs->orig_ax */ INTERRUPT_RETURN @@ -651,9 +651,9 @@ GLOBAL(restore_regs_and_return_to_kernel) ud2 1: #endif - RESTORE_EXTRA_REGS - RESTORE_C_REGS - REMOVE_PT_GPREGS_FROM_STACK 8 + POP_EXTRA_REGS + POP_C_REGS + addq $8, %rsp /* skip regs->orig_ax */ INTERRUPT_RETURN ENTRY(native_iret) From b774233bcdd4753267c7238a4b7754b9cc2b45ed Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:02 -0700 Subject: [PATCH 0842/3326] x86/entry/64: Shrink paranoid_exit_restore and make labels local commit e53178328c9b96fbdbc719e78c93b5687ee007c3 upstream. paranoid_exit_restore was a copy of restore_regs_and_return_to_kernel. Merge them and make the paranoid_exit internal labels local. Keeping .Lparanoid_exit makes the code a bit shorter because it allows a 2-byte jnz instead of a 5-byte jnz. Saves 96 bytes of text. 
( This is still a bit suboptimal in a non-CONFIG_TRACE_IRQFLAGS kernel, but fixing that would make the code rather messy. ) Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/510d66a1895cda9473c84b1086f0bb974f22de6a.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 77a67ed991fc50..a828fb23d6fa23 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1124,17 +1124,14 @@ ENTRY(paranoid_exit) DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF_DEBUG testl %ebx, %ebx /* swapgs needed? */ - jnz paranoid_exit_no_swapgs + jnz .Lparanoid_exit_no_swapgs TRACE_IRQS_IRETQ SWAPGS_UNSAFE_STACK - jmp paranoid_exit_restore -paranoid_exit_no_swapgs: + jmp .Lparanoid_exit_restore +.Lparanoid_exit_no_swapgs: TRACE_IRQS_IRETQ_DEBUG -paranoid_exit_restore: - RESTORE_EXTRA_REGS - RESTORE_C_REGS - REMOVE_PT_GPREGS_FROM_STACK 8 - INTERRUPT_RETURN +.Lparanoid_exit_restore: + jmp restore_regs_and_return_to_kernel END(paranoid_exit) /* From 673b1522c6585548798b281419f4ef67b8d17045 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:03 -0700 Subject: [PATCH 0843/3326] x86/entry/64: Use pop instead of movq in syscall_return_via_sysret commit 4fbb39108f972437c44e5ffa781b56635d496826 upstream. Saves 64 bytes. Signed-off-by: Andy Lutomirski Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/6609b7f74ab31c36604ad746e019ea8495aec76c.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index a828fb23d6fa23..74dd2ec1e4e9fb 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -316,10 +316,18 @@ return_from_SYSCALL_64: */ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ - RESTORE_EXTRA_REGS - RESTORE_C_REGS_EXCEPT_RCX_R11 - movq RSP(%rsp), %rsp UNWIND_HINT_EMPTY + POP_EXTRA_REGS + popq %rsi /* skip r11 */ + popq %r10 + popq %r9 + popq %r8 + popq %rax + popq %rsi /* skip rcx */ + popq %rdx + popq %rsi + popq %rdi + movq RSP-ORIG_RAX(%rsp), %rsp USERGS_SYSRET64 END(entry_SYSCALL_64) From e7273aae139703223ef2128c88188a6a3182e855 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:04 -0700 Subject: [PATCH 0844/3326] x86/entry/64: Merge the fast and slow SYSRET paths commit a512210643da8082cb44181dba8b18e752bd68f0 upstream. They did almost the same thing. Remove a bunch of pointless instructions (mostly hidden in macros) and reduce cognitive load by merging them. 
Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1204e20233fcab9130a1ba80b3b1879b5db3fc1f.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 74dd2ec1e4e9fb..e6c75f6ac7e082 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -221,10 +221,9 @@ entry_SYSCALL_64_fastpath: TRACE_IRQS_ON /* user mode is traced as IRQs on */ movq RIP(%rsp), %rcx movq EFLAGS(%rsp), %r11 - RESTORE_C_REGS_EXCEPT_RCX_R11 - movq RSP(%rsp), %rsp + addq $6*8, %rsp /* skip extra regs -- they were preserved */ UNWIND_HINT_EMPTY - USERGS_SYSRET64 + jmp .Lpop_c_regs_except_rcx_r11_and_sysret 1: /* @@ -318,6 +317,7 @@ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ UNWIND_HINT_EMPTY POP_EXTRA_REGS +.Lpop_c_regs_except_rcx_r11_and_sysret: popq %rsi /* skip r11 */ popq %r10 popq %r9 From 2550871499e4521792db6f282964d994d25aa459 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:05 -0700 Subject: [PATCH 0845/3326] x86/entry/64: Use POP instead of MOV to restore regs on NMI return commit 471ee4832209e986029b9fabdaad57b1eecb856b upstream. This gets rid of the last user of the old RESTORE_..._REGS infrastructure. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/652a260f17a160789bc6a41d997f98249b73e2ab.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index e6c75f6ac7e082..50f887b15ac2ef 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1560,11 +1560,14 @@ end_repeat_nmi: nmi_swapgs: SWAPGS_UNSAFE_STACK nmi_restore: - RESTORE_EXTRA_REGS - RESTORE_C_REGS + POP_EXTRA_REGS + POP_C_REGS - /* Point RSP at the "iret" frame. */ - REMOVE_PT_GPREGS_FROM_STACK 6*8 + /* + * Skip orig_ax and the "outermost" frame to point RSP at the "iret" + * at the "iret" frame. + */ + addq $6*8, %rsp /* * Clear "NMI executing". Set DF first so that we can easily From 8d50dee92fb29c953a7101af63b0aea667c13296 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:06 -0700 Subject: [PATCH 0846/3326] x86/entry/64: Remove the RESTORE_..._REGS infrastructure commit c39858de696f0cc160a544455e8403d663d577e9 upstream. All users of RESTORE_EXTRA_REGS, RESTORE_C_REGS and such, and REMOVE_PT_GPREGS_FROM_STACK are gone. Delete the macros. 
Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/c32672f6e47c561893316d48e06c7656b1039a36.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 52 ---------------------------------------- 1 file changed, 52 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 67d1df061d1abc..3fd8bc560faece 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -142,16 +142,6 @@ For 32-bit we have the following conventions - kernel is built with UNWIND_HINT_REGS offset=\offset .endm - .macro RESTORE_EXTRA_REGS offset=0 - movq 0*8+\offset(%rsp), %r15 - movq 1*8+\offset(%rsp), %r14 - movq 2*8+\offset(%rsp), %r13 - movq 3*8+\offset(%rsp), %r12 - movq 4*8+\offset(%rsp), %rbp - movq 5*8+\offset(%rsp), %rbx - UNWIND_HINT_REGS offset=\offset extra=0 - .endm - .macro POP_EXTRA_REGS popq %r15 popq %r14 @@ -173,48 +163,6 @@ For 32-bit we have the following conventions - kernel is built with popq %rdi .endm - .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1 - .if \rstor_r11 - movq 6*8(%rsp), %r11 - .endif - .if \rstor_r8910 - movq 7*8(%rsp), %r10 - movq 8*8(%rsp), %r9 - movq 9*8(%rsp), %r8 - .endif - .if \rstor_rax - movq 10*8(%rsp), %rax - .endif - .if \rstor_rcx - movq 11*8(%rsp), %rcx - .endif - .if \rstor_rdx - movq 12*8(%rsp), %rdx - .endif - movq 13*8(%rsp), %rsi - movq 14*8(%rsp), %rdi - UNWIND_HINT_IRET_REGS offset=16*8 - .endm - .macro RESTORE_C_REGS - RESTORE_C_REGS_HELPER 1,1,1,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_RAX - RESTORE_C_REGS_HELPER 0,1,1,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_RCX - RESTORE_C_REGS_HELPER 1,0,1,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_R11 - RESTORE_C_REGS_HELPER 1,1,0,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_RCX_R11 - RESTORE_C_REGS_HELPER 1,0,0,1,1 - .endm - - .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 - subq $-(15*8+\addskip), %rsp - .endm - .macro icebp .byte 0xf1 .endm From f53f7a3f0156a39eccea5bfd8a746121627c9c27 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 2 Nov 2017 00:59:07 -0700 Subject: [PATCH 0847/3326] xen, x86/entry/64: Add xen NMI trap entry commit 43e4111086a70c78bedb6ad990bee97f17b27a6e upstream. Instead of trying to execute any NMI via the bare metal's NMI trap handler use a Xen specific one for PV domains, like we do for e.g. debug traps. As in a PV domain the NMI is handled via the normal kernel stack this is the correct thing to do. This will enable us to get rid of the very fragile and questionable dependencies between the bare metal NMI handler and Xen assumptions believed to be broken anyway. 
Signed-off-by: Juergen Gross Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/5baf5c0528d58402441550c5770b98e7961e7680.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 2 +- arch/x86/include/asm/traps.h | 2 +- arch/x86/xen/enlighten_pv.c | 2 +- arch/x86/xen/xen-asm_64.S | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 50f887b15ac2ef..01348b44010cbd 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1079,6 +1079,7 @@ idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry stack_segment do_stack_segment has_error_code=1 #ifdef CONFIG_XEN +idtentry xennmi do_nmi has_error_code=0 idtentry xendebug do_debug has_error_code=0 idtentry xenint3 do_int3 has_error_code=0 #endif @@ -1241,7 +1242,6 @@ ENTRY(error_exit) END(error_exit) /* Runs on exception stack */ -/* XXX: broken on Xen PV */ ENTRY(nmi) UNWIND_HINT_IRET_REGS /* diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index a230dd1896f036..1fadd310ff680e 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -38,9 +38,9 @@ asmlinkage void simd_coprocessor_error(void); #if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV) asmlinkage void xen_divide_error(void); +asmlinkage void xen_xennmi(void); asmlinkage void xen_xendebug(void); asmlinkage void xen_xenint3(void); -asmlinkage void xen_nmi(void); asmlinkage void xen_overflow(void); asmlinkage void xen_bounds(void); asmlinkage void xen_invalid_op(void); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index d4396e27b1fb75..60eb6dcacaac52 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -601,7 +601,7 @@ static struct trap_array_entry trap_array[] = { #ifdef CONFIG_X86_MCE { machine_check, xen_machine_check, true }, #endif - { nmi, xen_nmi, true }, + { nmi, xen_xennmi, true }, { overflow, xen_overflow, false }, #ifdef CONFIG_IA32_EMULATION { entry_INT80_compat, xen_entry_INT80_compat, false }, diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index c98a48c861fd33..8a10c9a9e2b506 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -30,7 +30,7 @@ xen_pv_trap debug xen_pv_trap xendebug xen_pv_trap int3 xen_pv_trap xenint3 -xen_pv_trap nmi +xen_pv_trap xennmi xen_pv_trap overflow xen_pv_trap bounds xen_pv_trap invalid_op From 6ff096cf2bf86824579bdcce3743143f993fbe5c Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:08 -0700 Subject: [PATCH 0848/3326] x86/entry/64: De-Xen-ify our NMI code commit 929bacec21478a72c78e4f29f98fb799bd00105a upstream. Xen PV is fundamentally incompatible with our fancy NMI code: it doesn't use IST at all, and Xen entries clobber two stack slots below the hardware frame. Drop Xen PV support from our NMI code entirely. 
Signed-off-by: Andy Lutomirski Reviewed-by: Borislav Petkov Acked-by: Juergen Gross Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/bfbe711b5ae03f672f8848999a8eb2711efc7f98.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 01348b44010cbd..a9c248b443053a 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1241,9 +1241,13 @@ ENTRY(error_exit) jmp retint_user END(error_exit) -/* Runs on exception stack */ +/* + * Runs on exception stack. Xen PV does not go through this path at all, + * so we can use real assembly here. + */ ENTRY(nmi) UNWIND_HINT_IRET_REGS + /* * We allow breakpoints in NMIs. If a breakpoint occurs, then * the iretq it performs will take us out of NMI context. @@ -1301,7 +1305,7 @@ ENTRY(nmi) * stacks lest we corrupt the "NMI executing" variable. */ - SWAPGS_UNSAFE_STACK + swapgs cld movq %rsp, %rdx movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp @@ -1466,7 +1470,7 @@ nested_nmi_out: popq %rdx /* We are returning to kernel mode, so this cannot result in a fault. */ - INTERRUPT_RETURN + iretq first_nmi: /* Restore rdx. */ @@ -1497,7 +1501,7 @@ first_nmi: pushfq /* RFLAGS */ pushq $__KERNEL_CS /* CS */ pushq $1f /* RIP */ - INTERRUPT_RETURN /* continues at repeat_nmi below */ + iretq /* continues at repeat_nmi below */ UNWIND_HINT_IRET_REGS 1: #endif @@ -1572,20 +1576,22 @@ nmi_restore: /* * Clear "NMI executing". Set DF first so that we can easily * distinguish the remaining code between here and IRET from - * the SYSCALL entry and exit paths. On a native kernel, we - * could just inspect RIP, but, on paravirt kernels, - * INTERRUPT_RETURN can translate into a jump into a - * hypercall page. + * the SYSCALL entry and exit paths. + * + * We arguably should just inspect RIP instead, but I (Andy) wrote + * this code when I had the misapprehension that Xen PV supported + * NMIs, and Xen PV would break that approach. */ std movq $0, 5*8(%rsp) /* clear "NMI executing" */ /* - * INTERRUPT_RETURN reads the "iret" frame and exits the NMI - * stack in a single instruction. We are returning to kernel - * mode, so this cannot result in a fault. + * iretq reads the "iret" frame and exits the NMI stack in a + * single instruction. We are returning to kernel mode, so this + * cannot result in a fault. Similarly, we don't need to worry + * about espfix64 on the way back to kernel mode. */ - INTERRUPT_RETURN + iretq END(nmi) ENTRY(ignore_sysret) From ebef3548d5777e842acb0ab1bba599643525536c Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:09 -0700 Subject: [PATCH 0849/3326] x86/entry/32: Pull the MSR_IA32_SYSENTER_CS update code out of native_load_sp0() commit bd7dc5a6afac719d8ce4092391eef2c7e83c2a75 upstream. This causes the MSR_IA32_SYSENTER_CS write to move out of the paravirt callback. This shouldn't affect Xen PV: Xen already ignores MSR_IA32_SYSENTER_ESP writes. In any event, Xen doesn't support vm86() in a useful way. Note to any potential backporters: This patch won't break lguest, as lguest didn't have any SYSENTER support at all. 
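[ Editor's sketch, not part of this patch: the pattern behind the new refresh_sysenter_cs() helper is "shadow the last value written to an expensive register and skip the write when nothing changed". The stand-alone user-space model below uses made-up names (sysenter_cs_shadow, wrmsr_stub) in place of the per-CPU TSS field and the real wrmsr(); it only illustrates the caching idea, not kernel behaviour. ]

#include <stdio.h>

static unsigned int sysenter_cs_shadow;		/* stand-in for the cached tss.ss1 value */

static void wrmsr_stub(unsigned int val)	/* stand-in for the real MSR write */
{
	printf("wrmsr(SYSENTER_CS, %#x)\n", val);
}

static void refresh_sysenter_cs_sketch(unsigned int want)
{
	if (sysenter_cs_shadow == want)
		return;				/* value unchanged: skip the expensive write */

	sysenter_cs_shadow = want;
	wrmsr_stub(want);
}

int main(void)
{
	refresh_sysenter_cs_sketch(0x10);	/* first write goes through */
	refresh_sysenter_cs_sketch(0x10);	/* same value: skipped */
	refresh_sysenter_cs_sketch(0);		/* changed: written again */
	return 0;
}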
Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/75cf09fe03ae778532d0ca6c65aa58e66bc2f90c.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/processor.h | 7 ------- arch/x86/include/asm/switch_to.h | 12 ++++++++++++ arch/x86/kernel/process_32.c | 4 +++- arch/x86/kernel/process_64.c | 2 +- arch/x86/kernel/vm86_32.c | 6 +++++- 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index bdac19ab24888f..cdb4614f49fa69 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -521,13 +521,6 @@ static inline void native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) { tss->x86_tss.sp0 = thread->sp0; -#ifdef CONFIG_X86_32 - /* Only happens when SEP is enabled, no need to test "SEP"arately: */ - if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { - tss->x86_tss.ss1 = thread->sysenter_cs; - wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); - } -#endif } static inline void native_swapgs(void) diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 899084b70412eb..0fe929d1ff70a4 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -73,4 +73,16 @@ do { \ ((last) = __switch_to_asm((prev), (next))); \ } while (0) +#ifdef CONFIG_X86_32 +static inline void refresh_sysenter_cs(struct thread_struct *thread) +{ + /* Only happens when SEP is enabled, no need to test "SEP"arately: */ + if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs)) + return; + + this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs); + wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); +} +#endif + #endif /* _ASM_X86_SWITCH_TO_H */ diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 11966251cd4259..0936ed3da6b693 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -284,9 +284,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* * Reload esp0 and cpu_current_top_of_stack. This changes - * current_thread_info(). + * current_thread_info(). Refresh the SYSENTER configuration in + * case prev or next is vm86. */ load_sp0(tss, next); + refresh_sysenter_cs(next); this_cpu_write(cpu_current_top_of_stack, (unsigned long)task_stack_page(next_p) + THREAD_SIZE); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 302e7b2572d188..a6ff6d1a011011 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -464,7 +464,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) */ this_cpu_write(current_task, next_p); - /* Reload esp0 and ss1. This changes current_thread_info(). */ + /* Reload sp0. 
*/ load_sp0(tss, next); /* diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 68244742ecb0bc..a59b9ce75efca4 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -55,6 +55,7 @@ #include #include #include +#include /* * Known problems: @@ -150,6 +151,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) tsk->thread.sp0 = vm86->saved_sp0; tsk->thread.sysenter_cs = __KERNEL_CS; load_sp0(tss, &tsk->thread); + refresh_sysenter_cs(&tsk->thread); vm86->saved_sp0 = 0; put_cpu(); @@ -369,8 +371,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) /* make room for real-mode segments */ tsk->thread.sp0 += 16; - if (static_cpu_has(X86_FEATURE_SEP)) + if (static_cpu_has(X86_FEATURE_SEP)) { tsk->thread.sysenter_cs = 0; + refresh_sysenter_cs(&tsk->thread); + } load_sp0(tss, &tsk->thread); put_cpu(); From e37558449abadb91ac13b85737d963f230118bdf Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:10 -0700 Subject: [PATCH 0850/3326] x86/entry/64: Pass SP0 directly to load_sp0() commit da51da189a24bb9b7e2d5a123be096e51a4695a5 upstream. load_sp0() had an odd signature: void load_sp0(struct tss_struct *tss, struct thread_struct *thread); Simplify it to: void load_sp0(unsigned long sp0); Also simplify a few get_cpu()/put_cpu() sequences to preempt_disable()/preempt_enable(). Signed-off-by: Andy Lutomirski Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/2655d8b42ed940aa384fe18ee1129bbbcf730a08.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/paravirt.h | 5 ++--- arch/x86/include/asm/paravirt_types.h | 2 +- arch/x86/include/asm/processor.h | 9 ++++----- arch/x86/kernel/cpu/common.c | 4 ++-- arch/x86/kernel/process_32.c | 2 +- arch/x86/kernel/process_64.c | 2 +- arch/x86/kernel/vm86_32.c | 14 ++++++-------- arch/x86/xen/enlighten_pv.c | 7 +++---- 8 files changed, 20 insertions(+), 25 deletions(-) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index fd81228e8037fe..283efcaac8aff8 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -16,10 +16,9 @@ #include #include -static inline void load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static inline void load_sp0(unsigned long sp0) { - PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread); + PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0); } /* The paravirtualized CPUID instruction. 
*/ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 10cc3b9709fe0b..6ec54d01972dcf 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -134,7 +134,7 @@ struct pv_cpu_ops { void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries); void (*free_ldt)(struct desc_struct *ldt, unsigned entries); - void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); + void (*load_sp0)(unsigned long sp0); void (*set_iopl_mask)(unsigned mask); diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index cdb4614f49fa69..90f66e6782943e 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -518,9 +518,9 @@ static inline void native_set_iopl_mask(unsigned mask) } static inline void -native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) +native_load_sp0(unsigned long sp0) { - tss->x86_tss.sp0 = thread->sp0; + this_cpu_write(cpu_tss.x86_tss.sp0, sp0); } static inline void native_swapgs(void) @@ -545,10 +545,9 @@ static inline unsigned long current_top_of_stack(void) #else #define __cpuid native_cpuid -static inline void load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static inline void load_sp0(unsigned long sp0) { - native_load_sp0(tss, thread); + native_load_sp0(sp0); } #define set_iopl_mask native_set_iopl_mask diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 03bb004bb15e2a..4e7fb9c3bfa5c1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1570,7 +1570,7 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, me); - load_sp0(t, ¤t->thread); + load_sp0(current->thread.sp0); set_tss_desc(cpu, t); load_TR_desc(); load_mm_ldt(&init_mm); @@ -1625,7 +1625,7 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, curr); - load_sp0(t, thread); + load_sp0(thread->sp0); set_tss_desc(cpu, t); load_TR_desc(); load_mm_ldt(&init_mm); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 0936ed3da6b693..40b85870e42932 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -287,7 +287,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) * current_thread_info(). Refresh the SYSENTER configuration in * case prev or next is vm86. */ - load_sp0(tss, next); + load_sp0(next->sp0); refresh_sysenter_cs(next); this_cpu_write(cpu_current_top_of_stack, (unsigned long)task_stack_page(next_p) + diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index a6ff6d1a011011..2124304fb77abc 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -465,7 +465,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) this_cpu_write(current_task, next_p); /* Reload sp0. 
*/ - load_sp0(tss, next); + load_sp0(next->sp0); /* * Now maybe reload the debug registers and handle I/O bitmaps diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index a59b9ce75efca4..f75e305a9b63af 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -95,7 +95,6 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) { - struct tss_struct *tss; struct task_struct *tsk = current; struct vm86plus_struct __user *user; struct vm86 *vm86 = current->thread.vm86; @@ -147,13 +146,13 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) do_exit(SIGSEGV); } - tss = &per_cpu(cpu_tss, get_cpu()); + preempt_disable(); tsk->thread.sp0 = vm86->saved_sp0; tsk->thread.sysenter_cs = __KERNEL_CS; - load_sp0(tss, &tsk->thread); + load_sp0(tsk->thread.sp0); refresh_sysenter_cs(&tsk->thread); vm86->saved_sp0 = 0; - put_cpu(); + preempt_enable(); memcpy(®s->pt, &vm86->regs32, sizeof(struct pt_regs)); @@ -239,7 +238,6 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) { - struct tss_struct *tss; struct task_struct *tsk = current; struct vm86 *vm86 = tsk->thread.vm86; struct kernel_vm86_regs vm86regs; @@ -367,8 +365,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) vm86->saved_sp0 = tsk->thread.sp0; lazy_save_gs(vm86->regs32.gs); - tss = &per_cpu(cpu_tss, get_cpu()); /* make room for real-mode segments */ + preempt_disable(); tsk->thread.sp0 += 16; if (static_cpu_has(X86_FEATURE_SEP)) { @@ -376,8 +374,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) refresh_sysenter_cs(&tsk->thread); } - load_sp0(tss, &tsk->thread); - put_cpu(); + load_sp0(tsk->thread.sp0); + preempt_enable(); if (vm86->flags & VM86_SCREEN_BITMAP) mark_screen_rdonly(tsk->mm); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 60eb6dcacaac52..e55d276afc709d 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -811,15 +811,14 @@ static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, } } -static void xen_load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static void xen_load_sp0(unsigned long sp0) { struct multicall_space mcs; mcs = xen_mc_entry(0); - MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0); + MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0); xen_mc_issue(PARAVIRT_LAZY_CPU); - tss->x86_tss.sp0 = thread->sp0; + this_cpu_write(cpu_tss.x86_tss.sp0, sp0); } void xen_set_iopl_mask(unsigned mask) From f576136bc88175014ef3f57378d0d982a5c4e786 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:11 -0700 Subject: [PATCH 0851/3326] x86/entry: Add task_top_of_stack() to find the top of a task's stack commit 3500130b84a3cdc5b6796eba1daf178944935efe upstream. This will let us get rid of a few places that hardcode accesses to thread.sp0. 
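[ Editor's model, not part of this patch: what task_top_of_stack() computes. pt_regs occupies the very top of the task's kernel stack, just below TOP_OF_KERNEL_STACK_PADDING, so "task_pt_regs(task) + 1" is the top of the stack. The user-space model below uses made-up names and sizes (THREAD_SIZE_MODEL, struct pt_regs_model); it only demonstrates the pointer arithmetic. ]

#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE_MODEL	(16 * 1024)	/* illustrative stack size */
#define STACK_PADDING_MODEL	0		/* 0 on x86_64; nonzero on 32-bit */

struct pt_regs_model { unsigned long regs[21]; };	/* stand-in for struct pt_regs */

static struct pt_regs_model *task_pt_regs_model(void *stack_page)
{
	unsigned long top = (unsigned long)stack_page
			    + THREAD_SIZE_MODEL - STACK_PADDING_MODEL;

	return (struct pt_regs_model *)top - 1;	/* pt_regs sits just below the top */
}

static unsigned long task_top_of_stack_model(void *stack_page)
{
	return (unsigned long)(task_pt_regs_model(stack_page) + 1);
}

int main(void)
{
	void *stack = malloc(THREAD_SIZE_MODEL);

	printf("stack base   %p\n", stack);
	printf("pt_regs      %p\n", (void *)task_pt_regs_model(stack));
	printf("top of stack %#lx\n", task_top_of_stack_model(stack));
	free(stack);
	return 0;
}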
Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/b49b3f95a8ff858c40c9b0f5b32be0355324327d.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/processor.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 90f66e6782943e..98278a23809371 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -796,6 +796,8 @@ static inline void spin_lock_prefetch(const void *x) #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \ TOP_OF_KERNEL_STACK_PADDING) +#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1)) + #ifdef CONFIG_X86_32 /* * User space process size: 3GB (default). From 0917dd6e7a73c39d5bb1036e2e1fc73d5d56cb8c Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:12 -0700 Subject: [PATCH 0852/3326] x86/xen/64, x86/entry/64: Clean up SP code in cpu_initialize_context() commit f16b3da1dc936c0f8121741d0a1731bf242f2f56 upstream. I'm removing thread_struct::sp0, and Xen's usage of it is slightly dubious and unnecessary. Use appropriate helpers instead. While we're at at, reorder the code slightly to make it more obvious what's going on. Signed-off-by: Andy Lutomirski Reviewed-by: Juergen Gross Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/d5b9a3da2b47c68325bd2bbe8f82d9554dee0d0f.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/xen/smp_pv.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 05f91ce9b55eea..c0c756c76afebf 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -14,6 +14,7 @@ * single-threaded. */ #include +#include #include #include #include @@ -294,12 +295,19 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) #endif memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); + /* + * Bring up the CPU in cpu_bringup_and_idle() with the stack + * pointing just below where pt_regs would be if it were a normal + * kernel entry. + */ ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; ctxt->flags = VGCF_IN_KERNEL; ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ ctxt->user_regs.ds = __USER_DS; ctxt->user_regs.es = __USER_DS; ctxt->user_regs.ss = __KERNEL_DS; + ctxt->user_regs.cs = __KERNEL_CS; + ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle); xen_copy_trap_info(ctxt->trap_ctxt); @@ -314,8 +322,13 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ctxt->gdt_frames[0] = gdt_mfn; ctxt->gdt_ents = GDT_ENTRIES; + /* + * Set SS:SP that Xen will use when entering guest kernel mode + * from guest user mode. Subsequent calls to load_sp0() can + * change this value. 
+ */ ctxt->kernel_ss = __KERNEL_DS; - ctxt->kernel_sp = idle->thread.sp0; + ctxt->kernel_sp = task_top_of_stack(idle); #ifdef CONFIG_X86_32 ctxt->event_callback_cs = __KERNEL_CS; @@ -327,10 +340,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) (unsigned long)xen_hypervisor_callback; ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback; - ctxt->user_regs.cs = __KERNEL_CS; per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir); - ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs); ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir)); if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt)) BUG(); From 71d7244efb0c9af0443f7376c844f3785a33497d Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:13 -0700 Subject: [PATCH 0853/3326] x86/entry/64: Stop initializing TSS.sp0 at boot commit 20bb83443ea79087b5e5f8dab4e9d80bb9bf7acb upstream. In my quest to get rid of thread_struct::sp0, I want to clean up or remove all of its readers. Two of them are in cpu_init() (32-bit and 64-bit), and they aren't needed. This is because we never enter userspace at all on the threads that CPUs are initialized in. Poison the initial TSS.sp0 and stop initializing it on CPU init. The comment text mostly comes from Dave Hansen. Thanks! Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/ee4a00540ad28c6cff475fbcc7769a4460acc861.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/common.c | 13 ++++++++++--- arch/x86/kernel/process.c | 8 +++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4e7fb9c3bfa5c1..cdf79ab628c244 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1570,9 +1570,13 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, me); - load_sp0(current->thread.sp0); + /* + * Initialize the TSS. Don't bother initializing sp0, as the initial + * task never enters user mode. + */ set_tss_desc(cpu, t); load_TR_desc(); + load_mm_ldt(&init_mm); clear_all_debug_regs(); @@ -1594,7 +1598,6 @@ void cpu_init(void) int cpu = smp_processor_id(); struct task_struct *curr = current; struct tss_struct *t = &per_cpu(cpu_tss, cpu); - struct thread_struct *thread = &curr->thread; wait_for_master_cpu(cpu); @@ -1625,9 +1628,13 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, curr); - load_sp0(thread->sp0); + /* + * Initialize the TSS. Don't bother initializing sp0, as the initial + * task never enters user mode. + */ set_tss_desc(cpu, t); load_TR_desc(); + load_mm_ldt(&init_mm); t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index c67685337c5ac3..97fb3e5737f5d0 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -49,7 +49,13 @@ */ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { .x86_tss = { - .sp0 = TOP_OF_INIT_STACK, + /* + * .sp0 is only used when entering ring 0 from a lower + * privilege level. Since the init task never runs anything + * but ring 0 code, there is no need for a valid value here. + * Poison it. 
+ */ + .sp0 = (1UL << (BITS_PER_LONG-1)) + 1, #ifdef CONFIG_X86_32 .ss0 = __KERNEL_DS, .ss1 = __KERNEL_CS, From c30eb760e3ecc7aebf37e557baecefb244733caa Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:14 -0700 Subject: [PATCH 0854/3326] x86/entry/64: Remove all remaining direct thread_struct::sp0 reads commit 46f5a10a721ce8dce8cc8fe55279b49e1c6b3288 upstream. The only remaining readers in context switch code or vm86(), and they all just want to update TSS.sp0 to match the current task. Replace them all with a new helper update_sp0(). Signed-off-by: Andy Lutomirski Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/2d231687f4ff288c9d9e98d7861b7df374246ac3.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/switch_to.h | 6 ++++++ arch/x86/kernel/process_32.c | 2 +- arch/x86/kernel/process_64.c | 2 +- arch/x86/kernel/vm86_32.c | 4 ++-- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 0fe929d1ff70a4..92dca86a39e3a7 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -85,4 +85,10 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread) } #endif +/* This is used when switching tasks or entering/exiting vm86 mode. */ +static inline void update_sp0(struct task_struct *task) +{ + load_sp0(task->thread.sp0); +} + #endif /* _ASM_X86_SWITCH_TO_H */ diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 40b85870e42932..45bf0c5f93e151 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -287,7 +287,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) * current_thread_info(). Refresh the SYSENTER configuration in * case prev or next is vm86. */ - load_sp0(next->sp0); + update_sp0(next_p); refresh_sysenter_cs(next); this_cpu_write(cpu_current_top_of_stack, (unsigned long)task_stack_page(next_p) + diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 2124304fb77abc..45e3809583926c 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -465,7 +465,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) this_cpu_write(current_task, next_p); /* Reload sp0. 
*/ - load_sp0(next->sp0); + update_sp0(next_p); /* * Now maybe reload the debug registers and handle I/O bitmaps diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index f75e305a9b63af..5edb27f1a2c407 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -149,7 +149,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) preempt_disable(); tsk->thread.sp0 = vm86->saved_sp0; tsk->thread.sysenter_cs = __KERNEL_CS; - load_sp0(tsk->thread.sp0); + update_sp0(tsk); refresh_sysenter_cs(&tsk->thread); vm86->saved_sp0 = 0; preempt_enable(); @@ -374,7 +374,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) refresh_sysenter_cs(&tsk->thread); } - load_sp0(tsk->thread.sp0); + update_sp0(tsk); preempt_enable(); if (vm86->flags & VM86_SCREEN_BITMAP) From 266a0b19177e9ad7767aed90e14033dd46a8f000 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:15 -0700 Subject: [PATCH 0855/3326] x86/entry/32: Fix cpu_current_top_of_stack initialization at boot commit cd493a6deb8b78eca280d05f7fa73fd69403ae29 upstream. cpu_current_top_of_stack's initialization forgot about TOP_OF_KERNEL_STACK_PADDING. This bug didn't matter because the idle threads never enter user mode. Signed-off-by: Andy Lutomirski Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/e5e370a7e6e4fddd1c4e4cf619765d96bb874b21.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/smpboot.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 5e0453f18a57c8..142126ab5aae8b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -962,8 +962,7 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle) #ifdef CONFIG_X86_32 /* Stack for startup_32 can be just as for start_secondary onwards */ irq_ctx_init(cpu); - per_cpu(cpu_current_top_of_stack, cpu) = - (unsigned long)task_stack_page(idle) + THREAD_SIZE; + per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle); #else initial_gs = per_cpu_offset(cpu); #endif From c6f563cd1393521c3bd74730cb972f4ec373694a Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:16 -0700 Subject: [PATCH 0856/3326] x86/entry/64: Remove thread_struct::sp0 commit d375cf1530595e33961a8844192cddab913650e3 upstream. On x86_64, we can easily calculate sp0 when needed instead of storing it in thread_struct. On x86_32, a similar cleanup would be possible, but it would require cleaning up the vm86 code first, and that can wait for a later cleanup series. 
Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/719cd9c66c548c4350d98a90f050aee8b17f8919.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/compat.h | 1 + arch/x86/include/asm/processor.h | 28 +++++++++------------------- arch/x86/include/asm/switch_to.h | 6 ++++++ arch/x86/kernel/process_64.c | 1 - 4 files changed, 16 insertions(+), 20 deletions(-) diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 70bc1df580b2da..2cbd75dd2fd354 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -7,6 +7,7 @@ */ #include #include +#include #include #include #include diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 98278a23809371..5c090050f75cd0 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -431,7 +431,9 @@ typedef struct { struct thread_struct { /* Cached TLS descriptors: */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; +#ifdef CONFIG_X86_32 unsigned long sp0; +#endif unsigned long sp; #ifdef CONFIG_X86_32 unsigned long sysenter_cs; @@ -798,6 +800,13 @@ static inline void spin_lock_prefetch(const void *x) #define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1)) +#define task_pt_regs(task) \ +({ \ + unsigned long __ptr = (unsigned long)task_stack_page(task); \ + __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \ + ((struct pt_regs *)__ptr) - 1; \ +}) + #ifdef CONFIG_X86_32 /* * User space process size: 3GB (default). @@ -817,23 +826,6 @@ static inline void spin_lock_prefetch(const void *x) .addr_limit = KERNEL_DS, \ } -/* - * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack. - * This is necessary to guarantee that the entire "struct pt_regs" - * is accessible even if the CPU haven't stored the SS/ESP registers - * on the stack (interrupt gate does not save these registers - * when switching to the same priv ring). - * Therefore beware: accessing the ss/esp fields of the - * "struct pt_regs" is possible, but they may contain the - * completely wrong values. - */ -#define task_pt_regs(task) \ -({ \ - unsigned long __ptr = (unsigned long)task_stack_page(task); \ - __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \ - ((struct pt_regs *)__ptr) - 1; \ -}) - #define KSTK_ESP(task) (task_pt_regs(task)->sp) #else @@ -867,11 +859,9 @@ static inline void spin_lock_prefetch(const void *x) #define STACK_TOP_MAX TASK_SIZE_MAX #define INIT_THREAD { \ - .sp0 = TOP_OF_INIT_STACK, \ .addr_limit = KERNEL_DS, \ } -#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) extern unsigned long KSTK_ESP(struct task_struct *task); #endif /* CONFIG_X86_64 */ diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 92dca86a39e3a7..8c6bd6863db9d6 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_SWITCH_TO_H #define _ASM_X86_SWITCH_TO_H +#include + struct task_struct; /* one of the stranger aspects of C forward declarations */ struct task_struct *__switch_to_asm(struct task_struct *prev, @@ -88,7 +90,11 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread) /* This is used when switching tasks or entering/exiting vm86 mode. 
*/ static inline void update_sp0(struct task_struct *task) { +#ifdef CONFIG_X86_32 load_sp0(task->thread.sp0); +#else + load_sp0(task_top_of_stack(task)); +#endif } #endif /* _ASM_X86_SWITCH_TO_H */ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 45e3809583926c..eeeb34f85c250e 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -274,7 +274,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, struct inactive_task_frame *frame; struct task_struct *me = current; - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE; childregs = task_pt_regs(p); fork_frame = container_of(childregs, struct fork_frame, regs); frame = &fork_frame->frame; From 35c1d57e63914254d463a2361b6348a2b823c70d Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 2 Nov 2017 00:59:17 -0700 Subject: [PATCH 0857/3326] x86/traps: Use a new on_thread_stack() helper to clean up an assertion commit 3383642c2f9d4f5b4fa37436db4a109a1a10018c upstream. Let's keep the stack-related logic together rather than open-coding a comparison in an assertion in the traps code. Signed-off-by: Andy Lutomirski Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/856b15bee1f55017b8f79d3758b0d51c48a08cf8.1509609304.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/processor.h | 6 ++++++ arch/x86/kernel/traps.c | 3 +-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 5c090050f75cd0..2db7cf720b04b2 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -542,6 +542,12 @@ static inline unsigned long current_top_of_stack(void) #endif } +static inline bool on_thread_stack(void) +{ + return (unsigned long)(current_top_of_stack() - + current_stack_pointer) < THREAD_SIZE; +} + #ifdef CONFIG_PARAVIRT #include #else diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5a6b8f809792bf..d366adfc61dabe 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -141,8 +141,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) * will catch asm bugs and any attempt to use ist_preempt_enable * from double_fault. */ - BUG_ON((unsigned long)(current_top_of_stack() - - current_stack_pointer) >= THREAD_SIZE); + BUG_ON(!on_thread_stack()); preempt_enable_no_resched(); } From b36c2c3ab339f50fb60f095ee508fc3376bdb1d6 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Thu, 2 Nov 2017 13:09:26 +0100 Subject: [PATCH 0858/3326] x86/entry/64: Shorten TEST instructions commit 1e4c4f610f774df6088d7c065b2dd4d22adba698 upstream. Convert TESTL to TESTB and save 3 bytes per callsite. No functionality change. 
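A short aside on why the byte-wide form is sufficient: only the low two bits of the saved CS selector (the privilege level) are tested, so comparing just the low byte is equivalent, and the imm8 encoding of TESTB is three bytes smaller than the imm32 encoding of TESTL. A minimal C sketch of the same predicate (illustrative only, with typical x86_64 selector values; not the kernel's code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * The saved CS selector: bits 0-1 hold the privilege level (3 = user,
 * 0 = kernel).  Testing only the low byte is therefore equivalent to
 * testing the full 32-bit value against $3, which is what lets the
 * TESTL -> TESTB conversion drop the imm32 for an imm8.
 */
static bool cs_indicates_user_mode(uint64_t cs)
{
        return (cs & 3) != 0;
}

int main(void)
{
        printf("user CS 0x33: %d, kernel CS 0x10: %d\n",
               cs_indicates_user_mode(0x33),   /* typical x86_64 user CS */
               cs_indicates_user_mode(0x10));  /* typical x86_64 kernel CS */
        return 0;
}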
Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Brian Gerst Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20171102120926.4srwerqrr7g72e2k@pd.tnic Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index a9c248b443053a..5063ed1214dd7d 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -621,7 +621,7 @@ GLOBAL(retint_user) GLOBAL(swapgs_restore_regs_and_return_to_usermode) #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates user mode. */ - testl $3, CS(%rsp) + testb $3, CS(%rsp) jnz 1f ud2 1: @@ -654,7 +654,7 @@ retint_kernel: GLOBAL(restore_regs_and_return_to_kernel) #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates kernel mode. */ - testl $3, CS(%rsp) + testb $3, CS(%rsp) jz 1f ud2 1: From 3243ae92926c4ad3a7f9596952542c6803e69050 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 2 Nov 2017 13:22:35 +0100 Subject: [PATCH 0859/3326] x86/cpuid: Replace set/clear_bit32() commit 06dd688ddda5819025e014b79aea9af6ab475fa2 upstream. Peter pointed out that the set/clear_bit32() variants are broken in various aspects. Replace them with open coded set/clear_bit() and type cast cpu_info::x86_capability as it's done in all other places throughout x86. Fixes: 0b00de857a64 ("x86/cpuid: Add generic table for CPUID dependencies") Reported-by: Peter Zijlstra Signed-off-by: Thomas Gleixner Cc: Andi Kleen Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/cpuid-deps.c | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index c21f22d836ad74..904b0a3c4e53c1 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -62,23 +62,19 @@ const static struct cpuid_dep cpuid_deps[] = { {} }; -static inline void __clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit) -{ - clear_bit32(bit, c->x86_capability); -} - -static inline void __setup_clear_cpu_cap(unsigned int bit) -{ - clear_cpu_cap(&boot_cpu_data, bit); - set_bit32(bit, cpu_caps_cleared); -} - static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature) { - if (!c) - __setup_clear_cpu_cap(feature); - else - __clear_cpu_cap(c, feature); + /* + * Note: This could use the non atomic __*_bit() variants, but the + * rest of the cpufeature code uses atomics as well, so keep it for + * consistency. Cleanup all of it separately. + */ + if (!c) { + clear_cpu_cap(&boot_cpu_data, feature); + set_bit(feature, (unsigned long *)cpu_caps_cleared); + } else { + clear_bit(feature, (unsigned long *)c->x86_capability); + } } /* Take the capabilities and the BUG bits into account */ From c1ffb6aefbc5aa6472e172d9c24c0e7a414a1e1f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 2 Nov 2017 13:30:03 +0100 Subject: [PATCH 0860/3326] bitops: Revert cbe96375025e ("bitops: Add clear/set_bit32() to linux/bitops.h") commit 1943dc07b45e347c52c1bfdd4a37e04a86e399aa upstream. These ops are not endian safe and may break on architectures which have alignment requirements. 
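To make the endianness hazard concrete before the revert itself, here is a minimal userspace sketch (illustrative only, not kernel code) of what treating a u32 bitmap as one long-sized word does on a 64-bit machine:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint32_t arr[2] = { 0, 0 };
        uint64_t word;

        /*
         * Roughly what set_bit(0, (unsigned long *)arr) does on a 64-bit
         * machine: operate on an 8-byte word that spans arr[0] and arr[1].
         * (memcpy is used here to sidestep the alignment/aliasing issues
         * the commit message also alludes to.)
         */
        memcpy(&word, arr, sizeof(word));
        word |= 1ULL << 0;
        memcpy(arr, &word, sizeof(word));

        printf("arr[0] = %#x, arr[1] = %#x\n", arr[0], arr[1]);
        /*
         * Little-endian: arr[0] == 1, as a u32-array user would expect.
         * Big-endian:    arr[1] == 1, i.e. the bit lands in the wrong
         * element, so the u32 helpers were not safe to use generically.
         */
        return 0;
}

On a little-endian host the bit lands where a u32-array caller expects; on a big-endian host it lands in the neighbouring element, which is why the previous patch open-codes set_bit()/clear_bit() with an explicit cast instead of keeping u32-specific wrappers.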
Reverts: cbe96375025e ("bitops: Add clear/set_bit32() to linux/bitops.h") Reported-by: Peter Zijlstra Signed-off-by: Thomas Gleixner Cc: Andi Kleen Signed-off-by: Greg Kroah-Hartman --- include/linux/bitops.h | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 8a7e9924df57c1..d03c5dd6185daa 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -228,32 +228,6 @@ static inline unsigned long __ffs64(u64 word) return __ffs((unsigned long)word); } -/* - * clear_bit32 - Clear a bit in memory for u32 array - * @nr: Bit to clear - * @addr: u32 * address of bitmap - * - * Same as clear_bit, but avoids needing casts for u32 arrays. - */ - -static __always_inline void clear_bit32(long nr, volatile u32 *addr) -{ - clear_bit(nr, (volatile unsigned long *)addr); -} - -/* - * set_bit32 - Set a bit in memory for u32 array - * @nr: Bit to clear - * @addr: u32 * address of bitmap - * - * Same as set_bit, but avoids needing casts for u32 arrays. - */ - -static __always_inline void set_bit32(long nr, volatile u32 *addr) -{ - set_bit(nr, (volatile unsigned long *)addr); -} - #ifdef __KERNEL__ #ifndef set_mask_bits From 64766453be2ee380ba7f3620bfeadcee2787145f Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 3 Nov 2017 11:20:28 +0100 Subject: [PATCH 0861/3326] x86/mm: Define _PAGE_TABLE using _KERNPG_TABLE commit c7da092a1f243bfd1bfb4124f538e69e941882da upstream. ... so that the difference is obvious. No functionality change. Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20171103102028.20284-1-bp@alien8.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/pgtable_types.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 59df7b47a4349e..9e9b05fc4860e7 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -200,10 +200,9 @@ enum page_cache_mode { #define _PAGE_ENC (_AT(pteval_t, sme_me_mask)) -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ - _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ _PAGE_DIRTY | _PAGE_ENC) +#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER) #define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _PAGE_ENC) #define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _PAGE_ENC) From 47af9e68f3f2e85b043e2c67e90d5aa90220a1d4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 31 Oct 2017 13:17:22 +0100 Subject: [PATCH 0862/3326] x86/cpufeatures: Re-tabulate the X86_FEATURE definitions commit acbc845ffefd9fb70466182cd8555a26189462b2 upstream. Over the years asm/cpufeatures.h has become somewhat of a mess: the original tabulation style was too narrow, while x86 feature names also kept growing in length, creating frequent field width overflows. Re-tabulate it to make it wider and easier to read/modify. Also harmonize the tabulation of the other defines in this file to match it. 
Cc: Andrew Morton Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20171031121723.28524-3-mingo@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 508 ++++++++++++++--------------- 1 file changed, 254 insertions(+), 254 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 74370734663c43..ad1b835001cc31 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,8 +13,8 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 18 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NCAPINTS 18 /* N 32-bit words worth of info */ +#define NBUGINTS 1 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used @@ -28,163 +28,163 @@ */ /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ -#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ -#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ -#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ -#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ -#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ -#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ -#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ -#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ -#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ -#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ -#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ -#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ -#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ -#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ -#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ +#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ +#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ +#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ +#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ +#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ +#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ +#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ +#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ +#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ +#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ +#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ +#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ +#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ +#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ +#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ /* (plus FCMOVcc, FCOMI with FPU) */ -#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ -#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ -#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ -#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ -#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ -#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ -#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ -#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ -#define X86_FEATURE_XMM ( 
0*32+25) /* "sse" */ -#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ -#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ -#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ -#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ -#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ -#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ +#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ +#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ +#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ +#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ +#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ +#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ +#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ +#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ +#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ +#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ +#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ +#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ +#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ +#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ +#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ /* Don't duplicate feature flags which are redundant with Intel! */ -#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ -#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ -#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ -#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ -#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ -#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ -#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ -#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ -#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ -#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */ +#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ +#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ +#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ +#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ +#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ +#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ +#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ +#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ +#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ +#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! 
*/ /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ -#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ -#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ -#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ +#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ +#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ +#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ /* Other features, Linux-defined mapping, word 3 */ /* This range is used for feature bits which conflict or are synthesized */ -#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ -#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ -#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ -#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ +#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ +#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ +#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ +#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ /* cpu types for specific tunings: */ -#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ -#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ -#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ -#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ -#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ -#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ -#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */ -#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ -#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ -#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ -#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ -#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ -#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ -#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ -#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ -#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ -#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ -#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ -#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ -#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ -#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ -#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ -#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ -#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ -#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ -#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ -#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ +#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ +#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ +#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ +#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ +#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ +#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ +#define X86_FEATURE_ART ( 3*32+10) /* Platform has always 
running timer (ART) */ +#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ +#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ +#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ +#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ +#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ +#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ +#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ +#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ +#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ +#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ +#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ +#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ +#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ +#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ +#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ +#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ +#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ +#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ +#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ +#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ -#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ -#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ -#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ -#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ -#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ -#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ -#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ -#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ -#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ -#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ -#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ -#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ -#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ -#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ -#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ -#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ -#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ -#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ -#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ -#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ -#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ -#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ -#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ +#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ +#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ +#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ +#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ +#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. 
Debug Store */ +#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ +#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ +#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ +#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ +#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ +#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ +#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ +#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ +#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ +#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ +#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ +#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ +#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ +#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ +#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ +#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ +#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ +#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ #define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ -#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ -#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ -#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ -#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ -#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ -#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ -#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ +#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ +#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ +#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ +#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ +#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ +#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ +#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ -#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ -#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ -#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ -#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ -#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ -#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ -#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ -#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ -#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ -#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ +#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ +#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ +#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ +#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ +#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ +#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ +#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ +#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ +#define X86_FEATURE_PMM_EN ( 
5*32+13) /* PMM enabled */ /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ -#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ -#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ -#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ -#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ -#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ -#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ -#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ -#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ -#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ -#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ -#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ -#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ -#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ -#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ -#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ -#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ -#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ -#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ -#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ -#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ -#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ -#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ -#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */ -#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ -#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ +#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ +#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ +#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ +#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ +#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ +#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ +#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ +#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ +#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ +#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ +#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ +#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ +#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ +#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ +#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ +#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ +#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ +#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ +#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ +#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ +#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ +#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ +#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ +#define 
X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */ +#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ +#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ /* * Auxiliary flags: Linux defined - For features scattered in various @@ -192,152 +192,152 @@ * * Reuse free bits when adding new feature flags! */ -#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ -#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ -#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ -#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ -#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ -#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ -#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ +#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ +#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ +#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ +#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ +#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ +#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ +#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ -#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ -#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ -#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ +#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ +#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ +#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ -#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ -#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ -#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ -#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ +#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ +#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ +#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ -#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ +#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ /* Virtualization flags: Linux defined, word 8 */ -#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ -#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ -#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ -#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ -#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ +#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ +#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ +#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ +#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ +#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ -#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ -#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ +#define X86_FEATURE_VMMCALL ( 8*32+15) /* 
Prefer vmmcall to vmcall */ +#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ -#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ -#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ -#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ -#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ -#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ -#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ -#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ -#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ -#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ -#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ -#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ -#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ -#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ -#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ -#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ -#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ -#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ -#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ -#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ -#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ -#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ -#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ -#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ -#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ -#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ -#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ -#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ +#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ +#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ +#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ +#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ +#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ +#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ +#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ +#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ +#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ +#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ +#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ +#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ +#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ +#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ +#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ +#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ +#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ 
+#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ +#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ +#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ +#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ +#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ +#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ +#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ +#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ +#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ /* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ -#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ -#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ -#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ -#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ +#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ +#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ +#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ +#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ /* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ -#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ +#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ /* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ -#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ -#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ -#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ +#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ +#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ +#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ -#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ -#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ +#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ +#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ -#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ -#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ -#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ -#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ -#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ -#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ -#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ -#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ -#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. 
Preference */ -#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ +#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ +#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ +#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ +#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ +#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ +#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ +#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ +#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ +#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ +#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ /* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ -#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ -#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ -#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ -#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ -#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ -#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ -#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ -#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ -#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ -#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ -#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ -#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ -#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ +#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ +#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ +#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ +#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ +#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ +#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ +#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ +#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ +#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ +#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ +#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ +#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ +#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ -#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ -#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ -#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ -#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ -#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ -#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ -#define X86_FEATURE_VPCLMULQDQ (16*32+ 10) /* Carry-Less Multiplication Double Quadword */ -#define X86_FEATURE_AVX512_VNNI (16*32+ 11) /* Vector Neural Network Instructions */ -#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for 
VPOPCNT[B,W] and VPSHUF-BITQMB */ -#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ -#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ -#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ +#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ +#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ +#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ +#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ +#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ +#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ +#define X86_FEATURE_VPCLMULQDQ (16*32+ 10) /* Carry-Less Multiplication Double Quadword */ +#define X86_FEATURE_AVX512_VNNI (16*32+ 11) /* Vector Neural Network Instructions */ +#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB */ +#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ +#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ +#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ -#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ -#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ -#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ +#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ +#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ +#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ /* * BUG word(s) */ -#define X86_BUG(x) (NCAPINTS*32 + (x)) +#define X86_BUG(x) (NCAPINTS*32 + (x)) -#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ -#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ -#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ -#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ -#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ -#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ -#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ -#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ -#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ +#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ +#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ +#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ +#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ +#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ +#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ +#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ +#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ +#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ #ifdef CONFIG_X86_32 /* * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional * to avoid confusion. 
*/ -#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ +#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ #endif -#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ -#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ -#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ -#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ +#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ +#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ +#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ #endif /* _ASM_X86_CPUFEATURES_H */ From d9eb267780ff856580885df063c5f4a6630e345f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 31 Oct 2017 13:17:23 +0100 Subject: [PATCH 0863/3326] x86/cpufeatures: Fix various details in the feature definitions commit f3a624e901c633593156f7b00ca743a6204a29bc upstream. Kept this commit separate from the re-tabulation changes, to make the changes easier to review: - add better explanation for entries with no explanation - fix/enhance the text of some of the entries - fix the vertical alignment of some of the feature number definitions - fix inconsistent capitalization - ... and lots of other small details i.e. make it all more of a coherent unit, instead of a patchwork of years of additions. Cc: Andrew Morton Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20171031121723.28524-4-mingo@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 149 ++++++++++++++--------------- 1 file changed, 74 insertions(+), 75 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index ad1b835001cc31..cdf5be8668631f 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -20,14 +20,12 @@ * Note: If the comment begins with a quoted string, that string is used * in /proc/cpuinfo instead of the macro name. If the string is "", * this feature bit is not displayed in /proc/cpuinfo at all. - */ - -/* + * * When adding new features here that depend on other features, - * please update the table in kernel/cpu/cpuid-deps.c + * please update the table in kernel/cpu/cpuid-deps.c as well. 
*/ -/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ +/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */ #define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ #define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ #define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ @@ -42,8 +40,7 @@ #define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ #define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ #define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ -#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ - /* (plus FCMOVcc, FCOMI with FPU) */ +#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */ #define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ #define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ #define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ @@ -63,15 +60,15 @@ /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ /* Don't duplicate feature flags which are redundant with Intel! */ #define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ -#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ +#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */ #define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ #define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ #define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ #define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ #define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ -#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ -#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ -#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */ +#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */ +#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */ +#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */ /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ #define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ @@ -84,66 +81,67 @@ #define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ #define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ #define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ -/* cpu types for specific tunings: */ + +/* CPU types for specific tunings: */ #define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ #define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ #define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ #define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ -#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ -#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */ +#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */ +#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */ #define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ #define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ #define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ -#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ -#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ -#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ -#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ -#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ +#define X86_FEATURE_SYSCALL32 ( 
3*32+14) /* "" syscall in IA32 userspace */ +#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ +#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ +#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */ +#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ #define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ -#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ +#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */ #define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ #define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ #define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ -#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ -#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ -#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ +#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */ +#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */ +#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */ #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ #define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ -/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ +/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */ #define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ #define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ #define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ -#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ -#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. 
Debug Store */ +#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */ +#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */ #define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ -#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ +#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */ #define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ #define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ #define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ #define X86_FEATURE_CID ( 4*32+10) /* Context ID */ #define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ #define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ -#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ +#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */ #define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ -#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ +#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */ #define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ #define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ #define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ #define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ -#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ +#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */ #define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ #define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ -#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ +#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */ #define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ -#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ -#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ +#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */ +#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */ #define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ -#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ -#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ +#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */ +#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */ #define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ @@ -158,10 +156,10 @@ #define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ #define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ -/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ +/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ #define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ -#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ +#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */ #define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ #define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ #define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ @@ -175,16 +173,16 @@ #define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ #define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ #define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ -#define X86_FEATURE_TCE ( 6*32+17) /* translation cache 
extension */ +#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */ #define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ -#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ -#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ -#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ +#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */ +#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */ +#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */ #define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ -#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ -#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */ +#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */ +#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */ #define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ -#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ +#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */ /* * Auxiliary flags: Linux defined - For features scattered in various @@ -192,7 +190,7 @@ * * Reuse free bits when adding new feature flags! */ -#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ +#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */ #define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ #define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ #define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ @@ -206,8 +204,8 @@ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ -#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ -#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ +#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ @@ -218,19 +216,19 @@ #define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ #define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ -#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ +#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */ #define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ -#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ -#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ +/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ +#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ +#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */ #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ #define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation 
extensions */ -#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ +#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */ #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ #define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ @@ -238,8 +236,8 @@ #define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ #define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ -#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ -#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ +#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */ +#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */ #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ #define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ @@ -251,25 +249,25 @@ #define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ #define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ -/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ -#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ -#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ -#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ -#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ +/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */ +#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */ +#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */ +#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */ +#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */ -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */ #define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ -#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */ +#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */ #define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ #define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ -/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ -#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ -#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ +/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ +#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ +#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ -/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ +/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ #define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ #define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ @@ -281,7 +279,7 @@ #define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy 
Perf. Preference */ #define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ -/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ +/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */ #define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ #define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ #define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ @@ -296,24 +294,24 @@ #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ #define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ #define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ #define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ #define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ -#define X86_FEATURE_VPCLMULQDQ (16*32+ 10) /* Carry-Less Multiplication Double Quadword */ -#define X86_FEATURE_AVX512_VNNI (16*32+ 11) /* Vector Neural Network Instructions */ -#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB */ +#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */ +#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */ +#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ -/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ -#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ -#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ -#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ +/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ +#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ +#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */ +#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */ /* * BUG word(s) @@ -340,4 +338,5 @@ #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ + #endif /* _ASM_X86_CPUFEATURES_H */ From 46e6a15b40c99402aedde940f157614e0c927c7e Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sat, 4 Nov 2017 04:19:50 -0700 Subject: [PATCH 0864/3326] selftests/x86/ldt_gdt: Add infrastructure to test set_thread_area() commit d744dcad39094c9187075e274d1cdef79c57c8b5 upstream. Much of the test design could apply to set_thread_area() (i.e. GDT), not just modify_ldt(). Add set_thread_area() to the install_valid_mode() helper. 
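As background for the diff below, a minimal userspace sketch of the two installation paths the helper now covers; the helper name and descriptor values are illustrative, not part of the selftest:

#include <asm/ldt.h>		/* struct user_desc */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

/* Install one illustrative 32-bit data segment either in the LDT via
 * modify_ldt() or in the GDT via set_thread_area(). */
static long install_example(int use_gdt)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number   = use_gdt ? (unsigned int)-1 : 0;	/* -1: kernel picks a TLS slot */
	desc.base_addr      = 0;
	desc.limit          = 10;
	desc.seg_32bit      = 1;
	desc.contents       = 0;	/* data segment, grows up */
	desc.read_exec_only = 0;

	if (use_gdt)
		return syscall(SYS_set_thread_area, &desc);

	return syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
}

On success, set_thread_area() writes the allocated GDT index back into desc.entry_number, which is why the selftest can reuse the same user_desc for both paths.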
Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/02c23f8fba5547007f741dc24c3926e5284ede02.1509794321.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/ldt_gdt.c | 53 +++++++++++++++++++-------- 1 file changed, 37 insertions(+), 16 deletions(-) diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index 2afc41a3730fb4..4e9add7bf8a93a 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -137,30 +137,51 @@ static void check_valid_segment(uint16_t index, int ldt, } } -static bool install_valid_mode(const struct user_desc *desc, uint32_t ar, - bool oldmode) +static bool install_valid_mode(const struct user_desc *d, uint32_t ar, + bool oldmode, bool ldt) { - int ret = syscall(SYS_modify_ldt, oldmode ? 1 : 0x11, - desc, sizeof(*desc)); - if (ret < -1) - errno = -ret; + struct user_desc desc = *d; + int ret; + + if (!ldt) { +#ifndef __i386__ + /* No point testing set_thread_area in a 64-bit build */ + return false; +#endif + if (!gdt_entry_num) + return false; + desc.entry_number = gdt_entry_num; + + ret = syscall(SYS_set_thread_area, &desc); + } else { + ret = syscall(SYS_modify_ldt, oldmode ? 1 : 0x11, + &desc, sizeof(desc)); + + if (ret < -1) + errno = -ret; + + if (ret != 0 && errno == ENOSYS) { + printf("[OK]\tmodify_ldt returned -ENOSYS\n"); + return false; + } + } + if (ret == 0) { - uint32_t limit = desc->limit; - if (desc->limit_in_pages) + uint32_t limit = desc.limit; + if (desc.limit_in_pages) limit = (limit << 12) + 4095; - check_valid_segment(desc->entry_number, 1, ar, limit, true); + check_valid_segment(desc.entry_number, ldt, ar, limit, true); return true; - } else if (errno == ENOSYS) { - printf("[OK]\tmodify_ldt returned -ENOSYS\n"); - return false; } else { - if (desc->seg_32bit) { - printf("[FAIL]\tUnexpected modify_ldt failure %d\n", + if (desc.seg_32bit) { + printf("[FAIL]\tUnexpected %s failure %d\n", + ldt ? "modify_ldt" : "set_thread_area", errno); nerrs++; return false; } else { - printf("[OK]\tmodify_ldt rejected 16 bit segment\n"); + printf("[OK]\t%s rejected 16 bit segment\n", + ldt ? "modify_ldt" : "set_thread_area"); return false; } } @@ -168,7 +189,7 @@ static bool install_valid_mode(const struct user_desc *desc, uint32_t ar, static bool install_valid(const struct user_desc *desc, uint32_t ar) { - return install_valid_mode(desc, ar, false); + return install_valid_mode(desc, ar, false, true); } static void install_invalid(const struct user_desc *desc, bool oldmode) From 4a464a66db6d574dd622b5218ca617da41461149 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sat, 4 Nov 2017 04:19:51 -0700 Subject: [PATCH 0865/3326] selftests/x86/ldt_gdt: Run most existing LDT test cases against the GDT as well commit adedf2893c192dd09b1cc2f2dcfdd7cad99ec49d upstream. Now that the main test infrastructure supports the GDT, run tests that will pass the kernel's GDT permission tests against the GDT. 
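Whether an entry really ended up installed, in the LDT or in the GDT, can be double-checked from userspace by probing the selector with LAR, which is the idea behind the check_valid_segment() calls above. A minimal sketch, with the selector encoding (index << 3, table-indicator bit, RPL 3) spelled out:

#include <stdint.h>

/* Read the access-rights word of a selector with LAR.  Returns 0 and
 * fills *ar if the descriptor is valid and present, -1 otherwise. */
static int probe_ar(uint16_t index, int ldt, uint32_t *ar)
{
	uint32_t sel = ((uint32_t)index << 3) | (ldt ? 4 : 0) | 3;
	uint32_t result;
	unsigned char ok;

	asm volatile ("lar %[sel], %[result]\n\t"
		      "setz %[ok]"
		      : [result] "=r" (result), [ok] "=qm" (ok)
		      : [sel] "r" (sel)
		      : "cc");
	if (!ok)
		return -1;

	*ar = result;
	return 0;
}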
Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/686a1eda63414da38fcecc2412db8dba1ae40581.1509794321.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/ldt_gdt.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index 4e9add7bf8a93a..66e5ce5b91f008 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -189,7 +189,15 @@ static bool install_valid_mode(const struct user_desc *d, uint32_t ar, static bool install_valid(const struct user_desc *desc, uint32_t ar) { - return install_valid_mode(desc, ar, false, true); + bool ret = install_valid_mode(desc, ar, false, true); + + if (desc->contents <= 1 && desc->seg_32bit && + !desc->seg_not_present) { + /* Should work in the GDT, too. */ + install_valid_mode(desc, ar, false, false); + } + + return ret; } static void install_invalid(const struct user_desc *desc, bool oldmode) From 99aee22dca890fb835570fa3a2a72ea43cca93ae Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 6 Nov 2017 18:44:24 +0000 Subject: [PATCH 0866/3326] ACPI / APEI: Replace ioremap_page_range() with fixmap commit 4f89fa286f6729312e227e7c2d764e8e7b9d340e upstream. Replace ghes_io{re,un}map_pfn_{nmi,irq}()s use of ioremap_page_range() with __set_fixmap() as ioremap_page_range() may sleep to allocate a new level of page-table, even if its passed an existing final-address to use in the mapping. The GHES driver can only be enabled for architectures that select HAVE_ACPI_APEI: Add fixmap entries to both x86 and arm64. clear_fixmap() does the TLB invalidation in __set_fixmap() for arm64 and __set_pte_vaddr() for x86. In each case its the same as the respective arch_apei_flush_tlb_one(). Reported-by: Fengguang Wu Suggested-by: Linus Torvalds Signed-off-by: James Morse Reviewed-by: Borislav Petkov Tested-by: Tyler Baicar Tested-by: Toshi Kani [ For the arm64 bits: ] Acked-by: Will Deacon [ For the x86 bits: ] Acked-by: Ingo Molnar Signed-off-by: Rafael J. 
Wysocki Cc: All applicable Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/fixmap.h | 7 ++++++ arch/x86/include/asm/fixmap.h | 6 +++++ drivers/acpi/apei/ghes.c | 44 +++++++++++---------------------- 3 files changed, 27 insertions(+), 30 deletions(-) diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index caf86be815ba2c..4052ec39e8dbb0 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -51,6 +51,13 @@ enum fixed_addresses { FIX_EARLYCON_MEM_BASE, FIX_TEXT_POKE0, + +#ifdef CONFIG_ACPI_APEI_GHES + /* Used for GHES mapping from assorted contexts */ + FIX_APEI_GHES_IRQ, + FIX_APEI_GHES_NMI, +#endif /* CONFIG_ACPI_APEI_GHES */ + __end_of_permanent_fixed_addresses, /* diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index dcd9fb55e67991..b0c505fe9a958c 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -104,6 +104,12 @@ enum fixed_addresses { FIX_GDT_REMAP_BEGIN, FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1, +#ifdef CONFIG_ACPI_APEI_GHES + /* Used for GHES mapping from assorted contexts */ + FIX_APEI_GHES_IRQ, + FIX_APEI_GHES_NMI, +#endif + __end_of_permanent_fixed_addresses, /* diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index cb7aceae35537a..572b6c7303edce 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include @@ -112,7 +113,7 @@ static DEFINE_MUTEX(ghes_list_mutex); * Because the memory area used to transfer hardware error information * from BIOS to Linux can be determined only in NMI, IRQ or timer * handler, but general ioremap can not be used in atomic context, so - * a special version of atomic ioremap is implemented for that. + * the fixmap is used instead. */ /* @@ -126,8 +127,8 @@ static DEFINE_MUTEX(ghes_list_mutex); /* virtual memory area for atomic ioremap */ static struct vm_struct *ghes_ioremap_area; /* - * These 2 spinlock is used to prevent atomic ioremap virtual memory - * area from being mapped simultaneously. + * These 2 spinlocks are used to prevent the fixmap entries from being used + * simultaneously. 
*/ static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); @@ -159,53 +160,36 @@ static void ghes_ioremap_exit(void) static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) { - unsigned long vaddr; phys_addr_t paddr; pgprot_t prot; - vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr); - paddr = pfn << PAGE_SHIFT; prot = arch_apei_get_mem_attribute(paddr); - ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); + __set_fixmap(FIX_APEI_GHES_NMI, paddr, prot); - return (void __iomem *)vaddr; + return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI); } static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) { - unsigned long vaddr; phys_addr_t paddr; pgprot_t prot; - vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); - paddr = pfn << PAGE_SHIFT; prot = arch_apei_get_mem_attribute(paddr); + __set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot); - ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); - - return (void __iomem *)vaddr; + return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ); } -static void ghes_iounmap_nmi(void __iomem *vaddr_ptr) +static void ghes_iounmap_nmi(void) { - unsigned long vaddr = (unsigned long __force)vaddr_ptr; - void *base = ghes_ioremap_area->addr; - - BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base)); - unmap_kernel_range_noflush(vaddr, PAGE_SIZE); - arch_apei_flush_tlb_one(vaddr); + clear_fixmap(FIX_APEI_GHES_NMI); } -static void ghes_iounmap_irq(void __iomem *vaddr_ptr) +static void ghes_iounmap_irq(void) { - unsigned long vaddr = (unsigned long __force)vaddr_ptr; - void *base = ghes_ioremap_area->addr; - - BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base)); - unmap_kernel_range_noflush(vaddr, PAGE_SIZE); - arch_apei_flush_tlb_one(vaddr); + clear_fixmap(FIX_APEI_GHES_IRQ); } static int ghes_estatus_pool_init(void) @@ -361,10 +345,10 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, paddr += trunk; buffer += trunk; if (in_nmi) { - ghes_iounmap_nmi(vaddr); + ghes_iounmap_nmi(); raw_spin_unlock(&ghes_ioremap_lock_nmi); } else { - ghes_iounmap_irq(vaddr); + ghes_iounmap_irq(); spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); } } From 04d26709b13e72b02d5e435c6a3bebb63167ac71 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 9 Nov 2017 14:27:35 +0100 Subject: [PATCH 0867/3326] x86/virt, x86/platform: Merge 'struct x86_hyper' into 'struct x86_platform' and 'struct x86_init' commit f72e38e8ec8869ac0ba5a75d7d2f897d98a1454e upstream. Instead of x86_hyper being either NULL on bare metal or a pointer to a struct hypervisor_x86 in case of the kernel running as a guest merge the struct into x86_platform and x86_init. This will remove the need for wrappers making it hard to find out what is being called. With dummy functions added for all callbacks testing for a NULL function pointer can be removed, too. 
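The shape of the change is easier to see in miniature: the ops structures come pre-filled with do-nothing defaults, and a detected hypervisor only overrides the slots it implements, so callers never test for NULL. A toy sketch of that pattern (names are made up, not the kernel's):

#include <stdbool.h>

struct hyper_init_ops {
	void (*init_platform)(void);
	bool (*x2apic_available)(void);
	void (*init_mem_mapping)(void);
};

/* Do-nothing defaults, analogous to x86_init_noop()/bool_x86_init_noop(). */
static void noop(void) { }
static bool bool_noop(void) { return false; }

static struct hyper_init_ops hyper_init = {
	.init_platform	  = noop,
	.x2apic_available = bool_noop,
	.init_mem_mapping = noop,
};

/* A detected guest overwrites only the callbacks it provides ... */
static void example_init_platform(void) { /* guest-specific setup */ }

static void register_detected_guest(void)
{
	hyper_init.init_platform = example_init_platform;
}

/* ... and every caller can then invoke the op unconditionally. */
static void init_mem_mapping_tail(void)
{
	hyper_init.init_mem_mapping();
}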
Suggested-by: Ingo Molnar Signed-off-by: Juergen Gross Acked-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: akataria@vmware.com Cc: boris.ostrovsky@oracle.com Cc: devel@linuxdriverproject.org Cc: haiyangz@microsoft.com Cc: kvm@vger.kernel.org Cc: kys@microsoft.com Cc: pbonzini@redhat.com Cc: rkrcmar@redhat.com Cc: rusty@rustcorp.com.au Cc: sthemmin@microsoft.com Cc: virtualization@lists.linux-foundation.org Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/20171109132739.23465-2-jgross@suse.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/hypervisor.h | 25 +++----------- arch/x86/include/asm/x86_init.h | 24 ++++++++++++++ arch/x86/kernel/apic/apic.c | 2 +- arch/x86/kernel/cpu/hypervisor.c | 54 +++++++++++++++---------------- arch/x86/kernel/cpu/mshyperv.c | 2 +- arch/x86/kernel/cpu/vmware.c | 4 +-- arch/x86/kernel/kvm.c | 2 +- arch/x86/kernel/x86_init.c | 9 ++++++ arch/x86/mm/init.c | 2 +- arch/x86/xen/enlighten_hvm.c | 8 ++--- arch/x86/xen/enlighten_pv.c | 2 +- include/linux/hypervisor.h | 8 +++-- 12 files changed, 81 insertions(+), 61 deletions(-) diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 0ead9dbb91301d..0eca7239a7aade 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -23,6 +23,7 @@ #ifdef CONFIG_HYPERVISOR_GUEST #include +#include #include /* @@ -35,17 +36,11 @@ struct hypervisor_x86 { /* Detection routine */ uint32_t (*detect)(void); - /* Platform setup (run once per boot) */ - void (*init_platform)(void); + /* init time callbacks */ + struct x86_hyper_init init; - /* X2APIC detection (run once per boot) */ - bool (*x2apic_available)(void); - - /* pin current vcpu to specified physical cpu (run rarely) */ - void (*pin_vcpu)(int); - - /* called during init_mem_mapping() to setup early mappings. 
*/ - void (*init_mem_mapping)(void); + /* runtime callbacks */ + struct x86_hyper_runtime runtime; }; extern const struct hypervisor_x86 *x86_hyper; @@ -58,17 +53,7 @@ extern const struct hypervisor_x86 x86_hyper_xen_hvm; extern const struct hypervisor_x86 x86_hyper_kvm; extern void init_hypervisor_platform(void); -extern bool hypervisor_x2apic_available(void); -extern void hypervisor_pin_vcpu(int cpu); - -static inline void hypervisor_init_mem_mapping(void) -{ - if (x86_hyper && x86_hyper->init_mem_mapping) - x86_hyper->init_mem_mapping(); -} #else static inline void init_hypervisor_platform(void) { } -static inline bool hypervisor_x2apic_available(void) { return false; } -static inline void hypervisor_init_mem_mapping(void) { } #endif /* CONFIG_HYPERVISOR_GUEST */ #endif /* _ASM_X86_HYPERVISOR_H */ diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 8a1ebf9540ddf0..ad15a0fda91745 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -114,6 +114,18 @@ struct x86_init_pci { void (*fixup_irqs)(void); }; +/** + * struct x86_hyper_init - x86 hypervisor init functions + * @init_platform: platform setup + * @x2apic_available: X2APIC detection + * @init_mem_mapping: setup early mappings during init_mem_mapping() + */ +struct x86_hyper_init { + void (*init_platform)(void); + bool (*x2apic_available)(void); + void (*init_mem_mapping)(void); +}; + /** * struct x86_init_ops - functions for platform specific setup * @@ -127,6 +139,7 @@ struct x86_init_ops { struct x86_init_timers timers; struct x86_init_iommu iommu; struct x86_init_pci pci; + struct x86_hyper_init hyper; }; /** @@ -199,6 +212,15 @@ struct x86_legacy_features { struct x86_legacy_devices devices; }; +/** + * struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks + * + * @pin_vcpu: pin current vcpu to specified physical cpu (run rarely) + */ +struct x86_hyper_runtime { + void (*pin_vcpu)(int cpu); +}; + /** * struct x86_platform_ops - platform specific runtime functions * @calibrate_cpu: calibrate CPU @@ -218,6 +240,7 @@ struct x86_legacy_features { * possible in x86_early_init_platform_quirks() by * only using the current x86_hardware_subarch * semantics. 
+ * @hyper: x86 hypervisor specific runtime callbacks */ struct x86_platform_ops { unsigned long (*calibrate_cpu)(void); @@ -233,6 +256,7 @@ struct x86_platform_ops { void (*apic_post_init)(void); struct x86_legacy_features legacy; void (*set_legacy_features)(void); + struct x86_hyper_runtime hyper; }; struct pci_dev; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ff891772c9f864..89c7c8569e5e75 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1645,7 +1645,7 @@ static __init void try_to_enable_x2apic(int remap_mode) * under KVM */ if (max_physical_apicid > 255 || - !hypervisor_x2apic_available()) { + !x86_init.hyper.x2apic_available()) { pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n"); x2apic_disable(); return; diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 4fa90006ac68cb..22226c1bf092a3 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -44,51 +44,49 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = const struct hypervisor_x86 *x86_hyper; EXPORT_SYMBOL(x86_hyper); -static inline void __init +static inline const struct hypervisor_x86 * __init detect_hypervisor_vendor(void) { - const struct hypervisor_x86 *h, * const *p; + const struct hypervisor_x86 *h = NULL, * const *p; uint32_t pri, max_pri = 0; for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) { - h = *p; - pri = h->detect(); - if (pri != 0 && pri > max_pri) { + pri = (*p)->detect(); + if (pri > max_pri) { max_pri = pri; - x86_hyper = h; + h = *p; } } - if (max_pri) - pr_info("Hypervisor detected: %s\n", x86_hyper->name); + if (h) + pr_info("Hypervisor detected: %s\n", h->name); + + return h; } -void __init init_hypervisor_platform(void) +static void __init copy_array(const void *src, void *target, unsigned int size) { + unsigned int i, n = size / sizeof(void *); + const void * const *from = (const void * const *)src; + const void **to = (const void **)target; - detect_hypervisor_vendor(); - - if (!x86_hyper) - return; - - if (x86_hyper->init_platform) - x86_hyper->init_platform(); + for (i = 0; i < n; i++) + if (from[i]) + to[i] = from[i]; } -bool __init hypervisor_x2apic_available(void) +void __init init_hypervisor_platform(void) { - return x86_hyper && - x86_hyper->x2apic_available && - x86_hyper->x2apic_available(); -} + const struct hypervisor_x86 *h; -void hypervisor_pin_vcpu(int cpu) -{ - if (!x86_hyper) + h = detect_hypervisor_vendor(); + + if (!h) return; - if (x86_hyper->pin_vcpu) - x86_hyper->pin_vcpu(cpu); - else - WARN_ONCE(1, "vcpu pinning requested but not supported!\n"); + copy_array(&h->init, &x86_init.hyper, sizeof(h->init)); + copy_array(&h->runtime, &x86_platform.hyper, sizeof(h->runtime)); + + x86_hyper = h; + x86_init.hyper.init_platform(); } diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 236324e83a3ae0..6bb84d655e4b7d 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -257,6 +257,6 @@ static void __init ms_hyperv_init_platform(void) const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { .name = "Microsoft Hyper-V", .detect = ms_hyperv_platform, - .init_platform = ms_hyperv_init_platform, + .init.init_platform = ms_hyperv_init_platform, }; EXPORT_SYMBOL(x86_hyper_ms_hyperv); diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 40ed26852ebd9a..4804c1d063c84f 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ 
b/arch/x86/kernel/cpu/vmware.c @@ -208,7 +208,7 @@ static bool __init vmware_legacy_x2apic_available(void) const __refconst struct hypervisor_x86 x86_hyper_vmware = { .name = "VMware", .detect = vmware_platform, - .init_platform = vmware_platform_setup, - .x2apic_available = vmware_legacy_x2apic_available, + .init.init_platform = vmware_platform_setup, + .init.x2apic_available = vmware_legacy_x2apic_available, }; EXPORT_SYMBOL(x86_hyper_vmware); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 8bb9594d076166..9dca8437c79504 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -547,7 +547,7 @@ static uint32_t __init kvm_detect(void) const struct hypervisor_x86 x86_hyper_kvm __refconst = { .name = "KVM", .detect = kvm_detect, - .x2apic_available = kvm_para_available, + .init.x2apic_available = kvm_para_available, }; EXPORT_SYMBOL_GPL(x86_hyper_kvm); diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index a088b2c47f7396..5b2d10c1973ab1 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -28,6 +28,8 @@ void x86_init_noop(void) { } void __init x86_init_uint_noop(unsigned int unused) { } int __init iommu_init_noop(void) { return 0; } void iommu_shutdown_noop(void) { } +bool __init bool_x86_init_noop(void) { return false; } +void x86_op_int_noop(int cpu) { } /* * The platform setup functions are preset with the default functions @@ -81,6 +83,12 @@ struct x86_init_ops x86_init __initdata = { .init_irq = x86_default_pci_init_irq, .fixup_irqs = x86_default_pci_fixup_irqs, }, + + .hyper = { + .init_platform = x86_init_noop, + .x2apic_available = bool_x86_init_noop, + .init_mem_mapping = x86_init_noop, + }, }; struct x86_cpuinit_ops x86_cpuinit = { @@ -101,6 +109,7 @@ struct x86_platform_ops x86_platform __ro_after_init = { .get_nmi_reason = default_get_nmi_reason, .save_sched_clock_state = tsc_save_sched_clock_state, .restore_sched_clock_state = tsc_restore_sched_clock_state, + .hyper.pin_vcpu = x86_op_int_noop, }; EXPORT_SYMBOL_GPL(x86_platform); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index af5c1ed21d43ac..a22c2b95e51339 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -671,7 +671,7 @@ void __init init_mem_mapping(void) load_cr3(swapper_pg_dir); __flush_tlb_all(); - hypervisor_init_mem_mapping(); + x86_init.hyper.init_mem_mapping(); early_memtest(0, max_pfn_mapped << PAGE_SHIFT); } diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index de503c225ae1f1..7b1622089f9659 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -229,9 +229,9 @@ static uint32_t __init xen_platform_hvm(void) const struct hypervisor_x86 x86_hyper_xen_hvm = { .name = "Xen HVM", .detect = xen_platform_hvm, - .init_platform = xen_hvm_guest_init, - .pin_vcpu = xen_pin_vcpu, - .x2apic_available = xen_x2apic_para_available, - .init_mem_mapping = xen_hvm_init_mem_mapping, + .init.init_platform = xen_hvm_guest_init, + .init.x2apic_available = xen_x2apic_para_available, + .init.init_mem_mapping = xen_hvm_init_mem_mapping, + .runtime.pin_vcpu = xen_pin_vcpu, }; EXPORT_SYMBOL(x86_hyper_xen_hvm); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index e55d276afc709d..57d63b0f1c0cf3 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1462,6 +1462,6 @@ static uint32_t __init xen_platform_pv(void) const struct hypervisor_x86 x86_hyper_xen_pv = { .name = "Xen PV", .detect = xen_platform_pv, - .pin_vcpu = xen_pin_vcpu, + .runtime.pin_vcpu = 
xen_pin_vcpu, }; EXPORT_SYMBOL(x86_hyper_xen_pv); diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h index b4054fd5b6f66f..b19563f9a8ebb9 100644 --- a/include/linux/hypervisor.h +++ b/include/linux/hypervisor.h @@ -7,8 +7,12 @@ * Juergen Gross */ -#ifdef CONFIG_HYPERVISOR_GUEST -#include +#ifdef CONFIG_X86 +#include +static inline void hypervisor_pin_vcpu(int cpu) +{ + x86_platform.hyper.pin_vcpu(cpu); +} #else static inline void hypervisor_pin_vcpu(int cpu) { From 399bbc9bb611e10dc5fd232b96b0bea18fca3482 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 9 Nov 2017 14:27:36 +0100 Subject: [PATCH 0868/3326] x86/virt: Add enum for hypervisors to replace x86_hyper commit 03b2a320b19f1424e9ac9c21696be9c60b6d0d93 upstream. The x86_hyper pointer is only used for checking whether a virtual device is supporting the hypervisor the system is running on. Use an enum for that purpose instead and drop the x86_hyper pointer. Signed-off-by: Juergen Gross Acked-by: Thomas Gleixner Acked-by: Xavier Deguillard Cc: Linus Torvalds Cc: Peter Zijlstra Cc: akataria@vmware.com Cc: arnd@arndb.de Cc: boris.ostrovsky@oracle.com Cc: devel@linuxdriverproject.org Cc: dmitry.torokhov@gmail.com Cc: gregkh@linuxfoundation.org Cc: haiyangz@microsoft.com Cc: kvm@vger.kernel.org Cc: kys@microsoft.com Cc: linux-graphics-maintainer@vmware.com Cc: linux-input@vger.kernel.org Cc: moltmann@vmware.com Cc: pbonzini@redhat.com Cc: pv-drivers@vmware.com Cc: rkrcmar@redhat.com Cc: sthemmin@microsoft.com Cc: virtualization@lists.linux-foundation.org Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/20171109132739.23465-3-jgross@suse.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/hyperv/hv_init.c | 2 +- arch/x86/include/asm/hypervisor.h | 23 ++++++++++++++--------- arch/x86/kernel/cpu/hypervisor.c | 12 +++++++++--- arch/x86/kernel/cpu/mshyperv.c | 4 ++-- arch/x86/kernel/cpu/vmware.c | 4 ++-- arch/x86/kernel/kvm.c | 4 ++-- arch/x86/xen/enlighten_hvm.c | 4 ++-- arch/x86/xen/enlighten_pv.c | 4 ++-- drivers/hv/vmbus_drv.c | 2 +- drivers/input/mouse/vmmouse.c | 10 ++++------ drivers/misc/vmw_balloon.c | 2 +- 11 files changed, 40 insertions(+), 31 deletions(-) diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index a5db63f728a2f9..a0b86cf486e0ad 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -113,7 +113,7 @@ void hyperv_init(void) u64 guest_id; union hv_x64_msr_hypercall_contents hypercall_msr; - if (x86_hyper != &x86_hyper_ms_hyperv) + if (x86_hyper_type != X86_HYPER_MS_HYPERV) return; /* Allocate percpu VP index */ diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 0eca7239a7aade..1b0a5abcd8aeb6 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -29,6 +29,16 @@ /* * x86 hypervisor information */ + +enum x86_hypervisor_type { + X86_HYPER_NATIVE = 0, + X86_HYPER_VMWARE, + X86_HYPER_MS_HYPERV, + X86_HYPER_XEN_PV, + X86_HYPER_XEN_HVM, + X86_HYPER_KVM, +}; + struct hypervisor_x86 { /* Hypervisor name */ const char *name; @@ -36,6 +46,9 @@ struct hypervisor_x86 { /* Detection routine */ uint32_t (*detect)(void); + /* Hypervisor type */ + enum x86_hypervisor_type type; + /* init time callbacks */ struct x86_hyper_init init; @@ -43,15 +56,7 @@ struct hypervisor_x86 { struct x86_hyper_runtime runtime; }; -extern const struct hypervisor_x86 *x86_hyper; - -/* Recognized hypervisors */ -extern const struct hypervisor_x86 x86_hyper_vmware; -extern const struct 
hypervisor_x86 x86_hyper_ms_hyperv; -extern const struct hypervisor_x86 x86_hyper_xen_pv; -extern const struct hypervisor_x86 x86_hyper_xen_hvm; -extern const struct hypervisor_x86 x86_hyper_kvm; - +extern enum x86_hypervisor_type x86_hyper_type; extern void init_hypervisor_platform(void); #else static inline void init_hypervisor_platform(void) { } diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 22226c1bf092a3..bea8d3e24f5087 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -26,6 +26,12 @@ #include #include +extern const struct hypervisor_x86 x86_hyper_vmware; +extern const struct hypervisor_x86 x86_hyper_ms_hyperv; +extern const struct hypervisor_x86 x86_hyper_xen_pv; +extern const struct hypervisor_x86 x86_hyper_xen_hvm; +extern const struct hypervisor_x86 x86_hyper_kvm; + static const __initconst struct hypervisor_x86 * const hypervisors[] = { #ifdef CONFIG_XEN_PV @@ -41,8 +47,8 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = #endif }; -const struct hypervisor_x86 *x86_hyper; -EXPORT_SYMBOL(x86_hyper); +enum x86_hypervisor_type x86_hyper_type; +EXPORT_SYMBOL(x86_hyper_type); static inline const struct hypervisor_x86 * __init detect_hypervisor_vendor(void) @@ -87,6 +93,6 @@ void __init init_hypervisor_platform(void) copy_array(&h->init, &x86_init.hyper, sizeof(h->init)); copy_array(&h->runtime, &x86_platform.hyper, sizeof(h->runtime)); - x86_hyper = h; + x86_hyper_type = h->type; x86_init.hyper.init_platform(); } diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 6bb84d655e4b7d..85eb5fc180c819 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -254,9 +254,9 @@ static void __init ms_hyperv_init_platform(void) #endif } -const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { +const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = { .name = "Microsoft Hyper-V", .detect = ms_hyperv_platform, + .type = X86_HYPER_MS_HYPERV, .init.init_platform = ms_hyperv_init_platform, }; -EXPORT_SYMBOL(x86_hyper_ms_hyperv); diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 4804c1d063c84f..8e005329648b6b 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -205,10 +205,10 @@ static bool __init vmware_legacy_x2apic_available(void) (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0; } -const __refconst struct hypervisor_x86 x86_hyper_vmware = { +const __initconst struct hypervisor_x86 x86_hyper_vmware = { .name = "VMware", .detect = vmware_platform, + .type = X86_HYPER_VMWARE, .init.init_platform = vmware_platform_setup, .init.x2apic_available = vmware_legacy_x2apic_available, }; -EXPORT_SYMBOL(x86_hyper_vmware); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 9dca8437c79504..a94de09edbed25 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -544,12 +544,12 @@ static uint32_t __init kvm_detect(void) return kvm_cpuid_base(); } -const struct hypervisor_x86 x86_hyper_kvm __refconst = { +const __initconst struct hypervisor_x86 x86_hyper_kvm = { .name = "KVM", .detect = kvm_detect, + .type = X86_HYPER_KVM, .init.x2apic_available = kvm_para_available, }; -EXPORT_SYMBOL_GPL(x86_hyper_kvm); static __init int activate_jump_labels(void) { diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index 7b1622089f9659..754d5391d9fa76 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -226,12 +226,12 @@ static 
uint32_t __init xen_platform_hvm(void) return xen_cpuid_base(); } -const struct hypervisor_x86 x86_hyper_xen_hvm = { +const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = { .name = "Xen HVM", .detect = xen_platform_hvm, + .type = X86_HYPER_XEN_HVM, .init.init_platform = xen_hvm_guest_init, .init.x2apic_available = xen_x2apic_para_available, .init.init_mem_mapping = xen_hvm_init_mem_mapping, .runtime.pin_vcpu = xen_pin_vcpu, }; -EXPORT_SYMBOL(x86_hyper_xen_hvm); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 57d63b0f1c0cf3..fbd054d6ac973d 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1459,9 +1459,9 @@ static uint32_t __init xen_platform_pv(void) return 0; } -const struct hypervisor_x86 x86_hyper_xen_pv = { +const __initconst struct hypervisor_x86 x86_hyper_xen_pv = { .name = "Xen PV", .detect = xen_platform_pv, + .type = X86_HYPER_XEN_PV, .runtime.pin_vcpu = xen_pin_vcpu, }; -EXPORT_SYMBOL(x86_hyper_xen_pv); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 937801ac2fe0ea..2cd134dd94d214 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1534,7 +1534,7 @@ static int __init hv_acpi_init(void) { int ret, t; - if (x86_hyper != &x86_hyper_ms_hyperv) + if (x86_hyper_type != X86_HYPER_MS_HYPERV) return -ENODEV; init_completion(&probe_event); diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c index 0f586780ceb4be..1ae5c1ef3f5bbb 100644 --- a/drivers/input/mouse/vmmouse.c +++ b/drivers/input/mouse/vmmouse.c @@ -316,11 +316,9 @@ static int vmmouse_enable(struct psmouse *psmouse) /* * Array of supported hypervisors. */ -static const struct hypervisor_x86 *vmmouse_supported_hypervisors[] = { - &x86_hyper_vmware, -#ifdef CONFIG_KVM_GUEST - &x86_hyper_kvm, -#endif +static enum x86_hypervisor_type vmmouse_supported_hypervisors[] = { + X86_HYPER_VMWARE, + X86_HYPER_KVM, }; /** @@ -331,7 +329,7 @@ static bool vmmouse_check_hypervisor(void) int i; for (i = 0; i < ARRAY_SIZE(vmmouse_supported_hypervisors); i++) - if (vmmouse_supported_hypervisors[i] == x86_hyper) + if (vmmouse_supported_hypervisors[i] == x86_hyper_type) return true; return false; diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 1e688bfec56728..9047c0a529b282 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -1271,7 +1271,7 @@ static int __init vmballoon_init(void) * Check if we are running on VMware's hypervisor and bail out * if we are not. */ - if (x86_hyper != &x86_hyper_vmware) + if (x86_hyper_type != X86_HYPER_VMWARE) return -ENODEV; for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES; From 330a4f53bbb4add9c998344af15c27afc9b1e825 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 5 Dec 2017 14:14:47 +0100 Subject: [PATCH 0869/3326] drivers/misc/intel/pti: Rename the header file to free up the namespace commit 1784f9144b143a1e8b19fe94083b040aa559182b upstream. We'd like to use the 'PTI' acronym for 'Page Table Isolation' - free up the namespace by renaming the driver header to . (Also standardize the header guard name while at it.) 
Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: J Freyensee Cc: Greg Kroah-Hartman Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- drivers/misc/pti.c | 2 +- include/linux/{pti.h => intel-pti.h} | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) rename include/linux/{pti.h => intel-pti.h} (94%) diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index eda38cbe85307e..41f2a9f6851d9e 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include diff --git a/include/linux/pti.h b/include/linux/intel-pti.h similarity index 94% rename from include/linux/pti.h rename to include/linux/intel-pti.h index b3ea01a3197efa..2710d72de3c926 100644 --- a/include/linux/pti.h +++ b/include/linux/intel-pti.h @@ -22,8 +22,8 @@ * interface to write out it's contents for debugging a mobile system. */ -#ifndef PTI_H_ -#define PTI_H_ +#ifndef LINUX_INTEL_PTI_H_ +#define LINUX_INTEL_PTI_H_ /* offset for last dword of any PTI message. Part of MIPI P1149.7 */ #define PTI_LASTDWORD_DTS 0x30 @@ -40,4 +40,4 @@ struct pti_masterchannel *pti_request_masterchannel(u8 type, const char *thread_name); void pti_release_masterchannel(struct pti_masterchannel *mc); -#endif /*PTI_H_*/ +#endif /* LINUX_INTEL_PTI_H_ */ From c6e38628af6d6e06d6b6038c00ed140323e3ba97 Mon Sep 17 00:00:00 2001 From: Ricardo Neri Date: Sun, 5 Nov 2017 18:27:51 -0800 Subject: [PATCH 0870/3326] x86/cpufeature: Add User-Mode Instruction Prevention definitions commit a8b4db562e7283a1520f9e9730297ecaab7622ea upstream. [ Note, this is a Git cherry-pick of the following commit: (limited to the cpufeatures.h file) 3522c2a6a4f3 ("x86/cpufeature: Add User-Mode Instruction Prevention definitions") ... for easier x86 PTI code testing and back-porting. ] User-Mode Instruction Prevention is a security feature present in new Intel processors that, when set, prevents the execution of a subset of instructions if such instructions are executed in user mode (CPL > 0). Attempting to execute such instructions causes a general protection exception. The subset of instructions comprises: * SGDT - Store Global Descriptor Table * SIDT - Store Interrupt Descriptor Table * SLDT - Store Local Descriptor Table * SMSW - Store Machine Status Word * STR - Store Task Register This feature is also added to the list of disabled-features to allow a cleaner handling of build-time configuration. Signed-off-by: Ricardo Neri Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Chen Yucong Cc: Chris Metcalf Cc: Dave Hansen Cc: Denys Vlasenko Cc: Fenghua Yu Cc: H. Peter Anvin Cc: Huang Rui Cc: Jiri Slaby Cc: Jonathan Corbet Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Michael S. Tsirkin Cc: Paolo Bonzini Cc: Paul Gortmaker Cc: Peter Zijlstra Cc: Ravi V. 
Shankar Cc: Shuah Khan Cc: Tony Luck Cc: Vlastimil Babka Cc: ricardo.neri@intel.com Link: http://lkml.kernel.org/r/1509935277-22138-7-git-send-email-ricardo.neri-calderon@linux.intel.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index cdf5be8668631f..c0b0e9e8aa66eb 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -296,6 +296,7 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ +#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */ #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ #define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ From e918424231eef4f6652c2221ba2f1d6b44e04233 Mon Sep 17 00:00:00 2001 From: Rudolf Marek Date: Tue, 28 Nov 2017 22:01:06 +0100 Subject: [PATCH 0871/3326] x86: Make X86_BUG_FXSAVE_LEAK detectable in CPUID on AMD commit f2dbad36c55e5d3a91dccbde6e8cae345fe5632f upstream. [ Note, this is a Git cherry-pick of the following commit: 2b67799bdf25 ("x86: Make X86_BUG_FXSAVE_LEAK detectable in CPUID on AMD") ... for easier x86 PTI code testing and back-porting. ] The latest AMD AMD64 Architecture Programmer's Manual adds a CPUID feature XSaveErPtr (CPUID_Fn80000008_EBX[2]). If this feature is set, the FXSAVE, XSAVE, FXSAVEOPT, XSAVEC, XSAVES / FXRSTOR, XRSTOR, XRSTORS always save/restore error pointers, thus making the X86_BUG_FXSAVE_LEAK workaround obsolete on such CPUs. 
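The feature bit the patch keys off can be read directly with CPUID; a small userspace sketch, with the leaf and bit position taken from the manual reference quoted above:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 0x80000008, EBX bit 2: XSaveErPtr on AMD. */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("XSaveErPtr (FXSAVE leak workaround unneeded): %s\n",
	       (ebx & (1u << 2)) ? "yes" : "no");
	return 0;
}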
Signed-Off-By: Rudolf Marek Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Tested-by: Borislav Petkov Cc: Andy Lutomirski Link: https://lkml.kernel.org/r/bdcebe90-62c5-1f05-083c-eba7f08b2540@assembler.cz Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/kernel/cpu/amd.c | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index c0b0e9e8aa66eb..800104c8a3edfe 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -266,6 +266,7 @@ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ +#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index d58184b7cd4438..bcb75dc97d4407 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -804,8 +804,11 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x17: init_amd_zn(c); break; } - /* Enable workaround for FXSAVE leak */ - if (c->x86 >= 6) + /* + * Enable workaround for FXSAVE leak on CPUs + * without a XSaveErPtr feature + */ + if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR))) set_cpu_bug(c, X86_BUG_FXSAVE_LEAK); cpu_detect_cache_sizes(c); From 2d8c24ed9310b89921bd0459d8df1258134015d4 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 31 Aug 2017 14:46:30 -0700 Subject: [PATCH 0872/3326] perf/x86: Enable free running PEBS for REGS_USER/INTR commit 2fe1bc1f501d55e5925b4035bcd85781adc76c63 upstream. [ Note, this is a Git cherry-pick of the following commit: a47ba4d77e12 ("perf/x86: Enable free running PEBS for REGS_USER/INTR") ... for easier x86 PTI code testing and back-porting. ] Currently free running PEBS is disabled when user or interrupt registers are requested. Most of the registers are actually available in the PEBS record and can be supported. So we just need to check for the supported registers and then allow it: it is all except for the segment register. For user registers this only works when the counter is limited to ring 3 only, so this also needs to be checked. 
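To make the new condition concrete, an illustrative perf_event_attr (not taken from the patch) that can now stay on the free-running, no-PMI PEBS path: only registers contained in the PEBS record are requested, and the event is restricted to ring 3 via exclude_kernel:

#include <linux/perf_event.h>
#include <asm/perf_regs.h>
#include <string.h>

static struct perf_event_attr pebs_attr;

static void setup_pebs_attr(void)
{
	memset(&pebs_attr, 0, sizeof(pebs_attr));
	pebs_attr.size		   = sizeof(pebs_attr);
	pebs_attr.type		   = PERF_TYPE_HARDWARE;
	pebs_attr.config	   = PERF_COUNT_HW_CPU_CYCLES;
	pebs_attr.sample_period	   = 100003;
	pebs_attr.sample_type	   = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER;
	pebs_attr.sample_regs_user = (1ULL << PERF_REG_X86_IP) |
				     (1ULL << PERF_REG_X86_SP);
	pebs_attr.exclude_kernel   = 1;	/* ring 3 only, as required for REGS_USER */
	pebs_attr.precise_ip	   = 2;	/* PEBS sampling */
}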
Signed-off-by: Andi Kleen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170831214630.21892-1-andi@firstfloor.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/core.c | 4 ++++ arch/x86/events/perf_event.h | 24 +++++++++++++++++++++++- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index f94855000d4ed0..09c26a4f139c12 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2958,6 +2958,10 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event) if (event->attr.use_clockid) flags &= ~PERF_SAMPLE_TIME; + if (!event->attr.exclude_kernel) + flags &= ~PERF_SAMPLE_REGS_USER; + if (event->attr.sample_regs_user & ~PEBS_REGS) + flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); return flags; } diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 4196f81ec0e1b0..f7aaadf9331fb7 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -85,13 +85,15 @@ struct amd_nb { * Flags PEBS can handle without an PMI. * * TID can only be handled by flushing at context switch. + * REGS_USER can be handled for events limited to ring 3. * */ #define PEBS_FREERUNNING_FLAGS \ (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \ PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \ PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \ - PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR) + PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \ + PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER) /* * A debug store configuration. @@ -110,6 +112,26 @@ struct debug_store { u64 pebs_event_reset[MAX_PEBS_EVENTS]; }; +#define PEBS_REGS \ + (PERF_REG_X86_AX | \ + PERF_REG_X86_BX | \ + PERF_REG_X86_CX | \ + PERF_REG_X86_DX | \ + PERF_REG_X86_DI | \ + PERF_REG_X86_SI | \ + PERF_REG_X86_SP | \ + PERF_REG_X86_BP | \ + PERF_REG_X86_IP | \ + PERF_REG_X86_FLAGS | \ + PERF_REG_X86_R8 | \ + PERF_REG_X86_R9 | \ + PERF_REG_X86_R10 | \ + PERF_REG_X86_R11 | \ + PERF_REG_X86_R12 | \ + PERF_REG_X86_R13 | \ + PERF_REG_X86_R14 | \ + PERF_REG_X86_R15) + /* * Per register state. */ From 065060cdd3de1f7a1f08284999081c4c38601ef7 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 12 Dec 2017 02:25:31 +0100 Subject: [PATCH 0873/3326] bpf: fix build issues on um due to mising bpf_perf_event.h commit ab95477e7cb35557ecfc837687007b646bab9a9f upstream. [ Note, this is a Git cherry-pick of the following commit: a23f06f06dbe ("bpf: fix build issues on um due to mising bpf_perf_event.h") ... for easier x86 PTI code testing and back-porting. ] Since c895f6f703ad ("bpf: correct broken uapi for BPF_PROG_TYPE_PERF_EVENT program type") um (uml) won't build on i386 or x86_64: [...] CC init/main.o In file included from ../include/linux/perf_event.h:18:0, from ../include/linux/trace_events.h:10, from ../include/trace/syscall.h:7, from ../include/linux/syscalls.h:82, from ../init/main.c:20: ../include/uapi/linux/bpf_perf_event.h:11:32: fatal error: asm/bpf_perf_event.h: No such file or directory #include [...] Lets add missing bpf_perf_event.h also to um arch. This seems to be the only one still missing. 
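For readers not familiar with the generic-y mechanism: each header listed there is emitted at build time as a one-line wrapper that forwards to the asm-generic copy, roughly as sketched below (the generated path shown is illustrative for ARCH=um):

/* arch/um/include/generated/asm/bpf_perf_event.h (generated, illustrative) */
#include <asm-generic/bpf_perf_event.h>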
Fixes: c895f6f703ad ("bpf: correct broken uapi for BPF_PROG_TYPE_PERF_EVENT program type") Reported-by: Randy Dunlap Suggested-by: Richard Weinberger Signed-off-by: Daniel Borkmann Tested-by: Randy Dunlap Cc: Hendrik Brueckner Cc: Richard Weinberger Acked-by: Alexei Starovoitov Acked-by: Richard Weinberger Signed-off-by: Alexei Starovoitov Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/um/include/asm/Kbuild | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 50a32c33d729ba..73c57f614c9e06 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -1,4 +1,5 @@ generic-y += barrier.h +generic-y += bpf_perf_event.h generic-y += bug.h generic-y += clkdev.h generic-y += current.h From 1aedecaf12a67de999eaf233cdb64839170ca035 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 24 Oct 2017 11:22:47 +0100 Subject: [PATCH 0874/3326] locking/barriers: Add implicit smp_read_barrier_depends() to READ_ONCE() commit c2bc66082e1048c7573d72e62f597bdc5ce13fea upstream. [ Note, this is a Git cherry-pick of the following commit: 76ebbe78f739 ("locking/barriers: Add implicit smp_read_barrier_depends() to READ_ONCE()") ... for easier x86 PTI code testing and back-porting. ] In preparation for the removal of lockless_dereference(), which is the same as READ_ONCE() on all architectures other than Alpha, add an implicit smp_read_barrier_depends() to READ_ONCE() so that it can be used to head dependency chains on all architectures. Signed-off-by: Will Deacon Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1508840570-22169-3-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- include/linux/compiler.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 202710420d6deb..712cd8bb00b4b9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -341,6 +341,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __read_once_size(&(x), __u.__c, sizeof(x)); \ else \ __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ __u.__val; \ }) #define READ_ONCE(x) __READ_ONCE(x, 1) From 5383f45db38c0459f8d3d3f90f7f0a132ad104f3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 24 Oct 2017 11:22:48 +0100 Subject: [PATCH 0875/3326] locking/barriers: Convert users of lockless_dereference() to READ_ONCE() commit 3382290ed2d5e275429cef510ab21889d3ccd164 upstream. [ Note, this is a Git cherry-pick of the following commit: 506458efaf15 ("locking/barriers: Convert users of lockless_dereference() to READ_ONCE()") ... for easier x86 PTI code testing and back-porting. ] READ_ONCE() now has an implicit smp_read_barrier_depends() call, so it can be used instead of lockless_dereference() without any change in semantics. Signed-off-by: Will Deacon Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1508840570-22169-4-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/core.c | 2 +- arch/x86/include/asm/mmu_context.h | 4 ++-- arch/x86/kernel/ldt.c | 2 +- drivers/md/dm-mpath.c | 20 ++++++++++---------- fs/dcache.c | 4 ++-- fs/overlayfs/ovl_entry.h | 2 +- fs/overlayfs/readdir.c | 2 +- include/linux/rculist.h | 4 ++-- include/linux/rcupdate.h | 4 ++-- kernel/events/core.c | 4 ++-- kernel/seccomp.c | 2 +- kernel/task_work.c | 2 +- mm/slab.h | 2 +- 13 files changed, 27 insertions(+), 27 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 80534d3c248001..589af1eec7c1cb 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2371,7 +2371,7 @@ static unsigned long get_segment_base(unsigned int segment) struct ldt_struct *ldt; /* IRQs are off, so this synchronizes with smp_store_release */ - ldt = lockless_dereference(current->active_mm->context.ldt); + ldt = READ_ONCE(current->active_mm->context.ldt); if (!ldt || idx >= ldt->nr_entries) return 0; diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 6699fc44164419..6d16d15d09a0da 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -73,8 +73,8 @@ static inline void load_mm_ldt(struct mm_struct *mm) #ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; - /* lockless_dereference synchronizes with smp_store_release */ - ldt = lockless_dereference(mm->context.ldt); + /* READ_ONCE synchronizes with smp_store_release */ + ldt = READ_ONCE(mm->context.ldt); /* * Any change to mm->context.ldt is followed by an IPI to all diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index ae5615b03def7b..1c1eae9613406b 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -103,7 +103,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt) static void install_ldt(struct mm_struct *current_mm, struct ldt_struct *ldt) { - /* Synchronizes with lockless_dereference in load_mm_ldt. */ + /* Synchronizes with READ_ONCE in load_mm_ldt. */ smp_store_release(¤t_mm->context.ldt, ldt); /* Activate the LDT for all CPUs using current_mm. */ diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 35e82b14ded75f..ddf0a4341ae298 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m, pgpath = path_to_pgpath(path); - if (unlikely(lockless_dereference(m->current_pg) != pg)) { + if (unlikely(READ_ONCE(m->current_pg) != pg)) { /* Only update current_pgpath if pg changed */ spin_lock_irqsave(&m->lock, flags); m->current_pgpath = pgpath; @@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) } /* Were we instructed to switch PG? */ - if (lockless_dereference(m->next_pg)) { + if (READ_ONCE(m->next_pg)) { spin_lock_irqsave(&m->lock, flags); pg = m->next_pg; if (!pg) { @@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) /* Don't change PG until it has no remaining paths */ check_current_pg: - pg = lockless_dereference(m->current_pg); + pg = READ_ONCE(m->current_pg); if (pg) { pgpath = choose_path_in_pg(m, pg, nr_bytes); if (!IS_ERR_OR_NULL(pgpath)) @@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, struct request *clone; /* Do we need to select a new pgpath? 
*/ - pgpath = lockless_dereference(m->current_pgpath); + pgpath = READ_ONCE(m->current_pgpath); if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) pgpath = choose_pgpath(m, nr_bytes); @@ -533,7 +533,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m bool queue_io; /* Do we need to select a new pgpath? */ - pgpath = lockless_dereference(m->current_pgpath); + pgpath = READ_ONCE(m->current_pgpath); queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); if (!pgpath || !queue_io) pgpath = choose_pgpath(m, nr_bytes); @@ -1802,7 +1802,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, struct pgpath *current_pgpath; int r; - current_pgpath = lockless_dereference(m->current_pgpath); + current_pgpath = READ_ONCE(m->current_pgpath); if (!current_pgpath) current_pgpath = choose_pgpath(m, 0); @@ -1824,7 +1824,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, } if (r == -ENOTCONN) { - if (!lockless_dereference(m->current_pg)) { + if (!READ_ONCE(m->current_pg)) { /* Path status changed, redo selection */ (void) choose_pgpath(m, 0); } @@ -1893,9 +1893,9 @@ static int multipath_busy(struct dm_target *ti) return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED); /* Guess which priority_group will be used at next mapping time */ - pg = lockless_dereference(m->current_pg); - next_pg = lockless_dereference(m->next_pg); - if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg)) + pg = READ_ONCE(m->current_pg); + next_pg = READ_ONCE(m->next_pg); + if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg)) pg = next_pg; if (!pg) { diff --git a/fs/dcache.c b/fs/dcache.c index f90141387f01ea..34c852af215c0f 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c { /* * Be careful about RCU walk racing with rename: - * use 'lockless_dereference' to fetch the name pointer. + * use 'READ_ONCE' to fetch the name pointer. * * NOTE! Even if a rename will mean that the length * was not loaded atomically, we don't care. 
The @@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c * early because the data cannot match (there can * be no NUL in the ct/tcount data) */ - const unsigned char *cs = lockless_dereference(dentry->d_name.name); + const unsigned char *cs = READ_ONCE(dentry->d_name.name); return dentry_string_cmp(cs, ct, tcount); } diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index 25d9b5adcd4290..36b49bd09264a5 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode) static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi) { - return lockless_dereference(oi->__upperdentry); + return READ_ONCE(oi->__upperdentry); } diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index b2c7f33e08fce1..d94a51dc4e32da 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -757,7 +757,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) { struct inode *inode = file_inode(file); - realfile = lockless_dereference(od->upperfile); + realfile = READ_ONCE(od->upperfile); if (!realfile) { struct path upperpath; diff --git a/include/linux/rculist.h b/include/linux/rculist.h index c2cdd45a880aa0..127f534fec94aa 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -275,7 +275,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_entry_rcu(ptr, type, member) \ - container_of(lockless_dereference(ptr), type, member) + container_of(READ_ONCE(ptr), type, member) /* * Where are list_empty_rcu() and list_first_entry_rcu()? @@ -368,7 +368,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * example is when items are added to the list, but never deleted. */ #define list_entry_lockless(ptr, type, member) \ - container_of((typeof(ptr))lockless_dereference(ptr), type, member) + container_of((typeof(ptr))READ_ONCE(ptr), type, member) /** * list_for_each_entry_lockless - iterate over rcu list of given type diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 1a9f70d44af954..a6ddc42f87a578 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -346,7 +346,7 @@ static inline void rcu_preempt_sleep_check(void) { } #define __rcu_dereference_check(p, c, space) \ ({ \ /* Dependency order vs. p above. */ \ - typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \ + typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ rcu_dereference_sparse(p, space); \ ((typeof(*p) __force __kernel *)(________p1)); \ @@ -360,7 +360,7 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_dereference_raw(p) \ ({ \ /* Dependency order vs. p above. */ \ - typeof(p) ________p1 = lockless_dereference(p); \ + typeof(p) ________p1 = READ_ONCE(p); \ ((typeof(*p) __force __kernel *)(________p1)); \ }) diff --git a/kernel/events/core.c b/kernel/events/core.c index 4f1d4bfc607a32..24ebad5567b4a7 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4233,7 +4233,7 @@ static void perf_remove_from_owner(struct perf_event *event) * indeed free this event, otherwise we need to serialize on * owner->perf_event_mutex. 
*/ - owner = lockless_dereference(event->owner); + owner = READ_ONCE(event->owner); if (owner) { /* * Since delayed_put_task_struct() also drops the last @@ -4330,7 +4330,7 @@ int perf_event_release_kernel(struct perf_event *event) * Cannot change, child events are not migrated, see the * comment with perf_event_ctx_lock_nested(). */ - ctx = lockless_dereference(child->ctx); + ctx = READ_ONCE(child->ctx); /* * Since child_mutex nests inside ctx::mutex, we must jump * through hoops. We start by grabbing a reference on the ctx. diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 418a1c045933dc..5f0dfb2abb8d39 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -190,7 +190,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd, u32 ret = SECCOMP_RET_ALLOW; /* Make sure cross-thread synced filter points somewhere sane. */ struct seccomp_filter *f = - lockless_dereference(current->seccomp.filter); + READ_ONCE(current->seccomp.filter); /* Ensure unexpected behavior doesn't result in failing open. */ if (unlikely(WARN_ON(f == NULL))) diff --git a/kernel/task_work.c b/kernel/task_work.c index 5718b3ea202a3f..0fef395662a6ea 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -68,7 +68,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func) * we raced with task_work_run(), *pprev == NULL/exited. */ raw_spin_lock_irqsave(&task->pi_lock, flags); - while ((work = lockless_dereference(*pprev))) { + while ((work = READ_ONCE(*pprev))) { if (work->func != func) pprev = &work->next; else if (cmpxchg(pprev, work, work->next) == work) diff --git a/mm/slab.h b/mm/slab.h index 028cdc7df67ec9..86d7c7d860f92c 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -259,7 +259,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx) * memcg_caches issues a write barrier to match this (see * memcg_create_kmem_cache()). */ - cachep = lockless_dereference(arr->entries[idx]); + cachep = READ_ONCE(arr->entries[idx]); rcu_read_unlock(); return cachep; From d455b71e7393975c5c50c26bd2199f19c6eed80f Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Wed, 15 Nov 2017 17:36:35 -0800 Subject: [PATCH 0876/3326] x86/mm/kasan: Don't use vmemmap_populate() to initialize shadow commit 2aeb07365bcd489620f71390a7d2031cd4dfb83e upstream. [ Note, this is a Git cherry-pick of the following commit: d17a1d97dc20: ("x86/mm/kasan: don't use vmemmap_populate() to initialize shadow") ... for easier x86 PTI code testing and back-porting. ] The KASAN shadow is currently mapped using vmemmap_populate() since that provides a semi-convenient way to map pages into init_top_pgt. However, since that no longer zeroes the mapped pages, it is not suitable for KASAN, which requires zeroed shadow memory. Add kasan_populate_shadow() interface and use it instead of vmemmap_populate(). Besides, this allows us to take advantage of gigantic pages and use them to populate the shadow, which should save us some memory wasted on page tables and reduce TLB pressure. Link: http://lkml.kernel.org/r/20171103185147.2688-2-pasha.tatashin@oracle.com Signed-off-by: Andrey Ryabinin Signed-off-by: Pavel Tatashin Cc: Andy Lutomirski Cc: Steven Sistare Cc: Daniel Jordan Cc: Bob Picco Cc: Michal Hocko Cc: Alexander Potapenko Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: Christian Borntraeger Cc: David S. Miller Cc: Dmitry Vyukov Cc: Heiko Carstens Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Mark Rutland Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Sam Ravnborg Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/Kconfig | 2 +- arch/x86/mm/kasan_init_64.c | 143 ++++++++++++++++++++++++++++++++++-- 2 files changed, 137 insertions(+), 8 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 59d5b3fa88e601..48646160eb8369 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -108,7 +108,7 @@ config X86 select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP + select HAVE_ARCH_KASAN if X86_64 select HAVE_ARCH_KGDB select HAVE_ARCH_KMEMCHECK select HAVE_ARCH_MMAP_RND_BITS if MMU diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 2b60dc6e64b1da..99dfed6dfef8b2 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -4,12 +4,14 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -18,7 +20,134 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES]; static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); -static int __init map_range(struct range *range) +static __init void *early_alloc(size_t size, int nid) +{ + return memblock_virt_alloc_try_nid_nopanic(size, size, + __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); +} + +static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, + unsigned long end, int nid) +{ + pte_t *pte; + + if (pmd_none(*pmd)) { + void *p; + + if (boot_cpu_has(X86_FEATURE_PSE) && + ((end - addr) == PMD_SIZE) && + IS_ALIGNED(addr, PMD_SIZE)) { + p = early_alloc(PMD_SIZE, nid); + if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) + return; + else if (p) + memblock_free(__pa(p), PMD_SIZE); + } + + p = early_alloc(PAGE_SIZE, nid); + pmd_populate_kernel(&init_mm, pmd, p); + } + + pte = pte_offset_kernel(pmd, addr); + do { + pte_t entry; + void *p; + + if (!pte_none(*pte)) + continue; + + p = early_alloc(PAGE_SIZE, nid); + entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); + set_pte_at(&init_mm, addr, pte, entry); + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +static void __init kasan_populate_pud(pud_t *pud, unsigned long addr, + unsigned long end, int nid) +{ + pmd_t *pmd; + unsigned long next; + + if (pud_none(*pud)) { + void *p; + + if (boot_cpu_has(X86_FEATURE_GBPAGES) && + ((end - addr) == PUD_SIZE) && + IS_ALIGNED(addr, PUD_SIZE)) { + p = early_alloc(PUD_SIZE, nid); + if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) + return; + else if (p) + memblock_free(__pa(p), PUD_SIZE); + } + + p = early_alloc(PAGE_SIZE, nid); + pud_populate(&init_mm, pud, p); + } + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (!pmd_large(*pmd)) + kasan_populate_pmd(pmd, addr, next, nid); + } while (pmd++, addr = next, addr != end); +} + +static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr, + unsigned long end, int nid) +{ + pud_t *pud; + unsigned long next; + + if (p4d_none(*p4d)) { + void *p = early_alloc(PAGE_SIZE, nid); + + p4d_populate(&init_mm, p4d, p); + } + + pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (!pud_large(*pud)) + kasan_populate_pud(pud, addr, next, nid); + } while (pud++, addr = next, addr != end); +} + +static void __init kasan_populate_pgd(pgd_t *pgd, unsigned 
long addr, + unsigned long end, int nid) +{ + void *p; + p4d_t *p4d; + unsigned long next; + + if (pgd_none(*pgd)) { + p = early_alloc(PAGE_SIZE, nid); + pgd_populate(&init_mm, pgd, p); + } + + p4d = p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + kasan_populate_p4d(p4d, addr, next, nid); + } while (p4d++, addr = next, addr != end); +} + +static void __init kasan_populate_shadow(unsigned long addr, unsigned long end, + int nid) +{ + pgd_t *pgd; + unsigned long next; + + addr = addr & PAGE_MASK; + end = round_up(end, PAGE_SIZE); + pgd = pgd_offset_k(addr); + do { + next = pgd_addr_end(addr, end); + kasan_populate_pgd(pgd, addr, next, nid); + } while (pgd++, addr = next, addr != end); +} + +static void __init map_range(struct range *range) { unsigned long start; unsigned long end; @@ -26,7 +155,7 @@ static int __init map_range(struct range *range) start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start)); end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end)); - return vmemmap_populate(start, end, NUMA_NO_NODE); + kasan_populate_shadow(start, end, early_pfn_to_nid(range->start)); } static void __init clear_pgds(unsigned long start, @@ -189,16 +318,16 @@ void __init kasan_init(void) if (pfn_mapped[i].end == 0) break; - if (map_range(&pfn_mapped[i])) - panic("kasan: unable to allocate shadow!"); + map_range(&pfn_mapped[i]); } + kasan_populate_zero_shadow( kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), kasan_mem_to_shadow((void *)__START_KERNEL_map)); - vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext), - (unsigned long)kasan_mem_to_shadow(_end), - NUMA_NO_NODE); + kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), + (unsigned long)kasan_mem_to_shadow(_end), + early_pfn_to_nid(__pa(_stext))); kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), (void *)KASAN_SHADOW_END); From 21ddc15fa82b7a2a7b6209437c4d137672370c86 Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Mon, 4 Dec 2017 15:07:07 +0100 Subject: [PATCH 0877/3326] x86/entry/64/paravirt: Use paravirt-safe macro to access eflags commit e17f8234538d1ff708673f287a42457c4dee720d upstream. Commit 1d3e53e8624a ("x86/entry/64: Refactor IRQ stacks and make them NMI-safe") added DEBUG_ENTRY_ASSERT_IRQS_OFF macro that acceses eflags using 'pushfq' instruction when testing for IF bit. On PV Xen guests looking at IF flag directly will always see it set, resulting in 'ud2'. Introduce SAVE_FLAGS() macro that will use appropriate save_fl pv op when running paravirt. Signed-off-by: Boris Ostrovsky Signed-off-by: Thomas Gleixner Reviewed-by: Juergen Gross Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: xen-devel@lists.xenproject.org Link: https://lkml.kernel.org/r/20171204150604.899457242@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 7 ++++--- arch/x86/include/asm/irqflags.h | 3 +++ arch/x86/include/asm/paravirt.h | 9 +++++++++ arch/x86/kernel/asm-offsets_64.c | 3 +++ 4 files changed, 19 insertions(+), 3 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 5063ed1214dd7d..162ce952d71611 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -462,12 +462,13 @@ END(irq_entries_start) .macro DEBUG_ENTRY_ASSERT_IRQS_OFF #ifdef CONFIG_DEBUG_ENTRY - pushfq - testl $X86_EFLAGS_IF, (%rsp) + pushq %rax + SAVE_FLAGS(CLBR_RAX) + testl $X86_EFLAGS_IF, %eax jz .Lokay_\@ ud2 .Lokay_\@: - addq $8, %rsp + popq %rax #endif .endm diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c8ef23f2c28f17..89f08955fff733 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -142,6 +142,9 @@ static inline notrace unsigned long arch_local_irq_save(void) swapgs; \ sysretl +#ifdef CONFIG_DEBUG_ENTRY +#define SAVE_FLAGS(x) pushfq; popq %rax +#endif #else #define INTERRUPT_RETURN iret #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 283efcaac8aff8..892df375b6155a 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -927,6 +927,15 @@ extern void default_banner(void); PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ CLBR_NONE, \ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) + +#ifdef CONFIG_DEBUG_ENTRY +#define SAVE_FLAGS(clobbers) \ + PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \ + PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ + call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \ + PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) +#endif + #endif /* CONFIG_X86_32 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 630212fa9b9da3..e3a5175a444bc0 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -23,6 +23,9 @@ int main(void) #ifdef CONFIG_PARAVIRT OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); +#ifdef CONFIG_DEBUG_ENTRY + OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl); +#endif BLANK(); #endif From 40ddc692b5a11cf7c3f9677744676af617d54a24 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:08 +0100 Subject: [PATCH 0878/3326] x86/unwinder/orc: Dont bail on stack overflow commit d3a09104018cf2ad5973dfa8a9c138ef9f5015a3 upstream. If the stack overflows into a guard page and the ORC unwinder should work well: by construction, there can't be any meaningful data in the guard page because no writes to the guard page will have succeeded. But there is a bug that prevents unwinding from working correctly: if the starting register state has RSP pointing into a stack guard page, the ORC unwinder bails out immediately. Instead of bailing out immediately check whether the next page up is a valid check page and if so analyze that. As a result the ORC unwinder will start the unwind. Tested by intentionally overflowing the task stack. 
The result is an accurate call trace instead of a trace consisting purely of '?' entries. There are a few other bugs that are triggered if the unwinder encounters a stack overflow after the first step, but they are outside the scope of this fix. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150604.991389777@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/unwind_orc.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index a3f973b2c97a03..ff8e1132b2ae41 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -553,8 +553,18 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, } if (get_stack_info((unsigned long *)state->sp, state->task, - &state->stack_info, &state->stack_mask)) - return; + &state->stack_info, &state->stack_mask)) { + /* + * We weren't on a valid stack. It's possible that + * we overflowed a valid stack into a guard page. + * See if the next page up is valid so that we can + * generate some kind of backtrace if this happens. + */ + void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp); + if (get_stack_info(next_page, state->task, &state->stack_info, + &state->stack_mask)) + return; + } /* * The caller can provide the address of the first frame directly From 5209e8ac937261925c12db63a28cfaa033fa30ed Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 4 Dec 2017 15:07:09 +0100 Subject: [PATCH 0879/3326] x86/unwinder: Handle stack overflows more gracefully commit b02fcf9ba1211097754b286043cd87a8b4907e75 upstream. There are at least two unwinder bugs hindering the debugging of stack-overflow crashes: - It doesn't deal gracefully with the case where the stack overflows and the stack pointer itself isn't on a valid stack but the to-be-dereferenced data *is*. - The ORC oops dump code doesn't know how to print partial pt_regs, for the case where if we get an interrupt/exception in *early* entry code before the full pt_regs have been saved. Fix both issues. http://lkml.kernel.org/r/20171126024031.uxi4numpbjm5rlbr@treble Signed-off-by: Josh Poimboeuf Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.071425003@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/kdebug.h | 1 + arch/x86/include/asm/unwind.h | 7 ++++ arch/x86/kernel/dumpstack.c | 32 ++++++++++++--- arch/x86/kernel/process_64.c | 11 +++--- arch/x86/kernel/unwind_orc.c | 74 ++++++++++++----------------------- 5 files changed, 65 insertions(+), 60 deletions(-) diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h index f86a8caa561e88..395c9631e000a3 100644 --- a/arch/x86/include/asm/kdebug.h +++ b/arch/x86/include/asm/kdebug.h @@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long); extern int __must_check __die(const char *, struct pt_regs *, long); extern void show_stack_regs(struct pt_regs *regs); extern void __show_regs(struct pt_regs *regs, int all); +extern void show_iret_regs(struct pt_regs *regs); extern unsigned long oops_begin(void); extern void oops_end(unsigned long, struct pt_regs *, int signr); diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index e9cc6fe1fc6f95..c1688c2d0a128f 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -7,6 +7,9 @@ #include #include +#define IRET_FRAME_OFFSET (offsetof(struct pt_regs, ip)) +#define IRET_FRAME_SIZE (sizeof(struct pt_regs) - IRET_FRAME_OFFSET) + struct unwind_state { struct stack_info stack_info; unsigned long stack_mask; @@ -52,6 +55,10 @@ void unwind_start(struct unwind_state *state, struct task_struct *task, } #if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER) +/* + * WARNING: The entire pt_regs may not be safe to dereference. In some cases, + * only the iret frame registers are accessible. Use with caution! + */ static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) { if (unwind_done(state)) diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index f13b4c00a5de4b..0bc95be5c6385c 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -50,6 +50,28 @@ static void printk_stack_address(unsigned long address, int reliable, printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address); } +void show_iret_regs(struct pt_regs *regs) +{ + printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip); + printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss, + regs->sp, regs->flags); +} + +static void show_regs_safe(struct stack_info *info, struct pt_regs *regs) +{ + if (on_stack(info, regs, sizeof(*regs))) + __show_regs(regs, 0); + else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET, + IRET_FRAME_SIZE)) { + /* + * When an interrupt or exception occurs in entry code, the + * full pt_regs might not have been saved yet. In that case + * just print the iret frame. + */ + show_iret_regs(regs); + } +} + void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, char *log_lvl) { @@ -94,8 +116,8 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, if (stack_name) printk("%s <%s>\n", log_lvl, stack_name); - if (regs && on_stack(&stack_info, regs, sizeof(*regs))) - __show_regs(regs, 0); + if (regs) + show_regs_safe(&stack_info, regs); /* * Scan the stack, printing any text addresses we find. 
At the @@ -119,7 +141,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, /* * Don't print regs->ip again if it was already printed - * by __show_regs() below. + * by show_regs_safe() below. */ if (regs && stack == ®s->ip) goto next; @@ -155,8 +177,8 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, /* if the frame has entry regs, print them */ regs = unwind_get_entry_regs(&state); - if (regs && on_stack(&stack_info, regs, sizeof(*regs))) - __show_regs(regs, 0); + if (regs) + show_regs_safe(&stack_info, regs); } if (stack_name) diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index eeeb34f85c250e..01b119bebb6801 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -69,9 +69,8 @@ void __show_regs(struct pt_regs *regs, int all) unsigned int fsindex, gsindex; unsigned int ds, cs, es; - printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip); - printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss, - regs->sp, regs->flags); + show_iret_regs(regs); + if (regs->orig_ax != -1) pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax); else @@ -88,6 +87,9 @@ void __show_regs(struct pt_regs *regs, int all) printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n", regs->r13, regs->r14, regs->r15); + if (!all) + return; + asm("movl %%ds,%0" : "=r" (ds)); asm("movl %%cs,%0" : "=r" (cs)); asm("movl %%es,%0" : "=r" (es)); @@ -98,9 +100,6 @@ void __show_regs(struct pt_regs *regs, int all) rdmsrl(MSR_GS_BASE, gs); rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); - if (!all) - return; - cr0 = read_cr0(); cr2 = read_cr2(); cr3 = __read_cr3(); diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index ff8e1132b2ae41..be86a865087a6b 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -253,22 +253,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state) return NULL; } -static bool stack_access_ok(struct unwind_state *state, unsigned long addr, +static bool stack_access_ok(struct unwind_state *state, unsigned long _addr, size_t len) { struct stack_info *info = &state->stack_info; + void *addr = (void *)_addr; - /* - * If the address isn't on the current stack, switch to the next one. - * - * We may have to traverse multiple stacks to deal with the possibility - * that info->next_sp could point to an empty stack and the address - * could be on a subsequent stack. - */ - while (!on_stack(info, (void *)addr, len)) - if (get_stack_info(info->next_sp, state->task, info, - &state->stack_mask)) - return false; + if (!on_stack(info, addr, len) && + (get_stack_info(addr, state->task, info, &state->stack_mask))) + return false; return true; } @@ -283,42 +276,32 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr, return true; } -#define REGS_SIZE (sizeof(struct pt_regs)) -#define SP_OFFSET (offsetof(struct pt_regs, sp)) -#define IRET_REGS_SIZE (REGS_SIZE - offsetof(struct pt_regs, ip)) -#define IRET_SP_OFFSET (SP_OFFSET - offsetof(struct pt_regs, ip)) - static bool deref_stack_regs(struct unwind_state *state, unsigned long addr, - unsigned long *ip, unsigned long *sp, bool full) + unsigned long *ip, unsigned long *sp) { - size_t regs_size = full ? REGS_SIZE : IRET_REGS_SIZE; - size_t sp_offset = full ? 
SP_OFFSET : IRET_SP_OFFSET; - struct pt_regs *regs = (struct pt_regs *)(addr + regs_size - REGS_SIZE); - - if (IS_ENABLED(CONFIG_X86_64)) { - if (!stack_access_ok(state, addr, regs_size)) - return false; - - *ip = regs->ip; - *sp = regs->sp; + struct pt_regs *regs = (struct pt_regs *)addr; - return true; - } + /* x86-32 support will be more complicated due to the ®s->sp hack */ + BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32)); - if (!stack_access_ok(state, addr, sp_offset)) + if (!stack_access_ok(state, addr, sizeof(struct pt_regs))) return false; *ip = regs->ip; + *sp = regs->sp; + return true; +} - if (user_mode(regs)) { - if (!stack_access_ok(state, addr + sp_offset, - REGS_SIZE - SP_OFFSET)) - return false; +static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr, + unsigned long *ip, unsigned long *sp) +{ + struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET; - *sp = regs->sp; - } else - *sp = (unsigned long)®s->sp; + if (!stack_access_ok(state, addr, IRET_FRAME_SIZE)) + return false; + *ip = regs->ip; + *sp = regs->sp; return true; } @@ -327,7 +310,6 @@ bool unwind_next_frame(struct unwind_state *state) unsigned long ip_p, sp, orig_ip, prev_sp = state->sp; enum stack_type prev_type = state->stack_info.type; struct orc_entry *orc; - struct pt_regs *ptregs; bool indirect = false; if (unwind_done(state)) @@ -435,7 +417,7 @@ bool unwind_next_frame(struct unwind_state *state) break; case ORC_TYPE_REGS: - if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) { + if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) { orc_warn("can't dereference registers at %p for ip %pB\n", (void *)sp, (void *)orig_ip); goto done; @@ -447,20 +429,14 @@ bool unwind_next_frame(struct unwind_state *state) break; case ORC_TYPE_REGS_IRET: - if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) { + if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) { orc_warn("can't dereference iret registers at %p for ip %pB\n", (void *)sp, (void *)orig_ip); goto done; } - ptregs = container_of((void *)sp, struct pt_regs, ip); - if ((unsigned long)ptregs >= prev_sp && - on_stack(&state->stack_info, ptregs, REGS_SIZE)) { - state->regs = ptregs; - state->full_regs = false; - } else - state->regs = NULL; - + state->regs = (void *)sp - IRET_FRAME_OFFSET; + state->full_regs = false; state->signal = true; break; From 996d087af015da99632fde450092923d574d4bbc Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:10 +0100 Subject: [PATCH 0880/3326] x86/irq: Remove an old outdated comment about context tracking races commit 6669a692605547892a026445e460bf233958bd7f upstream. That race has been fixed and code cleaned up for a while now. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Reviewed-by: Thomas Gleixner Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.150551639@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/irq.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 52089c043160b5..aa9d51eea9d0e8 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -219,18 +219,6 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) /* high bit used in ret_from_ code */ unsigned vector = ~regs->orig_ax; - /* - * NB: Unlike exception entries, IRQ entries do not reliably - * handle context tracking in the low-level entry code. This is - * because syscall entries execute briefly with IRQs on before - * updating context tracking state, so we can take an IRQ from - * kernel mode with CONTEXT_USER. The low-level entry code only - * updates the context if we came from user mode, so we won't - * switch to CONTEXT_KERNEL. We'll fix that once the syscall - * code is cleaned up enough that we can cleanly defer enabling - * IRQs. - */ - entering_irq(); /* entering_irq() tells RCU that we're not quiescent. Check it. */ From e9b7b111e5be68e41e1984bcaa03535cb5e76141 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:11 +0100 Subject: [PATCH 0881/3326] x86/irq/64: Print the offending IP in the stack overflow warning commit 4f3789e792296e21405f708cf3cb409d7c7d5683 upstream. In case something goes wrong with unwind (not unlikely in case of overflow), print the offending IP where we detected the overflow. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Reviewed-by: Thomas Gleixner Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.231677119@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/irq_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 020efbf5786b35..d86e344f5b3deb 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -57,10 +57,10 @@ static inline void stack_overflow_check(struct pt_regs *regs) if (regs->sp >= estack_top && regs->sp <= estack_bottom) return; - WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n", + WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n", current->comm, curbase, regs->sp, irq_stack_top, irq_stack_bottom, - estack_top, estack_bottom); + estack_top, estack_bottom, (void *)regs->ip); if (sysctl_panic_on_stackoverflow) panic("low stack detected by irq handler - check messages\n"); From 9b654aba0360c10baa7cc8d15f976e7ca1af44ac Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:12 +0100 Subject: [PATCH 0882/3326] x86/entry/64: Allocate and enable the SYSENTER stack commit 1a79797b58cddfa948420a7553241c79c013e3ca upstream. This will simplify future changes that want scratch variables early in the SYSENTER handler -- they'll be able to spill registers to the stack. It also lets us get rid of a SWAPGS_UNSAFE_STACK user. This does not depend on CONFIG_IA32_EMULATION=y because we'll want the stack space even without IA32 emulation. As far as I can tell, the reason that this wasn't done from day 1 is that we use IST for #DB and #BP, which is IMO rather nasty and causes a lot more problems than it solves. But, since #DB uses IST, we don't actually need a real stack for SYSENTER (because SYSENTER with TF set will invoke #DB on the IST stack rather than the SYSENTER stack). I want to remove IST usage from these vectors some day, and this patch is a prerequisite for that as well. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.312726423@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64_compat.S | 2 +- arch/x86/include/asm/processor.h | 3 --- arch/x86/kernel/asm-offsets.c | 5 +++++ arch/x86/kernel/asm-offsets_32.c | 5 ----- arch/x86/kernel/cpu/common.c | 4 +++- arch/x86/kernel/process.c | 2 -- arch/x86/kernel/traps.c | 3 +-- 7 files changed, 10 insertions(+), 14 deletions(-) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 568e130d932cd2..dcc6987f9bae16 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -48,7 +48,7 @@ */ ENTRY(entry_SYSENTER_compat) /* Interrupts are off on entry. */ - SWAPGS_UNSAFE_STACK + SWAPGS movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 2db7cf720b04b2..789dad5da20f47 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -339,14 +339,11 @@ struct tss_struct { */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; -#ifdef CONFIG_X86_32 /* * Space for the temporary SYSENTER stack. */ unsigned long SYSENTER_stack_canary; unsigned long SYSENTER_stack[64]; -#endif - } ____cacheline_aligned; DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 8ea78275480daf..b275863128ebf0 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -93,4 +93,9 @@ void common(void) { BLANK(); DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); + + /* Offset from cpu_tss to SYSENTER_stack */ + OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack); + /* Size of SYSENTER_stack */ + DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack)); } diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index dedf428b20b68b..52ce4ea16e5386 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -50,11 +50,6 @@ void foo(void) DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - offsetofend(struct tss_struct, SYSENTER_stack)); - /* Offset from cpu_tss to SYSENTER_stack */ - OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack); - /* Size of SYSENTER_stack */ - DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack)); - #ifdef CONFIG_CC_STACKPROTECTOR BLANK(); OFFSET(stack_canary_offset, stack_canary, canary); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index cdf79ab628c244..22f5421701983e 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1361,7 +1361,9 @@ void syscall_init(void) * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). 
*/ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); - wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); + wrmsrl_safe(MSR_IA32_SYSENTER_ESP, + (unsigned long)this_cpu_ptr(&cpu_tss) + + offsetofend(struct tss_struct, SYSENTER_stack)); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 97fb3e5737f5d0..35d674157fdae6 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -71,9 +71,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { */ .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, #endif -#ifdef CONFIG_X86_32 .SYSENTER_stack_canary = STACK_END_MAGIC, -#endif }; EXPORT_PER_CPU_SYMBOL(cpu_tss); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index d366adfc61dabe..d3e3bbd5d3a063 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -794,14 +794,13 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) debug_stack_usage_dec(); exit: -#if defined(CONFIG_X86_32) /* * This is the most likely code path that involves non-trivial use * of the SYSENTER stack. Check that we haven't overrun it. */ WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC, "Overran or corrupted SYSENTER stack\n"); -#endif + ist_exit(regs); } NOKPROBE_SYMBOL(do_debug); From 2329da3fc03d19a8fc3bb212972b97e9f57abb32 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:13 +0100 Subject: [PATCH 0883/3326] x86/dumpstack: Add get_stack_info() support for the SYSENTER stack commit 33a2f1a6c4d7c0a02d1c006fb0379cc5ca3b96bb upstream. get_stack_info() doesn't currently know about the SYSENTER stack, so unwinding will fail if we entered the kernel on the SYSENTER stack and haven't fully switched off. Teach get_stack_info() about the SYSENTER stack. With future patches applied that run part of the entry code on the SYSENTER stack and introduce an intentional BUG(), I would get: PANIC: double fault, error_code: 0x0 ... RIP: 0010:do_error_trap+0x33/0x1c0 ... Call Trace: Code: ... With this patch, I get: PANIC: double fault, error_code: 0x0 ... Call Trace: ? async_page_fault+0x36/0x60 ? invalid_op+0x22/0x40 ? async_page_fault+0x36/0x60 ? sync_regs+0x3c/0x40 ? sync_regs+0x2e/0x40 ? error_entry+0x6c/0xd0 ? async_page_fault+0x36/0x60 Code: ... which is a lot more informative. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.392711508@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/stacktrace.h | 3 +++ arch/x86/kernel/dumpstack.c | 19 +++++++++++++++++++ arch/x86/kernel/dumpstack_32.c | 6 ++++++ arch/x86/kernel/dumpstack_64.c | 6 ++++++ 4 files changed, 34 insertions(+) diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index 8da111b3c342bb..f8062bfd43a072 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -16,6 +16,7 @@ enum stack_type { STACK_TYPE_TASK, STACK_TYPE_IRQ, STACK_TYPE_SOFTIRQ, + STACK_TYPE_SYSENTER, STACK_TYPE_EXCEPTION, STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1, }; @@ -28,6 +29,8 @@ struct stack_info { bool in_task_stack(unsigned long *stack, struct task_struct *task, struct stack_info *info); +bool in_sysenter_stack(unsigned long *stack, struct stack_info *info); + int get_stack_info(unsigned long *stack, struct task_struct *task, struct stack_info *info, unsigned long *visit_mask); diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 0bc95be5c6385c..a33a1373a2522f 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -43,6 +43,25 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task, return true; } +bool in_sysenter_stack(unsigned long *stack, struct stack_info *info) +{ + struct tss_struct *tss = this_cpu_ptr(&cpu_tss); + + /* Treat the canary as part of the stack for unwinding purposes. 
*/ + void *begin = &tss->SYSENTER_stack_canary; + void *end = (void *)&tss->SYSENTER_stack + sizeof(tss->SYSENTER_stack); + + if ((void *)stack < begin || (void *)stack >= end) + return false; + + info->type = STACK_TYPE_SYSENTER; + info->begin = begin; + info->end = end; + info->next_sp = NULL; + + return true; +} + static void printk_stack_address(unsigned long address, int reliable, char *log_lvl) { diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index daefae83a3aa86..5ff13a6b368069 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -26,6 +26,9 @@ const char *stack_type_name(enum stack_type type) if (type == STACK_TYPE_SOFTIRQ) return "SOFTIRQ"; + if (type == STACK_TYPE_SYSENTER) + return "SYSENTER"; + return NULL; } @@ -93,6 +96,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, if (task != current) goto unknown; + if (in_sysenter_stack(stack, info)) + goto recursion_check; + if (in_hardirq_stack(stack, info)) goto recursion_check; diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 88ce2ffdb11030..abc828f8c29785 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -37,6 +37,9 @@ const char *stack_type_name(enum stack_type type) if (type == STACK_TYPE_IRQ) return "IRQ"; + if (type == STACK_TYPE_SYSENTER) + return "SYSENTER"; + if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST) return exception_stack_names[type - STACK_TYPE_EXCEPTION]; @@ -115,6 +118,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, if (in_irq_stack(stack, info)) goto recursion_check; + if (in_sysenter_stack(stack, info)) + goto recursion_check; + goto unknown; recursion_check: From 5684dd300f675a9f5ad6a43f96f128839c98a7fd Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:14 +0100 Subject: [PATCH 0884/3326] x86/entry/gdt: Put per-CPU GDT remaps in ascending order commit aaeed3aeb39c1ba69f0a49baec8cb728121d0a91 upstream. We currently have CPU 0's GDT at the top of the GDT range and higher-numbered CPUs at lower addresses. This happens because the fixmap is upside down (index 0 is the top of the fixmap). Flip it so that GDTs are in ascending order by virtual address. This will simplify a future patch that will generalize the GDT remap to contain multiple pages. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Reviewed-by: Thomas Gleixner Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.471561421@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/desc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 0a3e808b91230d..01fd944fd72118 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -63,7 +63,7 @@ static inline struct desc_struct *get_current_gdt_rw(void) /* Get the fixmap index for a specific processor */ static inline unsigned int get_cpu_gdt_ro_index(int cpu) { - return FIX_GDT_REMAP_BEGIN + cpu; + return FIX_GDT_REMAP_END - cpu; } /* Provide the fixmap address of the remapped GDT */ From ece614dcfd964ab04fb37186ea2c4b020dbc3ee9 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:15 +0100 Subject: [PATCH 0885/3326] x86/mm/fixmap: Generalize the GDT fixmap mechanism, introduce struct cpu_entry_area commit ef8813ab280507972bb57e4b1b502811ad4411e9 upstream. Currently, the GDT is an ad-hoc array of pages, one per CPU, in the fixmap. Generalize it to be an array of a new 'struct cpu_entry_area' so that we can cleanly add new things to it. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.563271721@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/desc.h | 9 +-------- arch/x86/include/asm/fixmap.h | 37 +++++++++++++++++++++++++++++++++-- arch/x86/kernel/cpu/common.c | 14 ++++++------- arch/x86/xen/mmu_pv.c | 2 +- 4 files changed, 44 insertions(+), 18 deletions(-) diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 01fd944fd72118..f6f428432a68d3 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -60,17 +60,10 @@ static inline struct desc_struct *get_current_gdt_rw(void) return this_cpu_ptr(&gdt_page)->gdt; } -/* Get the fixmap index for a specific processor */ -static inline unsigned int get_cpu_gdt_ro_index(int cpu) -{ - return FIX_GDT_REMAP_END - cpu; -} - /* Provide the fixmap address of the remapped GDT */ static inline struct desc_struct *get_cpu_gdt_ro(int cpu) { - unsigned int idx = get_cpu_gdt_ro_index(cpu); - return (struct desc_struct *)__fix_to_virt(idx); + return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt; } /* Provide the current read-only GDT */ diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index b0c505fe9a958c..b61f0242f9d057 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -44,6 +44,19 @@ extern unsigned long __FIXADDR_TOP; PAGE_SIZE) #endif +/* + * cpu_entry_area is a percpu region in the fixmap that contains things + * needed by the CPU and early entry/exit code. 
Real types aren't used + * for all fields here to avoid circular header dependencies. + * + * Every field is a virtual alias of some other allocated backing store. + * There is no direct allocation of a struct cpu_entry_area. + */ +struct cpu_entry_area { + char gdt[PAGE_SIZE]; +}; + +#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE) /* * Here we define all the compile-time 'special' virtual @@ -101,8 +114,8 @@ enum fixed_addresses { FIX_LNW_VRTC, #endif /* Fixmap entries to remap the GDTs, one per processor. */ - FIX_GDT_REMAP_BEGIN, - FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1, + FIX_CPU_ENTRY_AREA_TOP, + FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1, #ifdef CONFIG_ACPI_APEI_GHES /* Used for GHES mapping from assorted contexts */ @@ -191,5 +204,25 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr, void __early_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags); +static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page) +{ + BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); + + return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page; +} + +#define __get_cpu_entry_area_offset_index(cpu, offset) ({ \ + BUILD_BUG_ON(offset % PAGE_SIZE != 0); \ + __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE); \ + }) + +#define get_cpu_entry_area_index(cpu, field) \ + __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field)) + +static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) +{ + return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0)); +} + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FIXMAP_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 22f5421701983e..2cb394dc41537f 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -466,12 +466,12 @@ void load_percpu_segment(int cpu) load_stack_canary_segment(); } -/* Setup the fixmap mapping only once per-processor */ -static inline void setup_fixmap_gdt(int cpu) +/* Setup the fixmap mappings only once per-processor */ +static inline void setup_cpu_entry_area(int cpu) { #ifdef CONFIG_X86_64 /* On 64-bit systems, we use a read-only fixmap GDT. */ - pgprot_t prot = PAGE_KERNEL_RO; + pgprot_t gdt_prot = PAGE_KERNEL_RO; #else /* * On native 32-bit systems, the GDT cannot be read-only because @@ -482,11 +482,11 @@ static inline void setup_fixmap_gdt(int cpu) * On Xen PV, the GDT must be read-only because the hypervisor requires * it. */ - pgprot_t prot = boot_cpu_has(X86_FEATURE_XENPV) ? + pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ? 
PAGE_KERNEL_RO : PAGE_KERNEL; #endif - __set_fixmap(get_cpu_gdt_ro_index(cpu), get_cpu_gdt_paddr(cpu), prot); + __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); } /* Load the original GDT from the per-cpu structure */ @@ -1589,7 +1589,7 @@ void cpu_init(void) if (is_uv_system()) uv_cpu_init(); - setup_fixmap_gdt(cpu); + setup_cpu_entry_area(cpu); load_fixmap_gdt(cpu); } @@ -1651,7 +1651,7 @@ void cpu_init(void) fpu__init_cpu(); - setup_fixmap_gdt(cpu); + setup_cpu_entry_area(cpu); load_fixmap_gdt(cpu); } #endif diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 2ccdaba31a0778..c2454237fa672d 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -2272,7 +2272,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #endif case FIX_TEXT_POKE0: case FIX_TEXT_POKE1: - case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END: + case FIX_CPU_ENTRY_AREA_TOP ... FIX_CPU_ENTRY_AREA_BOTTOM: /* All local page mappings */ pte = pfn_pte(phys, prot); break; From 487f3ddcd9863d38490ca4f6f2b9e67d4f8a197c Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:16 +0100 Subject: [PATCH 0886/3326] x86/kasan/64: Teach KASAN about the cpu_entry_area commit 21506525fb8ddb0342f2a2370812d47f6a1f3833 upstream. The cpu_entry_area will contain stacks. Make sure that KASAN has appropriate shadow mappings for them. Signed-off-by: Andy Lutomirski Signed-off-by: Andrey Ryabinin Signed-off-by: Thomas Gleixner Cc: Alexander Potapenko Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Dmitry Vyukov Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: kasan-dev@googlegroups.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.642806442@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/kasan_init_64.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 99dfed6dfef8b2..9ec70d780f1f41 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -277,6 +277,7 @@ void __init kasan_early_init(void) void __init kasan_init(void) { int i; + void *shadow_cpu_entry_begin, *shadow_cpu_entry_end; #ifdef CONFIG_KASAN_INLINE register_die_notifier(&kasan_die_notifier); @@ -329,8 +330,23 @@ void __init kasan_init(void) (unsigned long)kasan_mem_to_shadow(_end), early_pfn_to_nid(__pa(_stext))); + shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM); + shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); + shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, + PAGE_SIZE); + + shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE); + shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); + shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, + PAGE_SIZE); + kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), - (void *)KASAN_SHADOW_END); + shadow_cpu_entry_begin); + + kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, + (unsigned long)shadow_cpu_entry_end, 0); + + kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END); 
load_cr3(init_top_pgt); __flush_tlb_all(); From 5be136953f62c551c1f40ddbb83c2f4e8fc9c41f Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:17 +0100 Subject: [PATCH 0887/3326] x86/entry: Fix assumptions that the HW TSS is at the beginning of cpu_tss commit 7fb983b4dd569e08564134a850dfd4eb1c63d9b8 upstream. A future patch will move SYSENTER_stack to the beginning of cpu_tss to help detect overflow. Before this can happen, fix several code paths that hardcode assumptions about the old layout. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Reviewed-by: Dave Hansen Reviewed-by: Thomas Gleixner Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.722425540@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/desc.h | 2 +- arch/x86/include/asm/processor.h | 9 ++++++-- arch/x86/kernel/cpu/common.c | 8 +++---- arch/x86/kernel/doublefault.c | 36 +++++++++++++++----------------- arch/x86/kvm/vmx.c | 2 +- arch/x86/power/cpu.c | 13 ++++++------ 6 files changed, 37 insertions(+), 33 deletions(-) diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index f6f428432a68d3..2ace1f90d13830 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -178,7 +178,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr, #endif } -static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) +static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr) { struct desc_struct *d = get_cpu_gdt_rw(cpu); tss_desc tss; diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 789dad5da20f47..555c9478f3df5a 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -162,7 +162,7 @@ enum cpuid_regs_idx { extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; -extern struct tss_struct doublefault_tss; +extern struct x86_hw_tss doublefault_tss; extern __u32 cpu_caps_cleared[NCAPINTS]; extern __u32 cpu_caps_set[NCAPINTS]; @@ -252,6 +252,11 @@ static inline void load_cr3(pgd_t *pgdir) write_cr3(__sme_pa(pgdir)); } +/* + * Note that while the legacy 'TSS' name comes from 'Task State Segment', + * on modern x86 CPUs the TSS also holds information important to 64-bit mode, + * unrelated to the task-switch mechanism: + */ #ifdef CONFIG_X86_32 /* This is the TSS defined by the hardware. 
*/ struct x86_hw_tss { @@ -322,7 +327,7 @@ struct x86_hw_tss { #define IO_BITMAP_BITS 65536 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) -#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) +#define IO_BITMAP_OFFSET (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss)) #define INVALID_IO_BITMAP_OFFSET 0x8000 struct tss_struct { diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 2cb394dc41537f..3f285b973f500f 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1557,7 +1557,7 @@ void cpu_init(void) } } - t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); + t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; /* * <= is required because the CPU will access up to @@ -1576,7 +1576,7 @@ void cpu_init(void) * Initialize the TSS. Don't bother initializing sp0, as the initial * task never enters user mode. */ - set_tss_desc(cpu, t); + set_tss_desc(cpu, &t->x86_tss); load_TR_desc(); load_mm_ldt(&init_mm); @@ -1634,12 +1634,12 @@ void cpu_init(void) * Initialize the TSS. Don't bother initializing sp0, as the initial * task never enters user mode. */ - set_tss_desc(cpu, t); + set_tss_desc(cpu, &t->x86_tss); load_TR_desc(); load_mm_ldt(&init_mm); - t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); + t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; #ifdef CONFIG_DOUBLEFAULT /* Set up doublefault TSS pointer in the GDT */ diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c index 0e662c55ae902f..0b8cedb20d6d92 100644 --- a/arch/x86/kernel/doublefault.c +++ b/arch/x86/kernel/doublefault.c @@ -50,25 +50,23 @@ static void doublefault_fn(void) cpu_relax(); } -struct tss_struct doublefault_tss __cacheline_aligned = { - .x86_tss = { - .sp0 = STACK_START, - .ss0 = __KERNEL_DS, - .ldt = 0, - .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, - - .ip = (unsigned long) doublefault_fn, - /* 0x2 bit is always set */ - .flags = X86_EFLAGS_SF | 0x2, - .sp = STACK_START, - .es = __USER_DS, - .cs = __KERNEL_CS, - .ss = __KERNEL_DS, - .ds = __USER_DS, - .fs = __KERNEL_PERCPU, - - .__cr3 = __pa_nodebug(swapper_pg_dir), - } +struct x86_hw_tss doublefault_tss __cacheline_aligned = { + .sp0 = STACK_START, + .ss0 = __KERNEL_DS, + .ldt = 0, + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, + + .ip = (unsigned long) doublefault_fn, + /* 0x2 bit is always set */ + .flags = X86_EFLAGS_SF | 0x2, + .sp = STACK_START, + .es = __USER_DS, + .cs = __KERNEL_CS, + .ss = __KERNEL_DS, + .ds = __USER_DS, + .fs = __KERNEL_PERCPU, + + .__cr3 = __pa_nodebug(swapper_pg_dir), }; /* dummy for do_double_fault() call */ diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index bc5921c1e2f23d..b50b02d23f8ab5 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2295,7 +2295,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) * processors. See 22.2.4. */ vmcs_writel(HOST_TR_BASE, - (unsigned long)this_cpu_ptr(&cpu_tss)); + (unsigned long)this_cpu_ptr(&cpu_tss.x86_tss)); vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */ /* diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 84fcfde53f8f3f..50593e13828109 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -165,12 +165,13 @@ static void fix_processor_context(void) struct desc_struct *desc = get_cpu_gdt_rw(cpu); tss_desc tss; #endif - set_tss_desc(cpu, t); /* - * This just modifies memory; should not be - * necessary. But... 
This is necessary, because - * 386 hardware has concept of busy TSS or some - * similar stupidity. - */ + + /* + * This just modifies memory; should not be necessary. But... This is + * necessary, because 386 hardware has concept of busy TSS or some + * similar stupidity. + */ + set_tss_desc(cpu, &t->x86_tss); #ifdef CONFIG_X86_64 memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc)); From 41964ef17cce3f2497c1ec428d22b42fb615da8d Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:18 +0100 Subject: [PATCH 0888/3326] x86/dumpstack: Handle stack overflow on all stacks commit 6e60e583426c2f8751c22c2dfe5c207083b4483a upstream. We currently special-case stack overflow on the task stack. We're going to start putting special stacks in the fixmap with a custom layout, so they'll have guard pages, too. Teach the unwinder to be able to unwind an overflow of any of the stacks. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.802057305@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/dumpstack.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index a33a1373a2522f..64f8ed2a4827d3 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -112,24 +112,28 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, * - task stack * - interrupt stack * - HW exception stacks (double fault, nmi, debug, mce) + * - SYSENTER stack * - * x86-32 can have up to three stacks: + * x86-32 can have up to four stacks: * - task stack * - softirq stack * - hardirq stack + * - SYSENTER stack */ for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { const char *stack_name; - /* - * If we overflowed the task stack into a guard page, jump back - * to the bottom of the usable stack. - */ - if (task_stack_page(task) - (void *)stack < PAGE_SIZE) - stack = task_stack_page(task); - - if (get_stack_info(stack, task, &stack_info, &visit_mask)) - break; + if (get_stack_info(stack, task, &stack_info, &visit_mask)) { + /* + * We weren't on a valid stack. It's possible that + * we overflowed a valid stack into a guard page. + * See if the next page up is valid so that we can + * generate some kind of backtrace if this happens. + */ + stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack); + if (get_stack_info(stack, task, &stack_info, &visit_mask)) + break; + } stack_name = stack_type_name(stack_info.type); if (stack_name) From 969f5706f6f51a4a01a0dfedfa405b33e25fb708 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:19 +0100 Subject: [PATCH 0889/3326] x86/entry: Move SYSENTER_stack to the beginning of struct tss_struct commit 1a935bc3d4ea61556461a9e92a68ca3556232efd upstream. SYSENTER_stack should have reliable overflow detection, which means that it needs to be at the bottom of a page, not the top. Move it to the beginning of struct tss_struct and page-align it. 
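To make the "bottom of a page" reasoning concrete, here is a minimal standalone C sketch (not the kernel's real tss_struct; the demo_tss type, its field names and the 4 KiB page size are assumptions for illustration only). Because the stack is the first member of a page-aligned structure, the first word an overflowing push would touch lies on the page just below the structure, so a guard page there turns overflow into an immediate fault instead of silent corruption of the neighbouring fields.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL   /* assumed page size for the demo */

/* Illustrative stand-in for tss_struct: the entry stack is the first member. */
struct demo_tss {
	unsigned long entry_stack[64];   /* stack at the very bottom of the page   */
	unsigned long hw_tss_words[26];  /* "fixed hardware portion" at higher addresses */
} __attribute__((aligned(4096)));

/* The overflow-detection trick only works if the stack starts the page. */
static_assert(offsetof(struct demo_tss, entry_stack) == 0,
	      "stack must be the first member");

int main(void)
{
	static struct demo_tss t;
	uintptr_t bottom = (uintptr_t)&t.entry_stack[0];

	printf("stack bottom %#lx is page aligned: %s\n",
	       (unsigned long)bottom,
	       (bottom % DEMO_PAGE_SIZE) == 0 ? "yes" : "no");
	printf("an overflowing push would touch %#lx, i.e. the page below the struct\n",
	       (unsigned long)(bottom - sizeof(unsigned long)));
	return 0;
}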
Also add an assertion to make sure that the fixed hardware TSS doesn't cross a page boundary. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.881827433@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/processor.h | 21 ++++++++++++--------- arch/x86/kernel/cpu/common.c | 21 +++++++++++++++++++++ 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 555c9478f3df5a..759051251664c6 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -332,7 +332,16 @@ struct x86_hw_tss { struct tss_struct { /* - * The hardware state: + * Space for the temporary SYSENTER stack, used for SYSENTER + * and the entry trampoline as well. + */ + unsigned long SYSENTER_stack_canary; + unsigned long SYSENTER_stack[64]; + + /* + * The fixed hardware portion. This must not cross a page boundary + * at risk of violating the SDM's advice and potentially triggering + * errata. */ struct x86_hw_tss x86_tss; @@ -343,15 +352,9 @@ struct tss_struct { * be within the limit. */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; +} __aligned(PAGE_SIZE); - /* - * Space for the temporary SYSENTER stack. - */ - unsigned long SYSENTER_stack_canary; - unsigned long SYSENTER_stack[64]; -} ____cacheline_aligned; - -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); +DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss); /* * sizeof(unsigned long) coming from an extra "long" at the end diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3f285b973f500f..60b2dfd2a58b0d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -487,6 +487,27 @@ static inline void setup_cpu_entry_area(int cpu) #endif __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); + + /* + * The Intel SDM says (Volume 3, 7.2.1): + * + * Avoid placing a page boundary in the part of the TSS that the + * processor reads during a task switch (the first 104 bytes). The + * processor may not correctly perform address translations if a + * boundary occurs in this area. During a task switch, the processor + * reads and writes into the first 104 bytes of each TSS (using + * contiguous physical addresses beginning with the physical address + * of the first byte of the TSS). So, after TSS access begins, if + * part of the 104 bytes is not physically contiguous, the processor + * will access incorrect information without generating a page-fault + * exception. + * + * There are also a lot of errata involving the TSS spanning a page + * boundary. Assert that we're not doing that. 
+ */ + BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ + offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); + } /* Load the original GDT from the per-cpu structure */ From 5bb40c6d4c2a453b21e8e7c2b9bae87e77c4d878 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:20 +0100 Subject: [PATCH 0890/3326] x86/entry: Remap the TSS into the CPU entry area commit 72f5e08dbba2d01aa90b592cf76c378ea233b00b upstream. This has a secondary purpose: it puts the entry stack into a region with a well-controlled layout. A subsequent patch will take advantage of this to streamline the SYSCALL entry code to be able to find it more easily. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150605.962042855@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_32.S | 6 +++-- arch/x86/include/asm/fixmap.h | 7 ++++++ arch/x86/kernel/asm-offsets.c | 3 +++ arch/x86/kernel/cpu/common.c | 41 ++++++++++++++++++++++++++++++----- arch/x86/kernel/dumpstack.c | 3 ++- arch/x86/kvm/vmx.c | 2 +- arch/x86/power/cpu.c | 11 +++++----- 7 files changed, 58 insertions(+), 15 deletions(-) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 4838037f97f6ed..0ab316c4680673 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -941,7 +941,8 @@ ENTRY(debug) movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? */ - PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) + movl PER_CPU_VAR(cpu_entry_area), %ecx + addl $CPU_ENTRY_AREA_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ cmpl $SIZEOF_SYSENTER_stack, %ecx jb .Ldebug_from_sysenter_stack @@ -984,7 +985,8 @@ ENTRY(nmi) movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? */ - PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) + movl PER_CPU_VAR(cpu_entry_area), %ecx + addl $CPU_ENTRY_AREA_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ cmpl $SIZEOF_SYSENTER_stack, %ecx jb .Lnmi_from_sysenter_stack diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index b61f0242f9d057..84558b611ad33d 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -54,6 +54,13 @@ extern unsigned long __FIXADDR_TOP; */ struct cpu_entry_area { char gdt[PAGE_SIZE]; + + /* + * The GDT is just below cpu_tss and thus serves (on x86_64) as a + * a read-only guard page for the SYSENTER stack at the bottom + * of the TSS region. 
+ */ + struct tss_struct tss; }; #define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE) diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index b275863128ebf0..55858b277cf620 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -98,4 +98,7 @@ void common(void) { OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack); /* Size of SYSENTER_stack */ DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack)); + + /* Layout info for cpu_entry_area */ + OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss); } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 60b2dfd2a58b0d..e5837bd6c67251 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -466,6 +466,22 @@ void load_percpu_segment(int cpu) load_stack_canary_segment(); } +static void set_percpu_fixmap_pages(int fixmap_index, void *ptr, + int pages, pgprot_t prot) +{ + int i; + + for (i = 0; i < pages; i++) { + __set_fixmap(fixmap_index - i, + per_cpu_ptr_to_phys(ptr + i * PAGE_SIZE), prot); + } +} + +#ifdef CONFIG_X86_32 +/* The 32-bit entry code needs to find cpu_entry_area. */ +DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); +#endif + /* Setup the fixmap mappings only once per-processor */ static inline void setup_cpu_entry_area(int cpu) { @@ -507,7 +523,15 @@ static inline void setup_cpu_entry_area(int cpu) */ BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); + BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); + set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss), + &per_cpu(cpu_tss, cpu), + sizeof(struct tss_struct) / PAGE_SIZE, + PAGE_KERNEL); +#ifdef CONFIG_X86_32 + this_cpu_write(cpu_entry_area, get_cpu_entry_area(cpu)); +#endif } /* Load the original GDT from the per-cpu structure */ @@ -1257,7 +1281,8 @@ void enable_sep_cpu(void) wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); wrmsr(MSR_IA32_SYSENTER_ESP, - (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack), + (unsigned long)&get_cpu_entry_area(cpu)->tss + + offsetofend(struct tss_struct, SYSENTER_stack), 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); @@ -1370,6 +1395,8 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks /* May not be marked __init: used by software suspend */ void syscall_init(void) { + int cpu = smp_processor_id(); + wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); @@ -1383,7 +1410,7 @@ void syscall_init(void) */ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, - (unsigned long)this_cpu_ptr(&cpu_tss) + + (unsigned long)&get_cpu_entry_area(cpu)->tss + offsetofend(struct tss_struct, SYSENTER_stack)); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else @@ -1593,11 +1620,13 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, me); + setup_cpu_entry_area(cpu); + /* * Initialize the TSS. Don't bother initializing sp0, as the initial * task never enters user mode. 
*/ - set_tss_desc(cpu, &t->x86_tss); + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); load_mm_ldt(&init_mm); @@ -1610,7 +1639,6 @@ void cpu_init(void) if (is_uv_system()) uv_cpu_init(); - setup_cpu_entry_area(cpu); load_fixmap_gdt(cpu); } @@ -1651,11 +1679,13 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, curr); + setup_cpu_entry_area(cpu); + /* * Initialize the TSS. Don't bother initializing sp0, as the initial * task never enters user mode. */ - set_tss_desc(cpu, &t->x86_tss); + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); load_mm_ldt(&init_mm); @@ -1672,7 +1702,6 @@ void cpu_init(void) fpu__init_cpu(); - setup_cpu_entry_area(cpu); load_fixmap_gdt(cpu); } #endif diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 64f8ed2a4827d3..60267850125e68 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -45,7 +45,8 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task, bool in_sysenter_stack(unsigned long *stack, struct stack_info *info) { - struct tss_struct *tss = this_cpu_ptr(&cpu_tss); + int cpu = smp_processor_id(); + struct tss_struct *tss = &get_cpu_entry_area(cpu)->tss; /* Treat the canary as part of the stack for unwinding purposes. */ void *begin = &tss->SYSENTER_stack_canary; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b50b02d23f8ab5..47d9432756f34a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2295,7 +2295,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) * processors. See 22.2.4. */ vmcs_writel(HOST_TR_BASE, - (unsigned long)this_cpu_ptr(&cpu_tss.x86_tss)); + (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss); vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */ /* diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 50593e13828109..04d5157fe7f8f3 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -160,18 +160,19 @@ static void do_fpu_end(void) static void fix_processor_context(void) { int cpu = smp_processor_id(); - struct tss_struct *t = &per_cpu(cpu_tss, cpu); #ifdef CONFIG_X86_64 struct desc_struct *desc = get_cpu_gdt_rw(cpu); tss_desc tss; #endif /* - * This just modifies memory; should not be necessary. But... This is - * necessary, because 386 hardware has concept of busy TSS or some - * similar stupidity. + * We need to reload TR, which requires that we change the + * GDT entry to indicate "available" first. + * + * XXX: This could probably all be replaced by a call to + * force_reload_TR(). */ - set_tss_desc(cpu, &t->x86_tss); + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); #ifdef CONFIG_X86_64 memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc)); From d120cd749ef9770ee98b708a83b49547dcf1c0e1 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:21 +0100 Subject: [PATCH 0891/3326] x86/entry/64: Separate cpu_current_top_of_stack from TSS.sp0 commit 9aaefe7b59ae00605256a7d6bd1c1456432495fc upstream. On 64-bit kernels, we used to assume that TSS.sp0 was the current top of stack. With the addition of an entry trampoline, this will no longer be the case. Store the current top of stack in TSS.sp1, which is otherwise unused but shares the same cacheline. 
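The "shares the same cacheline" remark can be checked from the architectural TSS layout alone. The sketch below is an independent userspace rendition of the 64-bit hardware TSS (re-derived from the SDM layout, not the kernel header; the 64-byte cacheline size is the usual x86 value and the TSS base is assumed cacheline-aligned): sp0 sits at offset 4 and sp1 at offset 12, so both land in the first cacheline.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Independent illustration of the 64-bit hardware TSS, 104 bytes total. */
struct demo_hw_tss {
	uint32_t reserved1;
	uint64_t sp0;
	uint64_t sp1;            /* free for software: Linux does not use ring 1 */
	uint64_t sp2;
	uint64_t reserved2;
	uint64_t ist[7];
	uint32_t reserved3;
	uint32_t reserved4;
	uint16_t reserved5;
	uint16_t io_bitmap_base;
} __attribute__((packed));

static_assert(sizeof(struct demo_hw_tss) == 104, "64-bit TSS is 104 bytes");

#define CACHELINE 64   /* assumed cacheline size */

int main(void)
{
	size_t sp0_off = offsetof(struct demo_hw_tss, sp0);
	size_t sp1_off = offsetof(struct demo_hw_tss, sp1);

	printf("sp0 at offset %zu, sp1 at offset %zu\n", sp0_off, sp1_off);
	printf("same %d-byte cacheline (for a cacheline-aligned TSS): %s\n",
	       CACHELINE,
	       (sp0_off / CACHELINE) == (sp1_off / CACHELINE) ? "yes" : "no");
	return 0;
}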
Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.050864668@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/processor.h | 18 +++++++++++++----- arch/x86/include/asm/thread_info.h | 2 +- arch/x86/kernel/asm-offsets_64.c | 1 + arch/x86/kernel/process.c | 10 ++++++++++ arch/x86/kernel/process_64.c | 1 + 5 files changed, 26 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 759051251664c6..b0cf0612a454d6 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -309,7 +309,13 @@ struct x86_hw_tss { struct x86_hw_tss { u32 reserved1; u64 sp0; + + /* + * We store cpu_current_top_of_stack in sp1 so it's always accessible. + * Linux does not use ring 1, so sp1 is not otherwise needed. + */ u64 sp1; + u64 sp2; u64 reserved2; u64 ist[7]; @@ -368,6 +374,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss); #ifdef CONFIG_X86_32 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); +#else +#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1 #endif /* @@ -539,12 +547,12 @@ static inline void native_swapgs(void) static inline unsigned long current_top_of_stack(void) { -#ifdef CONFIG_X86_64 - return this_cpu_read_stable(cpu_tss.x86_tss.sp0); -#else - /* sp0 on x86_32 is special in and around vm86 mode. */ + /* + * We can't read directly from tss.sp0: sp0 on x86_32 is special in + * and around vm86 mode and sp0 on x86_64 is special because of the + * entry trampoline. + */ return this_cpu_read_stable(cpu_current_top_of_stack); -#endif } static inline bool on_thread_stack(void) diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 70f425947dc50f..44a04999791e05 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -207,7 +207,7 @@ static inline int arch_within_stack_frames(const void * const stack, #else /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_64 -# define cpu_current_top_of_stack (cpu_tss + TSS_sp0) +# define cpu_current_top_of_stack (cpu_tss + TSS_sp1) #endif #endif diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index e3a5175a444bc0..bf51e51d808dd8 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -66,6 +66,7 @@ int main(void) OFFSET(TSS_ist, tss_struct, x86_tss.ist); OFFSET(TSS_sp0, tss_struct, x86_tss.sp0); + OFFSET(TSS_sp1, tss_struct, x86_tss.sp1); BLANK(); #ifdef CONFIG_CC_STACKPROTECTOR diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 35d674157fdae6..86e83762e3b378 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -56,6 +56,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { * Poison it. */ .sp0 = (1UL << (BITS_PER_LONG-1)) + 1, + +#ifdef CONFIG_X86_64 + /* + * .sp1 is cpu_current_top_of_stack. 
The init task never + * runs user code, but cpu_current_top_of_stack should still + * be well defined before the first context switch. + */ + .sp1 = TOP_OF_INIT_STACK, +#endif + #ifdef CONFIG_X86_32 .ss0 = __KERNEL_DS, .ss1 = __KERNEL_CS, diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 01b119bebb6801..157f8181691524 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -461,6 +461,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) * Switch the PDA and FPU contexts. */ this_cpu_write(current_task, next_p); + this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); /* Reload sp0. */ update_sp0(next_p); From c3dbef1bd0f7eb09daf49409ea533aa1b0eeb82e Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:22 +0100 Subject: [PATCH 0892/3326] x86/espfix/64: Stop assuming that pt_regs is on the entry stack commit 6d9256f0a89eaff97fca6006100bcaea8d1d8bdb upstream. When we start using an entry trampoline, a #GP from userspace will be delivered on the entry stack, not on the task stack. Fix the espfix64 #DF fixup to set up #GP according to TSS.SP0, rather than assuming that pt_regs + 1 == SP0. This won't change anything without an entry stack, but it will make the code continue to work when an entry stack is added. While we're at it, improve the comments to explain what's actually going on. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.130778051@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/traps.c | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index d3e3bbd5d3a063..f0029d17b14b1e 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -348,9 +348,15 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) /* * If IRET takes a non-IST fault on the espfix64 stack, then we - * end up promoting it to a doublefault. In that case, modify - * the stack to make it look like we just entered the #GP - * handler from user space, similar to bad_iret. + * end up promoting it to a doublefault. In that case, take + * advantage of the fact that we're not using the normal (TSS.sp0) + * stack right now. We can write a fake #GP(0) frame at TSS.sp0 + * and then modify our own IRET frame so that, when we return, + * we land directly at the #GP(0) vector with the stack already + * set up according to its expectations. + * + * The net result is that our #GP handler will think that we + * entered from usermode with the bad user context. * * No need for ist_enter here because we don't use RCU. 
*/ @@ -358,13 +364,26 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) regs->cs == __KERNEL_CS && regs->ip == (unsigned long)native_irq_return_iret) { - struct pt_regs *normal_regs = task_pt_regs(current); + struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1; + + /* + * regs->sp points to the failing IRET frame on the + * ESPFIX64 stack. Copy it to the entry stack. This fills + * in gpregs->ss through gpregs->ip. + * + */ + memmove(&gpregs->ip, (void *)regs->sp, 5*8); + gpregs->orig_ax = 0; /* Missing (lost) #GP error code */ - /* Fake a #GP(0) from userspace. */ - memmove(&normal_regs->ip, (void *)regs->sp, 5*8); - normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */ + /* + * Adjust our frame so that we return straight to the #GP + * vector with the expected RSP value. This is safe because + * we won't enable interupts or schedule before we invoke + * general_protection, so nothing will clobber the stack + * frame we just set up. + */ regs->ip = (unsigned long)general_protection; - regs->sp = (unsigned long)&normal_regs->orig_ax; + regs->sp = (unsigned long)&gpregs->orig_ax; return; } @@ -389,7 +408,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) * * Processors update CR2 whenever a page fault is detected. If a * second page fault occurs while an earlier page fault is being - * deliv- ered, the faulting linear address of the second fault will + * delivered, the faulting linear address of the second fault will * overwrite the contents of CR2 (replacing the previous * address). These updates to CR2 occur even if the page fault * results in a double fault or occurs during the delivery of a From 2bc9fa0beaf10206a778f02e9e5cb62f50345b1a Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:23 +0100 Subject: [PATCH 0893/3326] x86/entry/64: Use a per-CPU trampoline stack for IDT entries commit 7f2590a110b837af5679d08fc25c6227c5a8c497 upstream. Historically, IDT entries from usermode have always gone directly to the running task's kernel stack. Rearrange it so that we enter on a per-CPU trampoline stack and then manually switch to the task's stack. This touches a couple of extra cachelines, but it gives us a chance to run some code before we touch the kernel stack. The asm isn't exactly beautiful, but I think that fully refactoring it can wait. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Reviewed-by: Thomas Gleixner Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.225330557@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 67 ++++++++++++++++++++++++-------- arch/x86/entry/entry_64_compat.S | 5 ++- arch/x86/include/asm/switch_to.h | 4 +- arch/x86/include/asm/traps.h | 1 - arch/x86/kernel/cpu/common.c | 6 ++- arch/x86/kernel/traps.c | 21 +++++----- 6 files changed, 72 insertions(+), 32 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 162ce952d71611..9b39ce935af758 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -560,6 +560,13 @@ END(irq_entries_start) /* 0(%rsp): ~(interrupt number) */ .macro interrupt func cld + + testb $3, CS-ORIG_RAX(%rsp) + jz 1f + SWAPGS + call switch_to_thread_stack +1: + ALLOC_PT_GPREGS_ON_STACK SAVE_C_REGS SAVE_EXTRA_REGS @@ -569,12 +576,8 @@ END(irq_entries_start) jz 1f /* - * IRQ from user mode. Switch to kernel gsbase and inform context - * tracking that we're in kernel mode. - */ - SWAPGS - - /* + * IRQ from user mode. + * * We need to tell lockdep that IRQs are off. We can't do this until * we fix gsbase, and we should do it before enter_from_user_mode * (which can take locks). Since TRACE_IRQS_OFF idempotent, @@ -828,6 +831,32 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt */ #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8) +/* + * Switch to the thread stack. This is called with the IRET frame and + * orig_ax on the stack. (That is, RDI..R12 are not on the stack and + * space has not been allocated for them.) + */ +ENTRY(switch_to_thread_stack) + UNWIND_HINT_FUNC + + pushq %rdi + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI + + pushq 7*8(%rdi) /* regs->ss */ + pushq 6*8(%rdi) /* regs->rsp */ + pushq 5*8(%rdi) /* regs->eflags */ + pushq 4*8(%rdi) /* regs->cs */ + pushq 3*8(%rdi) /* regs->ip */ + pushq 2*8(%rdi) /* regs->orig_ax */ + pushq 8(%rdi) /* return address */ + UNWIND_HINT_FUNC + + movq (%rdi), %rdi + ret +END(switch_to_thread_stack) + .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ENTRY(\sym) UNWIND_HINT_IRET_REGS offset=\has_error_code*8 @@ -845,11 +874,12 @@ ENTRY(\sym) ALLOC_PT_GPREGS_ON_STACK - .if \paranoid - .if \paranoid == 1 + .if \paranoid < 2 testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ - jnz 1f + jnz .Lfrom_usermode_switch_stack_\@ .endif + + .if \paranoid call paranoid_entry .else call error_entry @@ -891,20 +921,15 @@ ENTRY(\sym) jmp error_exit .endif - .if \paranoid == 1 + .if \paranoid < 2 /* - * Paranoid entry from userspace. Switch stacks and treat it + * Entry from userspace. Switch stacks and treat it * as a normal entry. This means that paranoid handlers * run in real process context if user_mode(regs). */ -1: +.Lfrom_usermode_switch_stack_\@: call error_entry - - movq %rsp, %rdi /* pt_regs pointer */ - call sync_regs - movq %rax, %rsp /* switch stack */ - movq %rsp, %rdi /* pt_regs pointer */ .if \has_error_code @@ -1165,6 +1190,14 @@ ENTRY(error_entry) SWAPGS .Lerror_entry_from_usermode_after_swapgs: + /* Put us onto the real thread stack. 
*/ + popq %r12 /* save return addr in %12 */ + movq %rsp, %rdi /* arg0 = pt_regs pointer */ + call sync_regs + movq %rax, %rsp /* switch stack */ + ENCODE_FRAME_POINTER + pushq %r12 + /* * We need to tell lockdep that IRQs are off. We can't do this until * we fix gsbase, and we should do it before enter_from_user_mode diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index dcc6987f9bae16..95ad40eb7effbd 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -306,8 +306,11 @@ ENTRY(entry_INT80_compat) */ movl %eax, %eax - /* Construct struct pt_regs on stack (iret frame is already on stack) */ pushq %rax /* pt_regs->orig_ax */ + + /* switch to thread stack expects orig_ax to be pushed */ + call switch_to_thread_stack + pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */ diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 8c6bd6863db9d6..cbc71e73bd3216 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -90,10 +90,12 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread) /* This is used when switching tasks or entering/exiting vm86 mode. */ static inline void update_sp0(struct task_struct *task) { + /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */ #ifdef CONFIG_X86_32 load_sp0(task->thread.sp0); #else - load_sp0(task_top_of_stack(task)); + if (static_cpu_has(X86_FEATURE_XENPV)) + load_sp0(task_top_of_stack(task)); #endif } diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 1fadd310ff680e..31051f35cbb768 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -75,7 +75,6 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long); dotraplinkage void do_stack_segment(struct pt_regs *, long); #ifdef CONFIG_X86_64 dotraplinkage void do_double_fault(struct pt_regs *, long); -asmlinkage struct pt_regs *sync_regs(struct pt_regs *); #endif dotraplinkage void do_general_protection(struct pt_regs *, long); dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index e5837bd6c67251..57968880e39b6d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1623,11 +1623,13 @@ void cpu_init(void) setup_cpu_entry_area(cpu); /* - * Initialize the TSS. Don't bother initializing sp0, as the initial - * task never enters user mode. + * Initialize the TSS. sp0 points to the entry trampoline stack + * regardless of what task is running. */ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); + load_sp0((unsigned long)&get_cpu_entry_area(cpu)->tss + + offsetofend(struct tss_struct, SYSENTER_stack)); load_mm_ldt(&init_mm); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index f0029d17b14b1e..ee9ca0ad438854 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -619,14 +619,15 @@ NOKPROBE_SYMBOL(do_int3); #ifdef CONFIG_X86_64 /* - * Help handler running on IST stack to switch off the IST stack if the - * interrupted code was in user mode. The actual stack switch is done in - * entry_64.S + * Help handler running on a per-cpu (IST or entry trampoline) stack + * to switch to the normal thread stack if the interrupted code was in + * user mode. 
The actual stack switch is done in entry_64.S */ asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs) { - struct pt_regs *regs = task_pt_regs(current); - *regs = *eregs; + struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1; + if (regs != eregs) + *regs = *eregs; return regs; } NOKPROBE_SYMBOL(sync_regs); @@ -642,13 +643,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) /* * This is called from entry_64.S early in handling a fault * caused by a bad iret to user mode. To handle the fault - * correctly, we want move our stack frame to task_pt_regs - * and we want to pretend that the exception came from the - * iret target. + * correctly, we want to move our stack frame to where it would + * be had we entered directly on the entry stack (rather than + * just below the IRET frame) and we want to pretend that the + * exception came from the IRET target. */ struct bad_iret_stack *new_stack = - container_of(task_pt_regs(current), - struct bad_iret_stack, regs); + (struct bad_iret_stack *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1; /* Copy the IRET target to the new stack. */ memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); From 564cea11777e9d73aa04ce5f6fbe9169d7c116a2 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:24 +0100 Subject: [PATCH 0894/3326] x86/entry/64: Return to userspace from the trampoline stack commit 3e3b9293d392c577b62e24e4bc9982320438e749 upstream. By itself, this is useless. It gives us the ability to run some final code before exit that cannnot run on the kernel stack. This could include a CR3 switch a la PAGE_TABLE_ISOLATION or some kernel stack erasing, for example. (Or even weird things like *changing* which kernel stack gets used as an ASLR-strengthening mechanism.) The SYSRET32 path is not covered yet. It could be in the future or we could just ignore it and force the slow path if needed. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.306546484@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 55 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 9b39ce935af758..9323585c4de618 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -326,8 +326,24 @@ syscall_return_via_sysret: popq %rsi /* skip rcx */ popq %rdx popq %rsi + + /* + * Now all regs are restored except RSP and RDI. + * Save old stack pointer and switch to trampoline stack. + */ + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp + + pushq RSP-RDI(%rdi) /* RSP */ + pushq (%rdi) /* RDI */ + + /* + * We are on the trampoline stack. All regs except RDI are live. + * We can do future final exit work right here. 
+ */ + popq %rdi - movq RSP-ORIG_RAX(%rsp), %rsp + popq %rsp USERGS_SYSRET64 END(entry_SYSCALL_64) @@ -630,10 +646,41 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) ud2 1: #endif - SWAPGS POP_EXTRA_REGS - POP_C_REGS - addq $8, %rsp /* skip regs->orig_ax */ + popq %r11 + popq %r10 + popq %r9 + popq %r8 + popq %rax + popq %rcx + popq %rdx + popq %rsi + + /* + * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. + * Save old stack pointer and switch to trampoline stack. + */ + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp + + /* Copy the IRET frame to the trampoline stack. */ + pushq 6*8(%rdi) /* SS */ + pushq 5*8(%rdi) /* RSP */ + pushq 4*8(%rdi) /* EFLAGS */ + pushq 3*8(%rdi) /* CS */ + pushq 2*8(%rdi) /* RIP */ + + /* Push user RDI on the trampoline stack. */ + pushq (%rdi) + + /* + * We are on the trampoline stack. All regs except RDI are live. + * We can do future final exit work right here. + */ + + /* Restore RDI. */ + popq %rdi + SWAPGS INTERRUPT_RETURN From c631a16e5b84521ae80ab10d47cd6ccd2afc6817 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:25 +0100 Subject: [PATCH 0895/3326] x86/entry/64: Create a per-CPU SYSCALL entry trampoline commit 3386bc8aed825e9f1f65ce38df4b109b2019b71a upstream. Handling SYSCALL is tricky: the SYSCALL handler is entered with every single register (except FLAGS), including RSP, live. It somehow needs to set RSP to point to a valid stack, which means it needs to save the user RSP somewhere and find its own stack pointer. The canonical way to do this is with SWAPGS, which lets us access percpu data using the %gs prefix. With PAGE_TABLE_ISOLATION-like pagetable switching, this is problematic. Without a scratch register, switching CR3 is impossible, so %gs-based percpu memory would need to be mapped in the user pagetables. Doing that without information leaks is difficult or impossible. Instead, use a different sneaky trick. Map a copy of the first part of the SYSCALL asm at a different address for each CPU. Now RIP varies depending on the CPU, so we can use RIP-relative memory access to access percpu memory. By putting the relevant information (one scratch slot and the stack address) at a constant offset relative to RIP, we can make SYSCALL work without relying on %gs. A nice thing about this approach is that we can easily switch it on and off if we want pagetable switching to be configurable. The compat variant of SYSCALL doesn't have this problem in the first place -- there are plenty of scratch registers, since we don't care about preserving r8-r15. This patch therefore doesn't touch SYSCALL32 at all. This patch actually seems to be a small speedup. With this patch, SYSCALL touches an extra cache line and an extra virtual page, but the pipeline no longer stalls waiting for SWAPGS. It seems that, at least in a tight loop, the latter outweights the former. Thanks to David Laight for an optimization tip. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.403607157@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 58 +++++++++++++++++++++++++++++++++++ arch/x86/include/asm/fixmap.h | 2 ++ arch/x86/kernel/asm-offsets.c | 1 + arch/x86/kernel/cpu/common.c | 15 ++++++++- arch/x86/kernel/vmlinux.lds.S | 9 ++++++ 5 files changed, 84 insertions(+), 1 deletion(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 9323585c4de618..47ec428d101a4c 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -136,6 +136,64 @@ END(native_usergs_sysret64) * with them due to bugs in both AMD and Intel CPUs. */ + .pushsection .entry_trampoline, "ax" + +/* + * The code in here gets remapped into cpu_entry_area's trampoline. This means + * that the assembler and linker have the wrong idea as to where this code + * lives (and, in fact, it's mapped more than once, so it's not even at a + * fixed address). So we can't reference any symbols outside the entry + * trampoline and expect it to work. + * + * Instead, we carefully abuse %rip-relative addressing. + * _entry_trampoline(%rip) refers to the start of the remapped) entry + * trampoline. We can thus find cpu_entry_area with this macro: + */ + +#define CPU_ENTRY_AREA \ + _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip) + +/* The top word of the SYSENTER stack is hot and is usable as scratch space. */ +#define RSP_SCRATCH CPU_ENTRY_AREA_tss + CPU_TSS_SYSENTER_stack + \ + SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA + +ENTRY(entry_SYSCALL_64_trampoline) + UNWIND_HINT_EMPTY + swapgs + + /* Stash the user RSP. */ + movq %rsp, RSP_SCRATCH + + /* Load the top of the task stack into RSP */ + movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp + + /* Start building the simulated IRET frame. */ + pushq $__USER_DS /* pt_regs->ss */ + pushq RSP_SCRATCH /* pt_regs->sp */ + pushq %r11 /* pt_regs->flags */ + pushq $__USER_CS /* pt_regs->cs */ + pushq %rcx /* pt_regs->ip */ + + /* + * x86 lacks a near absolute jump, and we can't jump to the real + * entry text with a relative jump. We could push the target + * address and then use retq, but this destroys the pipeline on + * many CPUs (wasting over 20 cycles on Sandy Bridge). Instead, + * spill RDI and restore it in a second-stage trampoline. + */ + pushq %rdi + movq $entry_SYSCALL_64_stage2, %rdi + jmp *%rdi +END(entry_SYSCALL_64_trampoline) + + .popsection + +ENTRY(entry_SYSCALL_64_stage2) + UNWIND_HINT_EMPTY + popq %rdi + jmp entry_SYSCALL_64_after_hwframe +END(entry_SYSCALL_64_stage2) + ENTRY(entry_SYSCALL_64) UNWIND_HINT_EMPTY /* diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 84558b611ad33d..6a699474c2c7dd 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -61,6 +61,8 @@ struct cpu_entry_area { * of the TSS region. 
*/ struct tss_struct tss; + + char entry_trampoline[PAGE_SIZE]; }; #define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE) diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 55858b277cf620..61b1af88ac0707 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -101,4 +101,5 @@ void common(void) { /* Layout info for cpu_entry_area */ OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss); + OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline); } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 57968880e39b6d..430f950b0b7fa3 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -486,6 +486,8 @@ DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); static inline void setup_cpu_entry_area(int cpu) { #ifdef CONFIG_X86_64 + extern char _entry_trampoline[]; + /* On 64-bit systems, we use a read-only fixmap GDT. */ pgprot_t gdt_prot = PAGE_KERNEL_RO; #else @@ -532,6 +534,11 @@ static inline void setup_cpu_entry_area(int cpu) #ifdef CONFIG_X86_32 this_cpu_write(cpu_entry_area, get_cpu_entry_area(cpu)); #endif + +#ifdef CONFIG_X86_64 + __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline), + __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); +#endif } /* Load the original GDT from the per-cpu structure */ @@ -1395,10 +1402,16 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks /* May not be marked __init: used by software suspend */ void syscall_init(void) { + extern char _entry_trampoline[]; + extern char entry_SYSCALL_64_trampoline[]; + int cpu = smp_processor_id(); + unsigned long SYSCALL64_entry_trampoline = + (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline + + (entry_SYSCALL_64_trampoline - _entry_trampoline); wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); - wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); + wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline); #ifdef CONFIG_IA32_EMULATION wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index a4009fb9be8725..d2a8b5a24a44a5 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -107,6 +107,15 @@ SECTIONS SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) + +#ifdef CONFIG_X86_64 + . = ALIGN(PAGE_SIZE); + _entry_trampoline = .; + *(.entry_trampoline) + . = ALIGN(PAGE_SIZE); + ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); +#endif + /* End of text section */ _etext = .; } :text = 0x9090 From bb568391775d4a840992e2d2493f39d6e86401e3 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:26 +0100 Subject: [PATCH 0896/3326] x86/entry/64: Move the IST stacks into struct cpu_entry_area commit 40e7f949e0d9a33968ebde5d67f7e3a47c97742a upstream. The IST stacks are needed when an IST exception occurs and are accessed before any kernel code at all runs. Move them into struct cpu_entry_area. The IST stacks are unlike the rest of cpu_entry_area: they're used even for entries from kernel mode. This means that they should be set up before we load the final IDT. Move cpu_entry_area setup to trap_init() for the boot CPU and set it up for all possible CPUs at once in native_smp_prepare_cpus(). 
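The size of the per-CPU block of IST stacks that ends up in cpu_entry_area follows directly from the per-vector stack sizes. Below is a hedged sketch of just that arithmetic; the 4 KiB / 8 KiB sizes, the count of four IST-using stacks and the debug-stack slot index mirror the usual x86_64 values but are assumptions of the demo, not taken from this patch.

#include <stdio.h>

#define EXCEPTION_STKSZ      (4 * 1024UL)   /* assumed regular IST stack size */
#define DEBUG_STKSZ          (8 * 1024UL)   /* assumed debug stack size       */
#define N_EXCEPTION_STACKS   4              /* assumed number of IST stacks   */
#define DEBUG_STACK_IDX      2              /* assumed slot of the debug stack */

int main(void)
{
	unsigned long sizes[N_EXCEPTION_STACKS];
	unsigned long base = 0;  /* pretend the exception_stacks blob starts at 0 */
	unsigned long p = base;
	int v;

	for (v = 0; v < N_EXCEPTION_STACKS; v++)
		sizes[v] = (v == DEBUG_STACK_IDX) ? DEBUG_STKSZ : EXCEPTION_STKSZ;

	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		p += sizes[v];   /* the TSS records the *top* of each stack */
		printf("exception stack %d: [%#lx, %#lx), top recorded in the TSS = %#lx\n",
		       v, p - sizes[v], p, p);
	}

	printf("total per-CPU footprint: %lu bytes "
	       "((N-1) * EXCEPTION_STKSZ + DEBUG_STKSZ = %lu)\n",
	       p - base,
	       (N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ);
	return 0;
}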
Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.480598743@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/fixmap.h | 12 ++++++ arch/x86/kernel/cpu/common.c | 74 ++++++++++++++++++++--------------- arch/x86/kernel/traps.c | 3 ++ 3 files changed, 57 insertions(+), 32 deletions(-) diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 6a699474c2c7dd..451da7d9a50268 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -63,10 +63,22 @@ struct cpu_entry_area { struct tss_struct tss; char entry_trampoline[PAGE_SIZE]; + +#ifdef CONFIG_X86_64 + /* + * Exception stacks used for IST entries. + * + * In the future, this should have a separate slot for each stack + * with guard pages between them. + */ + char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]; +#endif }; #define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE) +extern void setup_cpu_entry_areas(void); + /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 430f950b0b7fa3..fb01a8e5e9b7df 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -466,24 +466,36 @@ void load_percpu_segment(int cpu) load_stack_canary_segment(); } -static void set_percpu_fixmap_pages(int fixmap_index, void *ptr, - int pages, pgprot_t prot) -{ - int i; - - for (i = 0; i < pages; i++) { - __set_fixmap(fixmap_index - i, - per_cpu_ptr_to_phys(ptr + i * PAGE_SIZE), prot); - } -} - #ifdef CONFIG_X86_32 /* The 32-bit entry code needs to find cpu_entry_area. */ DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); #endif +#ifdef CONFIG_X86_64 +/* + * Special IST stacks which the CPU switches to when it calls + * an IST-marked descriptor entry. Up to 7 stacks (hardware + * limit), all of them are 4K, except the debug stack which + * is 8K. + */ +static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { + [0 ... 
N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, + [DEBUG_STACK - 1] = DEBUG_STKSZ +}; + +static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks + [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); +#endif + +static void __init +set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) +{ + for ( ; pages; pages--, idx--, ptr += PAGE_SIZE) + __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot); +} + /* Setup the fixmap mappings only once per-processor */ -static inline void setup_cpu_entry_area(int cpu) +static void __init setup_cpu_entry_area(int cpu) { #ifdef CONFIG_X86_64 extern char _entry_trampoline[]; @@ -532,15 +544,31 @@ static inline void setup_cpu_entry_area(int cpu) PAGE_KERNEL); #ifdef CONFIG_X86_32 - this_cpu_write(cpu_entry_area, get_cpu_entry_area(cpu)); + per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); #endif #ifdef CONFIG_X86_64 + BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); + BUILD_BUG_ON(sizeof(exception_stacks) != + sizeof(((struct cpu_entry_area *)0)->exception_stacks)); + set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks), + &per_cpu(exception_stacks, cpu), + sizeof(exception_stacks) / PAGE_SIZE, + PAGE_KERNEL); + __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline), __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); #endif } +void __init setup_cpu_entry_areas(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + setup_cpu_entry_area(cpu); +} + /* Load the original GDT from the per-cpu structure */ void load_direct_gdt(int cpu) { @@ -1385,20 +1413,6 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; EXPORT_PER_CPU_SYMBOL(__preempt_count); -/* - * Special IST stacks which the CPU switches to when it calls - * an IST-marked descriptor entry. Up to 7 stacks (hardware - * limit), all of them are 4K, except the debug stack which - * is 8K. - */ -static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { - [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, - [DEBUG_STACK - 1] = DEBUG_STKSZ -}; - -static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks - [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); - /* May not be marked __init: used by software suspend */ void syscall_init(void) { @@ -1607,7 +1621,7 @@ void cpu_init(void) * set up and load the per-CPU TSS */ if (!oist->ist[0]) { - char *estacks = per_cpu(exception_stacks, cpu); + char *estacks = get_cpu_entry_area(cpu)->exception_stacks; for (v = 0; v < N_EXCEPTION_STACKS; v++) { estacks += exception_stack_sizes[v]; @@ -1633,8 +1647,6 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, me); - setup_cpu_entry_area(cpu); - /* * Initialize the TSS. sp0 points to the entry trampoline stack * regardless of what task is running. @@ -1694,8 +1706,6 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, curr); - setup_cpu_entry_area(cpu); - /* * Initialize the TSS. Don't bother initializing sp0, as the initial * task never enters user mode. 
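For reference, the decrementing-index loop in set_percpu_fixmap_pages() above relies on the usual x86 fixmap convention that a larger index means a lower virtual address, so mapping successive pages of an object at ascending addresses means decrementing the index while advancing the source pointer. A small standalone sketch of that index arithmetic follows; the top address and starting index are invented for the demo and the real kernel derives them from its fixmap headers.

#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define FIXADDR_TOP_DEMO  0xffffffffff5ff000UL   /* illustrative value only */

/* Mirror of the fixmap rule: higher index -> lower virtual address. */
static unsigned long fix_to_virt_demo(unsigned long idx)
{
	return FIXADDR_TOP_DEMO - (idx << PAGE_SHIFT);
}

int main(void)
{
	unsigned long idx = 100;  /* pretend a get_cpu_entry_area_index()-style lookup returned this */
	unsigned long src = 0;    /* page offset inside the per-CPU object */
	int pages = 3;

	for (; pages; pages--, idx--, src += PAGE_SIZE)
		printf("object page at +%#lx -> fixmap idx %lu -> va %#lx\n",
		       src, idx, fix_to_virt_demo(idx));
	return 0;
}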
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ee9ca0ad438854..3e29aad5c7cc67 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -947,6 +947,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) void __init trap_init(void) { + /* Init cpu_entry_area before IST entries are set up */ + setup_cpu_entry_areas(); + idt_setup_traps(); /* From b25ca49efac58fe9744b9a3f725c4e4d69b276f8 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:27 +0100 Subject: [PATCH 0897/3326] x86/entry/64: Remove the SYSENTER stack canary commit 7fbbd5cbebf118a9e09f5453f686656a167c3d1c upstream. Now that the SYSENTER stack has a guard page, there's no need for a canary to detect overflow after the fact. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.572577316@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/processor.h | 1 - arch/x86/kernel/dumpstack.c | 3 +-- arch/x86/kernel/process.c | 1 - arch/x86/kernel/traps.c | 7 ------- 4 files changed, 1 insertion(+), 11 deletions(-) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index b0cf0612a454d6..d34ac13c586652 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -341,7 +341,6 @@ struct tss_struct { * Space for the temporary SYSENTER stack, used for SYSENTER * and the entry trampoline as well. */ - unsigned long SYSENTER_stack_canary; unsigned long SYSENTER_stack[64]; /* diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 60267850125e68..ae1ce2e3f13260 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -48,8 +48,7 @@ bool in_sysenter_stack(unsigned long *stack, struct stack_info *info) int cpu = smp_processor_id(); struct tss_struct *tss = &get_cpu_entry_area(cpu)->tss; - /* Treat the canary as part of the stack for unwinding purposes. */ - void *begin = &tss->SYSENTER_stack_canary; + void *begin = &tss->SYSENTER_stack; void *end = (void *)&tss->SYSENTER_stack + sizeof(tss->SYSENTER_stack); if ((void *)stack < begin || (void *)stack >= end) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 86e83762e3b378..6a04287f222b29 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -81,7 +81,6 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { */ .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, #endif - .SYSENTER_stack_canary = STACK_END_MAGIC, }; EXPORT_PER_CPU_SYMBOL(cpu_tss); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 3e29aad5c7cc67..5ade4f89a6d169 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -814,13 +814,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) debug_stack_usage_dec(); exit: - /* - * This is the most likely code path that involves non-trivial use - * of the SYSENTER stack. Check that we haven't overrun it. 
- */ - WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC, - "Overran or corrupted SYSENTER stack\n"); - ist_exit(regs); } NOKPROBE_SYMBOL(do_debug); From e313437c85da22a26fe1ba01d94abec7698b1987 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:28 +0100 Subject: [PATCH 0898/3326] x86/entry: Clean up the SYSENTER_stack code commit 0f9a48100fba3f189724ae88a450c2261bf91c80 upstream. The existing code was a mess, mainly because C arrays are nasty. Turn SYSENTER_stack into a struct, add a helper to find it, and do all the obvious cleanups this enables. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.653244723@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_32.S | 4 ++-- arch/x86/entry/entry_64.S | 2 +- arch/x86/include/asm/fixmap.h | 5 +++++ arch/x86/include/asm/processor.h | 6 +++++- arch/x86/kernel/asm-offsets.c | 6 ++---- arch/x86/kernel/cpu/common.c | 14 +++----------- arch/x86/kernel/dumpstack.c | 7 +++---- 7 files changed, 21 insertions(+), 23 deletions(-) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 0ab316c4680673..3629bcbf85a2dc 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -942,7 +942,7 @@ ENTRY(debug) /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx - addl $CPU_ENTRY_AREA_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx + addl $CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ cmpl $SIZEOF_SYSENTER_stack, %ecx jb .Ldebug_from_sysenter_stack @@ -986,7 +986,7 @@ ENTRY(nmi) /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx - addl $CPU_ENTRY_AREA_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx + addl $CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ cmpl $SIZEOF_SYSENTER_stack, %ecx jb .Lnmi_from_sysenter_stack diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 47ec428d101a4c..ba0933532f2f6c 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -154,7 +154,7 @@ END(native_usergs_sysret64) _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip) /* The top word of the SYSENTER stack is hot and is usable as scratch space. 
*/ -#define RSP_SCRATCH CPU_ENTRY_AREA_tss + CPU_TSS_SYSENTER_stack + \ +#define RSP_SCRATCH CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + \ SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA ENTRY(entry_SYSCALL_64_trampoline) diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 451da7d9a50268..cc5d98bdca37f1 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -245,5 +245,10 @@ static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0)); } +static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu) +{ + return &get_cpu_entry_area(cpu)->tss.SYSENTER_stack; +} + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FIXMAP_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index d34ac13c586652..f933869470b89b 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -336,12 +336,16 @@ struct x86_hw_tss { #define IO_BITMAP_OFFSET (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss)) #define INVALID_IO_BITMAP_OFFSET 0x8000 +struct SYSENTER_stack { + unsigned long words[64]; +}; + struct tss_struct { /* * Space for the temporary SYSENTER stack, used for SYSENTER * and the entry trampoline as well. */ - unsigned long SYSENTER_stack[64]; + struct SYSENTER_stack SYSENTER_stack; /* * The fixed hardware portion. This must not cross a page boundary diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 61b1af88ac0707..46c0995344aaad 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -94,10 +94,8 @@ void common(void) { BLANK(); DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); - /* Offset from cpu_tss to SYSENTER_stack */ - OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack); - /* Size of SYSENTER_stack */ - DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack)); + OFFSET(TSS_STRUCT_SYSENTER_stack, tss_struct, SYSENTER_stack); + DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack)); /* Layout info for cpu_entry_area */ OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index fb01a8e5e9b7df..3de7480e4f3267 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1314,12 +1314,7 @@ void enable_sep_cpu(void) tss->x86_tss.ss1 = __KERNEL_CS; wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); - - wrmsr(MSR_IA32_SYSENTER_ESP, - (unsigned long)&get_cpu_entry_area(cpu)->tss + - offsetofend(struct tss_struct, SYSENTER_stack), - 0); - + wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1), 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); put_cpu(); @@ -1436,9 +1431,7 @@ void syscall_init(void) * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). 
*/ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); - wrmsrl_safe(MSR_IA32_SYSENTER_ESP, - (unsigned long)&get_cpu_entry_area(cpu)->tss + - offsetofend(struct tss_struct, SYSENTER_stack)); + wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1)); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); @@ -1653,8 +1646,7 @@ void cpu_init(void) */ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); - load_sp0((unsigned long)&get_cpu_entry_area(cpu)->tss + - offsetofend(struct tss_struct, SYSENTER_stack)); + load_sp0((unsigned long)(cpu_SYSENTER_stack(cpu) + 1)); load_mm_ldt(&init_mm); diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index ae1ce2e3f13260..bbd6d986e2d0fc 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -45,11 +45,10 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task, bool in_sysenter_stack(unsigned long *stack, struct stack_info *info) { - int cpu = smp_processor_id(); - struct tss_struct *tss = &get_cpu_entry_area(cpu)->tss; + struct SYSENTER_stack *ss = cpu_SYSENTER_stack(smp_processor_id()); - void *begin = &tss->SYSENTER_stack; - void *end = (void *)&tss->SYSENTER_stack + sizeof(tss->SYSENTER_stack); + void *begin = ss; + void *end = ss + 1; if ((void *)stack < begin || (void *)stack >= end) return false; From 25e2999e630c744c27c9179d1279c7f9d503385e Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:29 +0100 Subject: [PATCH 0899/3326] x86/entry/64: Make cpu_entry_area.tss read-only commit c482feefe1aeb150156248ba0fd3e029bc886605 upstream. The TSS is a fairly juicy target for exploits, and, now that the TSS is in the cpu_entry_area, it's no longer protected by kASLR. Make it read-only on x86_64. On x86_32, it can't be RO because it's written by the CPU during task switches, and we use a task gate for double faults. I'd also be nervous about errata if we tried to make it RO even on configurations without double fault handling. [ tglx: AMD confirmed that there is no problem on 64-bit with TSS RO. So it's probably safe to assume that it's a non issue, though Intel might have been creative in that area. Still waiting for confirmation. ] Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.733700132@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_32.S | 4 ++-- arch/x86/entry/entry_64.S | 8 ++++---- arch/x86/include/asm/fixmap.h | 13 +++++++++---- arch/x86/include/asm/processor.h | 17 ++++++++--------- arch/x86/include/asm/switch_to.h | 4 ++-- arch/x86/include/asm/thread_info.h | 2 +- arch/x86/kernel/asm-offsets.c | 5 ++--- arch/x86/kernel/asm-offsets_32.c | 4 ++-- arch/x86/kernel/cpu/common.c | 29 +++++++++++++++++++---------- arch/x86/kernel/ioport.c | 2 +- arch/x86/kernel/process.c | 6 +++--- arch/x86/kernel/process_32.c | 2 +- arch/x86/kernel/process_64.c | 2 +- arch/x86/kernel/traps.c | 4 ++-- arch/x86/lib/delay.c | 4 ++-- arch/x86/xen/enlighten_pv.c | 2 +- 16 files changed, 60 insertions(+), 48 deletions(-) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 3629bcbf85a2dc..bd8b57a5c874bc 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -942,7 +942,7 @@ ENTRY(debug) /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx - addl $CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx + addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ cmpl $SIZEOF_SYSENTER_stack, %ecx jb .Ldebug_from_sysenter_stack @@ -986,7 +986,7 @@ ENTRY(nmi) /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx - addl $CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx + addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ cmpl $SIZEOF_SYSENTER_stack, %ecx jb .Lnmi_from_sysenter_stack diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index ba0933532f2f6c..6abe3fcaece997 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -154,7 +154,7 @@ END(native_usergs_sysret64) _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip) /* The top word of the SYSENTER stack is hot and is usable as scratch space. */ -#define RSP_SCRATCH CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + \ +#define RSP_SCRATCH CPU_ENTRY_AREA_SYSENTER_stack + \ SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA ENTRY(entry_SYSCALL_64_trampoline) @@ -390,7 +390,7 @@ syscall_return_via_sysret: * Save old stack pointer and switch to trampoline stack. */ movq %rsp, %rdi - movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp pushq RSP-RDI(%rdi) /* RSP */ pushq (%rdi) /* RDI */ @@ -719,7 +719,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) * Save old stack pointer and switch to trampoline stack. */ movq %rsp, %rdi - movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp /* Copy the IRET frame to the trampoline stack. */ pushq 6*8(%rdi) /* SS */ @@ -934,7 +934,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt /* * Exception entry points. */ -#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8) +#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) /* * Switch to the thread stack. 
This is called with the IRET frame and diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index cc5d98bdca37f1..94fc4fa141275b 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -56,9 +56,14 @@ struct cpu_entry_area { char gdt[PAGE_SIZE]; /* - * The GDT is just below cpu_tss and thus serves (on x86_64) as a - * a read-only guard page for the SYSENTER stack at the bottom - * of the TSS region. + * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as + * a a read-only guard page. + */ + struct SYSENTER_stack_page SYSENTER_stack_page; + + /* + * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because + * we need task switches to work, and task switches write to the TSS. */ struct tss_struct tss; @@ -247,7 +252,7 @@ static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu) { - return &get_cpu_entry_area(cpu)->tss.SYSENTER_stack; + return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack; } #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index f933869470b89b..e8991d7f7034fe 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -340,13 +340,11 @@ struct SYSENTER_stack { unsigned long words[64]; }; -struct tss_struct { - /* - * Space for the temporary SYSENTER stack, used for SYSENTER - * and the entry trampoline as well. - */ - struct SYSENTER_stack SYSENTER_stack; +struct SYSENTER_stack_page { + struct SYSENTER_stack stack; +} __aligned(PAGE_SIZE); +struct tss_struct { /* * The fixed hardware portion. This must not cross a page boundary * at risk of violating the SDM's advice and potentially triggering @@ -363,7 +361,7 @@ struct tss_struct { unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; } __aligned(PAGE_SIZE); -DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss); +DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw); /* * sizeof(unsigned long) coming from an extra "long" at the end @@ -378,7 +376,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss); #ifdef CONFIG_X86_32 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); #else -#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1 +/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. 
*/ +#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1 #endif /* @@ -538,7 +537,7 @@ static inline void native_set_iopl_mask(unsigned mask) static inline void native_load_sp0(unsigned long sp0) { - this_cpu_write(cpu_tss.x86_tss.sp0, sp0); + this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); } static inline void native_swapgs(void) diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index cbc71e73bd3216..9b6df68d8fd1eb 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -79,10 +79,10 @@ do { \ static inline void refresh_sysenter_cs(struct thread_struct *thread) { /* Only happens when SEP is enabled, no need to test "SEP"arately: */ - if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs)) + if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs)) return; - this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs); + this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs); wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); } #endif diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 44a04999791e05..00223333821a96 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -207,7 +207,7 @@ static inline int arch_within_stack_frames(const void * const stack, #else /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_64 -# define cpu_current_top_of_stack (cpu_tss + TSS_sp1) +# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1) #endif #endif diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 46c0995344aaad..cd360a5e0dca30 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -94,10 +94,9 @@ void common(void) { BLANK(); DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); - OFFSET(TSS_STRUCT_SYSENTER_stack, tss_struct, SYSENTER_stack); - DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack)); - /* Layout info for cpu_entry_area */ OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss); OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline); + OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page); + DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack)); } diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 52ce4ea16e5386..7d20d9c0b3d69c 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -47,8 +47,8 @@ void foo(void) BLANK(); /* Offset from the sysenter stack to tss.sp0 */ - DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - - offsetofend(struct tss_struct, SYSENTER_stack)); + DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) - + offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack)); #ifdef CONFIG_CC_STACKPROTECTOR BLANK(); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3de7480e4f3267..c2eada1056de8c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -487,6 +487,9 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); #endif +static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page, + SYSENTER_stack_storage); + static void __init set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) { @@ -500,23 +503,29 @@ static void __init setup_cpu_entry_area(int cpu) #ifdef CONFIG_X86_64 extern char _entry_trampoline[]; - /* On 64-bit systems, we use a read-only fixmap GDT. 
*/ + /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */ pgprot_t gdt_prot = PAGE_KERNEL_RO; + pgprot_t tss_prot = PAGE_KERNEL_RO; #else /* * On native 32-bit systems, the GDT cannot be read-only because * our double fault handler uses a task gate, and entering through - * a task gate needs to change an available TSS to busy. If the GDT - * is read-only, that will triple fault. + * a task gate needs to change an available TSS to busy. If the + * GDT is read-only, that will triple fault. The TSS cannot be + * read-only because the CPU writes to it on task switches. * - * On Xen PV, the GDT must be read-only because the hypervisor requires - * it. + * On Xen PV, the GDT must be read-only because the hypervisor + * requires it. */ pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ? PAGE_KERNEL_RO : PAGE_KERNEL; + pgprot_t tss_prot = PAGE_KERNEL; #endif __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); + set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page), + per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1, + PAGE_KERNEL); /* * The Intel SDM says (Volume 3, 7.2.1): @@ -539,9 +548,9 @@ static void __init setup_cpu_entry_area(int cpu) offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss), - &per_cpu(cpu_tss, cpu), + &per_cpu(cpu_tss_rw, cpu), sizeof(struct tss_struct) / PAGE_SIZE, - PAGE_KERNEL); + tss_prot); #ifdef CONFIG_X86_32 per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); @@ -1305,7 +1314,7 @@ void enable_sep_cpu(void) return; cpu = get_cpu(); - tss = &per_cpu(cpu_tss, cpu); + tss = &per_cpu(cpu_tss_rw, cpu); /* * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- @@ -1575,7 +1584,7 @@ void cpu_init(void) if (cpu) load_ucode_ap(); - t = &per_cpu(cpu_tss, cpu); + t = &per_cpu(cpu_tss_rw, cpu); oist = &per_cpu(orig_ist, cpu); #ifdef CONFIG_NUMA @@ -1667,7 +1676,7 @@ void cpu_init(void) { int cpu = smp_processor_id(); struct task_struct *curr = current; - struct tss_struct *t = &per_cpu(cpu_tss, cpu); + struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu); wait_for_master_cpu(cpu); diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 3feb648781c470..2f723301eb58fc 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -67,7 +67,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) * because the ->io_bitmap_max value must match the bitmap * contents: */ - tss = &per_cpu(cpu_tss, get_cpu()); + tss = &per_cpu(cpu_tss_rw, get_cpu()); if (turn_on) bitmap_clear(t->io_bitmap_ptr, from, num); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 6a04287f222b29..51741597840936 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -47,7 +47,7 @@ * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. */ -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { +__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = { .x86_tss = { /* * .sp0 is only used when entering ring 0 from a lower @@ -82,7 +82,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { .io_bitmap = { [0 ... 
IO_BITMAP_LONGS] = ~0 }, #endif }; -EXPORT_PER_CPU_SYMBOL(cpu_tss); +EXPORT_PER_CPU_SYMBOL(cpu_tss_rw); DEFINE_PER_CPU(bool, __tss_limit_invalid); EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); @@ -111,7 +111,7 @@ void exit_thread(struct task_struct *tsk) struct fpu *fpu = &t->fpu; if (bp) { - struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu()); + struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu()); t->io_bitmap_ptr = NULL; clear_thread_flag(TIF_IO_BITMAP); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 45bf0c5f93e151..5224c609918416 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss, cpu); + struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 157f8181691524..c7546623201631 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -399,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss, cpu); + struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(irq_count) != -1); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5ade4f89a6d169..74136fd16f491f 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -364,7 +364,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) regs->cs == __KERNEL_CS && regs->ip == (unsigned long)native_irq_return_iret) { - struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1; + struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; /* * regs->sp points to the failing IRET frame on the @@ -649,7 +649,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) * exception came from the IRET target. */ struct bad_iret_stack *new_stack = - (struct bad_iret_stack *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1; + (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; /* Copy the IRET target to the new stack. */ memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 553f8fd23cc473..4846eff7e4c8b1 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -107,10 +107,10 @@ static void delay_mwaitx(unsigned long __loops) delay = min_t(u64, MWAITX_MAX_LOOPS, loops); /* - * Use cpu_tss as a cacheline-aligned, seldomly + * Use cpu_tss_rw as a cacheline-aligned, seldomly * accessed per-cpu variable as the monitor target. 
*/ - __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0); + __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0); /* * AMD, like Intel, supports the EAX hint and EAX=0xf diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index fbd054d6ac973d..ae3a071e1d0f4e 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -818,7 +818,7 @@ static void xen_load_sp0(unsigned long sp0) mcs = xen_mc_entry(0); MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0); xen_mc_issue(PARAVIRT_LAZY_CPU); - this_cpu_write(cpu_tss.x86_tss.sp0, sp0); + this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); } void xen_set_iopl_mask(unsigned mask) From 96e63420e281205e98cca0b6dbb5a79af98a5efd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:07:30 +0100 Subject: [PATCH 0900/3326] x86/paravirt: Dont patch flush_tlb_single commit a035795499ca1c2bd1928808d1a156eda1420383 upstream. native_flush_tlb_single() will be changed with the upcoming PAGE_TABLE_ISOLATION feature. This requires to have more code in there than INVLPG. Remove the paravirt patching for it. Signed-off-by: Thomas Gleixner Reviewed-by: Josh Poimboeuf Reviewed-by: Juergen Gross Acked-by: Peter Zijlstra Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Cc: michael.schwarz@iaik.tugraz.at Cc: moritz.lipp@iaik.tugraz.at Cc: richard.fellner@student.tugraz.at Link: https://lkml.kernel.org/r/20171204150606.828111617@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/paravirt_patch_64.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index ac0be8283325ed..9edadabf04f66c 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); -DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)"); DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); @@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, PATCH_SITE(pv_mmu_ops, read_cr2); PATCH_SITE(pv_mmu_ops, read_cr3); PATCH_SITE(pv_mmu_ops, write_cr3); - PATCH_SITE(pv_mmu_ops, flush_tlb_single); PATCH_SITE(pv_cpu_ops, wbinvd); #if defined(CONFIG_PARAVIRT_SPINLOCKS) case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): From 1eb2e614fd17ae8de373aaf9919b803b0a6cfc27 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:07:31 +0100 Subject: [PATCH 0901/3326] x86/paravirt: Provide a way to check for hypervisors commit 79cc74155218316b9a5d28577c7077b2adba8e58 upstream. There is no generic way to test whether a kernel is running on a specific hypervisor. But that's required to prevent the upcoming user address space separation feature in certain guest modes. Make the hypervisor type enum unconditionally available and provide a helper function which allows to test for a specific type. 
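A minimal usage sketch of the new helper follows; the caller and its policy are invented for illustration, while hypervisor_is_type() and X86_HYPER_NATIVE come from the hunk below. The !CONFIG_HYPERVISOR_GUEST stub still reports "native" correctly, so such a caller needs no #ifdef.

#include <asm/hypervisor.h>

/* Hypothetical caller: enable some behaviour only on bare metal. */
static bool example_wants_bare_metal_path(void)
{
	/* True on bare metal, with or without CONFIG_HYPERVISOR_GUEST. */
	return hypervisor_is_type(X86_HYPER_NATIVE);
}
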
Signed-off-by: Thomas Gleixner Reviewed-by: Juergen Gross Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.912938129@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/hypervisor.h | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 1b0a5abcd8aeb6..96aa6b9884dc5b 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -20,16 +20,7 @@ #ifndef _ASM_X86_HYPERVISOR_H #define _ASM_X86_HYPERVISOR_H -#ifdef CONFIG_HYPERVISOR_GUEST - -#include -#include -#include - -/* - * x86 hypervisor information - */ - +/* x86 hypervisor types */ enum x86_hypervisor_type { X86_HYPER_NATIVE = 0, X86_HYPER_VMWARE, @@ -39,6 +30,12 @@ enum x86_hypervisor_type { X86_HYPER_KVM, }; +#ifdef CONFIG_HYPERVISOR_GUEST + +#include +#include +#include + struct hypervisor_x86 { /* Hypervisor name */ const char *name; @@ -58,7 +55,15 @@ struct hypervisor_x86 { extern enum x86_hypervisor_type x86_hyper_type; extern void init_hypervisor_platform(void); +static inline bool hypervisor_is_type(enum x86_hypervisor_type type) +{ + return x86_hyper_type == type; +} #else static inline void init_hypervisor_platform(void) { } +static inline bool hypervisor_is_type(enum x86_hypervisor_type type) +{ + return type == X86_HYPER_NATIVE; +} #endif /* CONFIG_HYPERVISOR_GUEST */ #endif /* _ASM_X86_HYPERVISOR_H */ From 8388d287e361a2fd0a39bece30a736d692d5c3d8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:07:32 +0100 Subject: [PATCH 0902/3326] x86/cpufeatures: Make CPU bugs sticky commit 6cbd2171e89b13377261d15e64384df60ecb530e upstream. There is currently no way to force CPU bug bits like CPU feature bits. That makes it impossible to set a bug bit once at boot and have it stick for all upcoming CPUs. Extend the force set/clear arrays to handle bug bits as well. Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171204150606.992156574@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeature.h | 2 ++ arch/x86/include/asm/processor.h | 4 ++-- arch/x86/kernel/cpu/common.c | 6 +++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index bf6a76202a779e..ea9a7dde62e5c4 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -135,6 +135,8 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); set_bit(bit, (unsigned long *)cpu_caps_set); \ } while (0) +#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) + #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS) /* * Static testing of CPU features. Used the same as boot_cpu_has(). diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index e8991d7f7034fe..da943411d3d88b 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -163,8 +163,8 @@ extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; extern struct x86_hw_tss doublefault_tss; -extern __u32 cpu_caps_cleared[NCAPINTS]; -extern __u32 cpu_caps_set[NCAPINTS]; +extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; +extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS]; #ifdef CONFIG_SMP DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c2eada1056de8c..034900623adfe8 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -452,8 +452,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c) return NULL; /* Not found */ } -__u32 cpu_caps_cleared[NCAPINTS]; -__u32 cpu_caps_set[NCAPINTS]; +__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; +__u32 cpu_caps_set[NCAPINTS + NBUGINTS]; void load_percpu_segment(int cpu) { @@ -812,7 +812,7 @@ static void apply_forced_caps(struct cpuinfo_x86 *c) { int i; - for (i = 0; i < NCAPINTS; i++) { + for (i = 0; i < NCAPINTS + NBUGINTS; i++) { c->x86_capability[i] &= ~cpu_caps_cleared[i]; c->x86_capability[i] |= cpu_caps_set[i]; } From 23b22186b27f6f16cddc69fd12b607c71de6451f Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Mon, 9 Oct 2017 11:11:49 +0200 Subject: [PATCH 0903/3326] optee: fix invalid of_node_put() in optee_driver_init() commit f044113113dd95ba73916bde10e804d3cdfa2662 upstream. The first node supplied to of_find_matching_node() has its reference counter decreased as part of call to that function. In optee_driver_init() after calling of_find_matching_node() it's invalid to call of_node_put() on the supplied node again. So remove the invalid call to of_node_put(). 
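The corrected call sequence, reduced to a stand-alone sketch (the node and table names are illustrative; only the reference-counting rule is the point):

#include <linux/of.h>

static int example_find_optee_node(const struct of_device_id *matches)
{
	struct device_node *fw_np, *np;

	fw_np = of_find_node_by_name(NULL, "firmware");
	if (!fw_np)
		return -ENODEV;

	/*
	 * of_find_matching_node() drops the reference it was handed on
	 * fw_np, so calling of_node_put(fw_np) here again would underflow
	 * the refcount. Only np still needs to be released by the caller.
	 */
	np = of_find_matching_node(fw_np, matches);
	if (!np)
		return -ENODEV;

	/* ... use np ... */

	of_node_put(np);
	return 0;
}
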
Reported-by: Alex Shi Signed-off-by: Jens Wiklander Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/tee/optee/core.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 7952357df9c862..edb6e4e9ef3acb 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -590,7 +590,6 @@ static int __init optee_driver_init(void) return -ENODEV; np = of_find_matching_node(fw_np, optee_match); - of_node_put(fw_np); if (!np) return -ENODEV; From b04c22da18b53c7be730478282f7e6f62cef9425 Mon Sep 17 00:00:00 2001 From: Derek Basehore Date: Tue, 29 Aug 2017 13:34:34 -0700 Subject: [PATCH 0904/3326] backlight: pwm_bl: Fix overflow condition [ Upstream commit 5d0c49acebc9488e37db95f1d4a55644e545ffe7 ] This fixes an overflow condition that can happen with high max brightness and period values in compute_duty_cycle. This fixes it by using a 64 bit variable for computing the duty cycle. Signed-off-by: Derek Basehore Acked-by: Thierry Reding Reviewed-by: Brian Norris Signed-off-by: Lee Jones Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/backlight/pwm_bl.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 9bd17682655a53..1c2289ddd555a6 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb) static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) { unsigned int lth = pb->lth_brightness; - int duty_cycle; + u64 duty_cycle; if (pb->levels) duty_cycle = pb->levels[brightness]; else duty_cycle = brightness; - return (duty_cycle * (pb->period - lth) / pb->scale) + lth; + duty_cycle *= pb->period - lth; + do_div(duty_cycle, pb->scale); + + return duty_cycle + lth; } static int pwm_backlight_update_status(struct backlight_device *bl) From 5d583a7e2d92a723e9833604f760517a64f630cc Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Thu, 12 Oct 2017 22:10:08 +0530 Subject: [PATCH 0905/3326] drm: Add retries for lspcon mode detection [ Upstream commit f687e25a7a245952349f1f9f9cc238ac5a3be258 ] >From the CI builds, its been observed that during a driver reload/insert, dp dual mode read function sometimes fails to read from LSPCON device over i2c-over-aux channel. This patch: - adds some delay and few retries, allowing a scope for these devices to settle down and respond. - changes one error log's level from ERROR->DEBUG as we want to call it an error only after all the retries are exhausted. 
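The retry idiom described above, pulled out into a stand-alone helper so its shape is easier to see (illustrative only; the real change is the hunk below, which open-codes the same loop):

#include <linux/delay.h>
#include <drm/drm_dp_dual_mode_helper.h>

/* Give a freshly reset LSPCON a few chances to answer over i2c-over-aux. */
static int example_lspcon_read_mode(struct i2c_adapter *adapter, u8 *data)
{
	int ret = -EIO;
	int retry;

	for (retry = 0; retry < 6; retry++) {
		if (retry)
			usleep_range(500, 1000);

		ret = drm_dp_dual_mode_read(adapter,
					    DP_DUAL_MODE_LSPCON_CURRENT_MODE,
					    data, sizeof(*data));
		if (!ret)
			break;
	}

	return ret;
}
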
V2: Addressed review comments from Jani (for loop for retry) V3: Addressed review comments from Imre (break on partial read too) V3: Addressed review comments from Ville/Imre (Add the retries exclusively for LSPCON, not for all dp_dual_mode devices) V4: Added r-b from Imre, sending it to dri-devel (Jani) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102294 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102295 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102359 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103186 Cc: Ville Syrjala Cc: Imre Deak Cc: Jani Nikula Reviewed-by: Imre Deak Acked-by: Dave Airlie Signed-off-by: Shashank Sharma Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1507826408-19322-1-git-send-email-shashank.sharma@intel.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_dp_dual_mode_helper.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 0ef9011a185635..02a50929af6759 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -410,6 +410,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter, { u8 data; int ret = 0; + int retry; if (!mode) { DRM_ERROR("NULL input\n"); @@ -417,10 +418,19 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter, } /* Read Status: i2c over aux */ - ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE, - &data, sizeof(data)); + for (retry = 0; retry < 6; retry++) { + if (retry) + usleep_range(500, 1000); + + ret = drm_dp_dual_mode_read(adapter, + DP_DUAL_MODE_LSPCON_CURRENT_MODE, + &data, sizeof(data)); + if (!ret) + break; + } + if (ret < 0) { - DRM_ERROR("LSPCON read(0x80, 0x41) failed\n"); + DRM_DEBUG_KMS("LSPCON read(0x80, 0x41) failed\n"); return -EFAULT; } From a7455b113feff0b99cad533c63bc6b877c1a1960 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Thu, 12 Oct 2017 16:36:58 +0800 Subject: [PATCH 0906/3326] clk: sunxi-ng: nm: Check if requested rate is supported by fractional clock [ Upstream commit 4cdbc40d64d4b8303a97e29a52862e4d99502beb ] The round_rate callback for N-M-factor style clocks does not check if the requested clock rate is supported by the fractional clock mode. While this doesn't affect usage in practice, since the clock rates are also supported through N-M factors, it does not match the set_rate code. Add a check to the round_rate callback so it matches the set_rate callback. 
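One way to keep the two callbacks from drifting apart again is to let both ask a single predicate; a small sketch of that idea (hypothetical refactor inside ccu_nm.c, not part of this patch):

/*
 * Both .round_rate and .set_rate would call this, so the two paths
 * cannot disagree about when the fractional fast path applies.
 */
static bool example_nm_uses_frac(struct ccu_nm *nm, unsigned long rate)
{
	return ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate);
}
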
Fixes: 6174a1e24b0d ("clk: sunxi-ng: Add N-M-factor clock support") Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/sunxi-ng/ccu_nm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c index a32158e8f2e35a..84a5e7f17f6f54 100644 --- a/drivers/clk/sunxi-ng/ccu_nm.c +++ b/drivers/clk/sunxi-ng/ccu_nm.c @@ -99,6 +99,9 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate, struct ccu_nm *nm = hw_to_ccu_nm(hw); struct _ccu_nm _nm; + if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) + return rate; + _nm.min_n = nm->n.min ?: 1; _nm.max_n = nm->n.max ?: 1 << nm->n.width; _nm.min_m = 1; From 9fe2989cdf3d433ab07d9e5979c5a811e26dd925 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Thu, 12 Oct 2017 16:36:57 +0800 Subject: [PATCH 0907/3326] clk: sunxi-ng: sun5i: Fix bit offset of audio PLL post-divider [ Upstream commit d51fe3ba9773c8b6fc79f82bbe75d64baf604292 ] The post-divider for the audio PLL is in bits [29:26], as specified in the user manual, not [19:16] as currently programmed in the code. The post-divider has a default register value of 2, i.e. a divider of 3. This means the clock rate fed to the audio codec would be off. This was discovered when porting sigma-delta modulation for the PLL to sun5i, which needs the post-divider to be 1. Fix the bit offset, so we do actually force the post-divider to a certain value. Fixes: 5e73761786d6 ("clk: sunxi-ng: Add sun5i CCU driver") Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/sunxi-ng/ccu-sun5i.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index ab9e850b370783..2f385a57cd911c 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -982,8 +982,8 @@ static void __init sun5i_ccu_init(struct device_node *node, /* Force the PLL-Audio-1x divider to 4 */ val = readl(reg + SUN5I_PLL_AUDIO_REG); - val &= ~GENMASK(19, 16); - writel(val | (3 << 16), reg + SUN5I_PLL_AUDIO_REG); + val &= ~GENMASK(29, 26); + writel(val | (3 << 26), reg + SUN5I_PLL_AUDIO_REG); /* * Use the peripheral PLL as the AHB parent, instead of CPU / From 714abd2d6996bb36b334dd15c6c52d3ad00766a0 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Wed, 4 Oct 2017 01:00:08 +0200 Subject: [PATCH 0908/3326] crypto: crypto4xx - increase context and scatter ring buffer elements [ Upstream commit 778f81d6cdb7d25360f082ac0384d5103f04eca5 ] If crypto4xx is used in conjunction with dm-crypt, the available ring buffer elements are not enough to handle the load properly. On an aes-cbc-essiv:sha256 encrypted swap partition the read performance is abyssal: (tested with hdparm -t) /dev/mapper/swap_crypt: Timing buffered disk reads: 14 MB in 3.68 seconds = 3.81 MB/sec The patch increases both PPC4XX_NUM_SD and PPC4XX_NUM_PD to 256. This improves the performance considerably: /dev/mapper/swap_crypt: Timing buffered disk reads: 104 MB in 3.03 seconds = 34.31 MB/sec Furthermore, PPC4XX_LAST_SD, PPC4XX_LAST_GD and PPC4XX_LAST_PD can be easily calculated from their respective PPC4XX_NUM_* constant. 
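A short sketch of why expressing the *_LAST_* values in terms of the *_NUM_* sizes is worth doing (the wrap helper below is invented for illustration; only the NUM/LAST relationship comes from the patch):

/*
 * PPC4XX_LAST_PD is now PPC4XX_NUM_PD - 1, i.e. the last valid ring
 * index, so a wrap test written against it automatically follows any
 * resize of the ring, such as the 64 -> 256 change in this patch.
 */
static inline u32 example_next_pd_idx(u32 idx)
{
	return (idx == PPC4XX_LAST_PD) ? 0 : idx + 1;
}
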
Signed-off-by: Christian Lamparter Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/amcc/crypto4xx_core.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index ecfdcfe3698d62..4f41d6da5acca7 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -34,12 +34,12 @@ #define PPC405EX_CE_RESET 0x00000008 #define CRYPTO4XX_CRYPTO_PRIORITY 300 -#define PPC4XX_LAST_PD 63 -#define PPC4XX_NUM_PD 64 -#define PPC4XX_LAST_GD 1023 +#define PPC4XX_NUM_PD 256 +#define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1) #define PPC4XX_NUM_GD 1024 -#define PPC4XX_LAST_SD 63 -#define PPC4XX_NUM_SD 64 +#define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1) +#define PPC4XX_NUM_SD 256 +#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1) #define PPC4XX_SD_BUFFER_SIZE 2048 #define PD_ENTRY_INUSE 1 From bd5139895727bcf97a411bf17fc137fb0d57f55c Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Sun, 8 Oct 2017 11:39:49 +0200 Subject: [PATCH 0909/3326] crypto: lrw - Fix an error handling path in 'create()' [ Upstream commit 616129cc6e75fb4da6681c16c981fa82dfe5e4c7 ] All error handling paths 'goto err_drop_spawn' except this one. In order to avoid some resources leak, we should do it as well here. Fixes: 700cb3f5fe75 ("crypto: lrw - Convert to skcipher") Signed-off-by: Christophe JAILLET Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- crypto/lrw.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crypto/lrw.c b/crypto/lrw.c index a8bfae4451bfcb..eb681e9fe5743f 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -610,8 +610,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ecb_name[len - 1] = 0; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; + "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) { + err = -ENAMETOOLONG; + goto err_drop_spawn; + } } inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; From af826fdfb14c51fc4416993d0fcef8f4aa43b54c Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 29 Sep 2017 11:22:15 +0100 Subject: [PATCH 0910/3326] rtc: pl031: make interrupt optional [ Upstream commit 5b64a2965dfdfca8039e93303c64e2b15c19ff0c ] On some platforms, the interrupt for the PL031 is optional. Avoid trying to claim the interrupt if it's not specified. 
Reviewed-by: Linus Walleij Signed-off-by: Russell King Signed-off-by: Alexandre Belloni Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/rtc/rtc-pl031.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index e1687e19c59f4a..a30f24cb6c83c0 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -308,7 +308,8 @@ static int pl031_remove(struct amba_device *adev) dev_pm_clear_wake_irq(&adev->dev); device_init_wakeup(&adev->dev, false); - free_irq(adev->irq[0], ldata); + if (adev->irq[0]) + free_irq(adev->irq[0], ldata); rtc_device_unregister(ldata->rtc); iounmap(ldata->base); kfree(ldata); @@ -381,12 +382,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_rtc; } - if (request_irq(adev->irq[0], pl031_interrupt, - vendor->irqflags, "rtc-pl031", ldata)) { - ret = -EIO; - goto out_no_irq; + if (adev->irq[0]) { + ret = request_irq(adev->irq[0], pl031_interrupt, + vendor->irqflags, "rtc-pl031", ldata); + if (ret) + goto out_no_irq; + dev_pm_set_wake_irq(&adev->dev, adev->irq[0]); } - dev_pm_set_wake_irq(&adev->dev, adev->irq[0]); return 0; out_no_irq: From f3a68b4b82f3f19b80b84cf11753082e7fb6ec8f Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Thu, 5 Oct 2017 18:07:24 -0700 Subject: [PATCH 0911/3326] kvm, mm: account kvm related kmem slabs to kmemcg [ Upstream commit 46bea48ac241fe0b413805952dda74dd0c09ba8b ] The kvm slabs can consume a significant amount of system memory and indeed in our production environment we have observed that a lot of machines are spending significant amount of memory that can not be left as system memory overhead. Also the allocations from these slabs can be triggered directly by user space applications which has access to kvm and thus a buggy application can leak such memory. So, these caches should be accounted to kmemcg. Signed-off-by: Shakeel Butt Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/mmu.c | 4 ++-- virt/kvm/kvm_main.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7a69cf05371119..13ebeedcec07b1 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5476,13 +5476,13 @@ int kvm_mmu_module_init(void) pte_list_desc_cache = kmem_cache_create("pte_list_desc", sizeof(struct pte_list_desc), - 0, 0, NULL); + 0, SLAB_ACCOUNT, NULL); if (!pte_list_desc_cache) goto nomem; mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", sizeof(struct kvm_mmu_page), - 0, 0, NULL); + 0, SLAB_ACCOUNT, NULL); if (!mmu_page_header_cache) goto nomem; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 484e8820c382b3..2447d7c017e707 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4018,7 +4018,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, if (!vcpu_align) vcpu_align = __alignof__(struct kvm_vcpu); kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, - 0, NULL); + SLAB_ACCOUNT, NULL); if (!kvm_vcpu_cache) { r = -ENOMEM; goto out_free_3; From a5171fe705fb31bf3a0d93e8d69285d2b5b691c9 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Tue, 10 Oct 2017 12:42:56 -0500 Subject: [PATCH 0912/3326] net: phy: at803x: Change error to EINVAL for invalid MAC [ Upstream commit fc7556877d1748ac00958822a0a3bba1d4bd9e0d ] Change the return error code to EINVAL if the MAC address is not valid in the set_wol function. 
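The error-code distinction being applied here, in a reduced form (illustrative fragment, not taken from at803x):

#include <linux/etherdevice.h>

/*
 * -EFAULT is for faulting accesses, e.g. a failed copy from user space;
 * a request that parses fine but carries an invalid value, such as a
 * malformed MAC address, should return -EINVAL instead.
 */
static int example_validate_wol_mac(const u8 *mac)
{
	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	return 0;
}
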
Signed-off-by: Dan Murphy Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/at803x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index c1e52b9dc58d39..5f93e6add56394 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -167,7 +167,7 @@ static int at803x_set_wol(struct phy_device *phydev, mac = (const u8 *) ndev->dev_addr; if (!is_valid_ether_addr(mac)) - return -EFAULT; + return -EINVAL; for (i = 0; i < 3; i++) { phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, From bfd66a406fe7e590055c1d6714adc697f18664c8 Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 8 Sep 2017 10:10:31 +0200 Subject: [PATCH 0913/3326] PCI: Avoid bus reset if bridge itself is broken [ Upstream commit 357027786f3523d26f42391aa4c075b8495e5d28 ] When checking to see if a PCI bus can safely be reset, we previously checked to see if any of the children had their PCI_DEV_FLAGS_NO_BUS_RESET flag set. Children marked with that flag are known not to behave well after a bus reset. Some PCIe root port bridges also do not behave well after a bus reset, sometimes causing the devices behind the bridge to become unusable. Add a check for PCI_DEV_FLAGS_NO_BUS_RESET being set in the bridge device to allow these bridges to be flagged, and prevent their secondary buses from being reset. Signed-off-by: David Daney [jglauber@cavium.com: fixed typo] Signed-off-by: Jan Glauber Signed-off-by: Bjorn Helgaas Reviewed-by: Alex Williamson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/pci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6078dfc11b112a..74f1c57ab93b60 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4356,6 +4356,10 @@ static bool pci_bus_resetable(struct pci_bus *bus) { struct pci_dev *dev; + + if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) + return false; + list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resetable(dev->subordinate))) From 3aaaf02c110f36dc716780412add4799b2a89e5b Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Wed, 11 Oct 2017 19:33:07 +0530 Subject: [PATCH 0914/3326] scsi: cxgb4i: fix Tx skb leak [ Upstream commit 9b3a081fb62158b50bcc90522ca2423017544367 ] In case of connection reset Tx skb queue can have some skbs which are not transmitted so purge Tx skb queue in release_offload_resources() to avoid skb leak. Signed-off-by: Varun Prakash Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 1d02cf9fe06c5e..30d5f0ef29bbff 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1575,6 +1575,7 @@ static void release_offload_resources(struct cxgbi_sock *csk) csk, csk->state, csk->flags, csk->tid); cxgbi_sock_free_cpl_skbs(csk); + cxgbi_sock_purge_write_queue(csk); if (csk->wr_cred != csk->wr_max_cred) { cxgbi_sock_purge_wr_queue(csk); cxgbi_sock_reset_wr_list(csk); From 6d95d05bafbafc88a13d81e7ad63fcb899de499d Mon Sep 17 00:00:00 2001 From: Sreekanth Reddy Date: Tue, 10 Oct 2017 18:41:18 +0530 Subject: [PATCH 0915/3326] scsi: mpt3sas: Fix IO error occurs on pulling out a drive from RAID1 volume created on two SATA drive [ Upstream commit 2ce9a3645299ba1752873d333d73f67620f4550b ] Whenever an I/O for a RAID volume fails with IOCStatus MPI2_IOCSTATUS_SCSI_IOC_TERMINATED and SCSIStatus equal to (MPI2_SCSI_STATE_TERMINATED | MPI2_SCSI_STATE_NO_SCSI_STATUS) then return the I/O to SCSI midlayer with "DID_RESET" (i.e. retry the IO infinite times) set in the host byte. Previously, the driver was completing the I/O with "DID_SOFT_ERROR" which causes the I/O to be quickly retried. However, firmware needed more time and hence I/Os were failing. Signed-off-by: Sreekanth Reddy Reviewed-by: Tomas Henzl Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 22998cbd538f9c..33ff691878e285 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -4804,6 +4804,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { scmd->result = DID_RESET << 16; break; + } else if ((scmd->device->channel == RAID_CHANNEL) && + (scsi_state == (MPI2_SCSI_STATE_TERMINATED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) { + scmd->result = DID_RESET << 16; + break; } scmd->result = DID_SOFT_ERROR << 16; break; From 7af9f9cd68c7b6f009c4a0c0d8ea7703aa46a26b Mon Sep 17 00:00:00 2001 From: Stuart Hayes Date: Wed, 4 Oct 2017 10:57:52 -0500 Subject: [PATCH 0916/3326] PCI: Create SR-IOV virtfn/physfn links before attaching driver [ Upstream commit 27d6162944b9b34c32cd5841acd21786637ee743 ] When creating virtual functions, create the "virtfn%u" and "physfn" links in sysfs *before* attaching the driver instead of after. When we attach the driver to the new virtual network interface first, there is a race when the driver attaches to the new sends out an "add" udev event, and the network interface naming software (biosdevname or systemd, for example) tries to look at these links. 
Signed-off-by: Stuart Hayes Signed-off-by: Bjorn Helgaas Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/iov.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index ac41c8be9200ac..0fd8e164339c3e 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -162,7 +162,6 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset) pci_device_add(virtfn, virtfn->bus); - pci_bus_add_device(virtfn); sprintf(buf, "virtfn%u", id); rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); if (rc) @@ -173,6 +172,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset) kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE); + pci_bus_add_device(virtfn); + return 0; failed2: From 349384cd7028affdb23634b56d10da015b080cba Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 29 Sep 2017 14:39:49 -0300 Subject: [PATCH 0917/3326] PM / OPP: Move error message to debug level [ Upstream commit 035ed07208dc501d023873447113f3f178592156 ] On some i.MX6 platforms which do not have speed grading check, opp table will not be created in platform code, so cpufreq driver prints the following error message: cpu cpu0: dev_pm_opp_get_opp_count: OPP table not found (-19) However, this is not really an error in this case because the imx6q-cpufreq driver first calls dev_pm_opp_get_opp_count() and if it fails, it means that platform code does not provide OPP and then dev_pm_opp_of_add_table() will be called. In order to avoid such confusing error message, move it to debug level. It is up to the caller of dev_pm_opp_get_opp_count() to check its return value and decide if it will print an error or not. Signed-off-by: Fabio Estevam Signed-off-by: Rafael J. Wysocki Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/base/power/opp/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index a6de325306933b..0459b120469456 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -296,7 +296,7 @@ int dev_pm_opp_get_opp_count(struct device *dev) opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { count = PTR_ERR(opp_table); - dev_err(dev, "%s: OPP table not found (%d)\n", + dev_dbg(dev, "%s: OPP table not found (%d)\n", __func__, count); return count; } From 66efe26b0b074ef11575b883fc9a93af51aab597 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 27 Aug 2017 08:39:51 +0200 Subject: [PATCH 0918/3326] igb: check memory allocation failure [ Upstream commit 18eb86362a52f0af933cc0fd5e37027317eb2d1c ] Check memory allocation failures and return -ENOMEM in such cases, as already done for other memory allocations in this function. This avoids NULL pointers dereference. 
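The calling pattern the message alludes to, sketched from the consumer side (simplified; imx6q-cpufreq is the motivating caller, but the fragment below is not its actual code):

#include <linux/pm_opp.h>

/*
 * A missing platform-provided OPP table is an expected case here, not an
 * error: the driver simply falls back to the device tree, which is why
 * the core now logs the failed lookup at debug level only.
 */
static int example_init_opp_table(struct device *cpu_dev)
{
	int ret;
	int num = dev_pm_opp_get_opp_count(cpu_dev);

	if (num <= 0) {
		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret < 0)
			return ret;
	}

	return 0;
}
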
Signed-off-by: Christophe JAILLET Tested-by: Aaron Brown Acked-by: PJ Waskiewicz Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/igb/igb_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b0031c5ff767a4..667dbc7d4a4ec6 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3162,6 +3162,8 @@ static int igb_sw_init(struct igb_adapter *adapter) /* Setup and initialize a copy of the hw vlan table array */ adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), GFP_ATOMIC); + if (!adapter->shadow_vfta) + return -ENOMEM; /* This call may decrease the number of queues */ if (igb_init_interrupt_scheme(adapter, true)) { From c817cb56b8d62cf2677988cf5fa70cc515a526e1 Mon Sep 17 00:00:00 2001 From: Lihong Yang Date: Thu, 7 Sep 2017 08:05:46 -0400 Subject: [PATCH 0919/3326] i40e: use the safe hash table iterator when deleting mac filters [ Upstream commit 784548c40d6f43eff2297220ad7800dc04be03c6 ] This patch replaces hash_for_each function with hash_for_each_safe when calling __i40e_del_filter. The hash_for_each_safe function is the right one to use when iterating over a hash table to safely remove a hash entry. Otherwise, incorrect values may be read from freed memory. Detected by CoverityScan, CID 1402048 Read from pointer after free Signed-off-by: Lihong Yang Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4d1e670f490edf..fa25ea512e43e5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2779,6 +2779,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) struct i40e_mac_filter *f; struct i40e_vf *vf; int ret = 0; + struct hlist_node *h; int bkt; /* validate the request */ @@ -2817,7 +2818,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* Delete all the filters for this VSI - we're going to kill it * anyway. */ - hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) __i40e_del_filter(vsi, f); spin_unlock_bh(&vsi->mac_filter_hash_lock); From d1f13dcad56bdd655a29f33b5ac84e0fae48a4eb Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 30 Aug 2017 13:50:39 +0200 Subject: [PATCH 0920/3326] iio: st_sensors: add register mask for status register [ Upstream commit e72a060151e5bb673af24993665e270fc4f674a7 ] Introduce register mask for data-ready status register since pressure sensors (e.g. LPS22HB) export just two channels (BIT(0) and BIT(1)) and BIT(2) is marked reserved while in st_sensors_new_samples_available() value read from status register is masked using 0x7. Moreover do not mask status register using active_scan_mask since now status value is properly masked and if the result is not zero the interrupt has to be consumed by the driver. This fix an issue on LPS25H and LPS331AP where channel definition is swapped respect to status register. 
Furthermore that change allows to properly support new devices (e.g LIS2DW12) that report just ZYXDA (data-ready) field in status register to figure out if the interrupt has been generated by the device. Fixes: 97865fe41322 (iio: st_sensors: verify interrupt event to status) Signed-off-by: Lorenzo Bianconi Reviewed-by: Linus Walleij Signed-off-by: Jonathan Cameron Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/iio/accel/st_accel_core.c | 35 +++++++++++++++---- .../iio/common/st_sensors/st_sensors_core.c | 2 +- .../common/st_sensors/st_sensors_trigger.c | 16 +++------ drivers/iio/gyro/st_gyro_core.c | 15 ++++++-- drivers/iio/magnetometer/st_magn_core.c | 10 ++++-- drivers/iio/pressure/st_pressure_core.c | 15 ++++++-- include/linux/iio/common/st_sensors.h | 7 ++-- 7 files changed, 70 insertions(+), 30 deletions(-) diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 752856b3a84970..379de1829cdb4b 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -164,7 +164,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int2 = 0x00, .addr_ihl = 0x25, .mask_ihl = 0x02, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x23, @@ -236,7 +239,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x23, @@ -318,7 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int2 = 0x00, .addr_ihl = 0x23, .mask_ihl = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, .ig1 = { .en_addr = 0x23, .en_mask = 0x08, @@ -389,7 +398,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .drdy_irq = { .addr = 0x21, .mask_int1 = 0x04, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x21, @@ -451,7 +463,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x21, @@ -569,7 +584,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .drdy_irq = { .addr = 0x21, .mask_int1 = 0x04, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x21, @@ -640,7 +658,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int2 = 0x00, .addr_ihl = 0x25, .mask_ihl = 0x02, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x23, diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 02e833b14db08d..34115f05d5c464 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -470,7 +470,7 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable) * different one. 
Take into account irq status register * to understand if irq trigger can be properly supported */ - if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) + if (sdata->sensor_settings->drdy_irq.stat_drdy.addr) sdata->hw_irq_trigger = enable; return 0; } diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index fa73e679535985..fdcc5a8919587b 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c @@ -31,7 +31,7 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev, int ret; /* How would I know if I can't check it? */ - if (!sdata->sensor_settings->drdy_irq.addr_stat_drdy) + if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) return -EINVAL; /* No scan mask, no interrupt */ @@ -39,23 +39,15 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev, return 0; ret = sdata->tf->read_byte(&sdata->tb, sdata->dev, - sdata->sensor_settings->drdy_irq.addr_stat_drdy, + sdata->sensor_settings->drdy_irq.stat_drdy.addr, &status); if (ret < 0) { dev_err(sdata->dev, "error checking samples available\n"); return ret; } - /* - * the lower bits of .active_scan_mask[0] is directly mapped - * to the channels on the sensor: either bit 0 for - * one-dimensional sensors, or e.g. x,y,z for accelerometers, - * gyroscopes or magnetometers. No sensor use more than 3 - * channels, so cut the other status bits here. - */ - status &= 0x07; - if (status & (u8)indio_dev->active_scan_mask[0]) + if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask) return 1; return 0; @@ -212,7 +204,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, * it was "our" interrupt. */ if (sdata->int_pin_open_drain && - sdata->sensor_settings->drdy_irq.addr_stat_drdy) + sdata->sensor_settings->drdy_irq.stat_drdy.addr) irq_trig |= IRQF_SHARED; err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev), diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index e366422e85127f..2536a8400c9896 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -118,7 +118,10 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { * drain settings, but only for INT1 and not * for the DRDY line on INT2. */ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, @@ -188,7 +191,10 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { * drain settings, but only for INT1 and not * for the DRDY line on INT2. */ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, @@ -253,7 +259,10 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { * drain settings, but only for INT1 and not * for the DRDY line on INT2. 
*/ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 08aafba4481c66..19031a7bce235f 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -317,7 +317,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { }, .drdy_irq = { /* drdy line is routed drdy pin */ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, @@ -361,7 +364,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { .drdy_irq = { .addr = 0x62, .mask_int1 = 0x01, - .addr_stat_drdy = 0x67, + .stat_drdy = { + .addr = 0x67, + .mask = 0x07, + }, }, .multi_read_bit = false, .bootime = 2, diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 34611a8ea2cea5..ea075fcd5a6f6f 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -287,7 +287,10 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x03, + }, }, .multi_read_bit = true, .bootime = 2, @@ -395,7 +398,10 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x03, + }, }, .multi_read_bit = true, .bootime = 2, @@ -454,7 +460,10 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x12, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x03, + }, }, .multi_read_bit = false, .bootime = 2, diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 7b0fa8b5c120d4..ce0ef1c0a30ab4 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -139,7 +139,7 @@ struct st_sensor_das { * @mask_ihl: mask to enable/disable active low on the INT lines. * @addr_od: address to enable/disable Open Drain on the INT lines. * @mask_od: mask to enable/disable Open Drain on the INT lines. - * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt + * struct stat_drdy - status register of DRDY (data ready) interrupt. * struct ig1 - represents the Interrupt Generator 1 of sensors. * @en_addr: address of the enable ig1 register. * @en_mask: mask to write the on/off value for enable. @@ -152,7 +152,10 @@ struct st_sensor_data_ready_irq { u8 mask_ihl; u8 addr_od; u8 mask_od; - u8 addr_stat_drdy; + struct { + u8 addr; + u8 mask; + } stat_drdy; struct { u8 en_addr; u8 en_mask; From afccf6f360df511a432d4c2b99f1eb8d9ac369e6 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Mon, 11 Sep 2017 14:21:31 -0700 Subject: [PATCH 0921/3326] ixgbe: fix use of uninitialized padding [ Upstream commit dcfd6b839c998bc9838e2a47f44f37afbdf3099c ] This patch is resolving Coverity hits where padding in a structure could be used uninitialized. 
- Initialize fwd_cmd.pad/2 before ixgbe_calculate_checksum() - Initialize buffer.pad2/3 before ixgbe_hic_unlocked() Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 6e6ab6f6875ebc..64429a14c630f0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3781,10 +3781,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, fw_cmd.ver_build = build; fw_cmd.ver_sub = sub; fw_cmd.hdr.checksum = 0; - fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, - (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); fw_cmd.pad = 0; fw_cmd.pad2 = 0; + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, &fw_cmd, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 19fbb2f28ea457..8a85217845ae43 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -900,6 +900,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, /* convert offset from words to bytes */ buffer.address = cpu_to_be32((offset + current_word) * 2); buffer.length = cpu_to_be16(words_to_read * 2); + buffer.pad2 = 0; + buffer.pad3 = 0; status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT); From afdbec5d3c652e8d731861c460eee96ef9498315 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 8 Sep 2017 15:37:45 +0100 Subject: [PATCH 0922/3326] IB/rxe: check for allocation failure on elem [ Upstream commit 4831ca9e4a8e48cb27e0a792f73250390827a228 ] The allocation for elem may fail (especially because we're using GFP_ATOMIC) so best to check for a null return. This fixes a potential null pointer dereference when assigning elem->pool. Detected by CoverityScan CID#1357507 ("Dereference null return value") Fixes: 8700e3e7c485 ("Soft RoCE driver") Signed-off-by: Colin Ian King Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/sw/rxe/rxe_pool.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c index c1b5f38f31a58f..3b4916680018a7 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.c +++ b/drivers/infiniband/sw/rxe/rxe_pool.c @@ -404,6 +404,8 @@ void *rxe_alloc(struct rxe_pool *pool) elem = kmem_cache_zalloc(pool_cache(pool), (pool->flags & RXE_POOL_ATOMIC) ? GFP_ATOMIC : GFP_KERNEL); + if (!elem) + return NULL; elem->pool = pool; kref_init(&elem->ref_cnt); From 7535afccf97c9dd1559e0b2d7586a522020e5522 Mon Sep 17 00:00:00 2001 From: Luca Miccio Date: Mon, 9 Oct 2017 16:27:21 +0200 Subject: [PATCH 0923/3326] block,bfq: Disable writeback throttling [ Upstream commit b5dc5d4d1f4ff9032eb6c21a3c571a1317dc9289 ] Similarly to CFQ, BFQ has its write-throttling heuristics, and it is better not to combine them with further write-throttling heuristics of a different nature. So this commit disables write-back throttling for a device if BFQ is used as I/O scheduler for that device. 
Signed-off-by: Luca Miccio Signed-off-by: Paolo Valente Tested-by: Oleksandr Natalenko Tested-by: Lee Tibbert Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- block/bfq-iosched.c | 3 ++- block/blk-wbt.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index a4783da90ba882..0f860cf0d56d12 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -108,6 +108,7 @@ #include "blk-mq-tag.h" #include "blk-mq-sched.h" #include "bfq-iosched.h" +#include "blk-wbt.h" #define BFQ_BFQQ_FNS(name) \ void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ @@ -4775,7 +4776,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) bfq_init_root_group(bfqd->root_group, bfqd); bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); - + wbt_disable_default(q); return 0; out_free: diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 6a9a0f03a67bd9..e59d59c11ebbb3 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -654,7 +654,7 @@ void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on) } /* - * Disable wbt, if enabled by default. Only called from CFQ. + * Disable wbt, if enabled by default. */ void wbt_disable_default(struct request_queue *q) { From 6d7bdad132d59c429dc340f8d10a4977e58813b0 Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Mon, 9 Oct 2017 10:32:48 +0800 Subject: [PATCH 0924/3326] md: always set THREAD_WAKEUP and wake up wqueue if thread existed [ Upstream commit d1d90147c9680aaec4a5757932c2103c42c9c23b ] Since commit 4ad23a976413 ("MD: use per-cpu counter for writes_pending"), the wait_queue is only got invoked if THREAD_WAKEUP is not set previously. With above change, I can see process_metadata_update could always hang on the wait queue, because mddev->thread could stay on 'D' status and the THREAD_WAKEUP flag is not cleared since there are lots of place to wake up mddev->thread. Then deadlock happened as follows: linux175:~ # ps aux|grep md|grep D root 20117 0.0 0.0 0 0 ? D 03:45 0:00 [md0_raid1] root 20125 0.0 0.0 0 0 ? D 03:45 0:00 [md0_cluster_rec] linux175:~ # cat /proc/20117/stack [] dlm_lock_sync+0x94/0xd0 [md_cluster] [] lock_token+0x34/0xd0 [md_cluster] [] metadata_update_start+0x64/0x110 [md_cluster] [] md_update_sb.part.58+0x9b/0x860 [md_mod] [] md_update_sb+0x15/0x30 [md_mod] [] md_check_recovery+0x266/0x490 [md_mod] [] raid1d+0x42/0x810 [raid1] [] md_thread+0x122/0x150 [md_mod] [] kthread+0x101/0x140 linux175:~ # cat /proc/20125/stack [] recv_daemon+0x3f9/0x5c0 [md_cluster] [] md_thread+0x122/0x150 [md_mod] [] kthread+0x101/0x140 So let's revert the part of code in the commit to resovle the problem since we can't get lots of benefits of previous change. 
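The lost wakeup is easier to see with the two call shapes side by side. Below is a compressed, standalone model in C11 atomics (stand-in types, wake() standing in for wake_up()); it illustrates the race, it is not the md code itself:

#include <stdatomic.h>
#include <stdbool.h>

struct md_thread_model {
	atomic_bool wakeup_flag;	/* models THREAD_WAKEUP */
};

static void wake(struct md_thread_model *t)
{
	(void)t;	/* stands in for wake_up(&thread->wqueue) */
}

/* Old form: the wakeup is skipped whenever the flag was already set, so a
 * request that races with an earlier, still-pending one can be swallowed. */
static void wakeup_old(struct md_thread_model *t)
{
	if (!atomic_exchange(&t->wakeup_flag, true))
		wake(t);
}

/* Fixed form: set the flag and wake the queue unconditionally; the thread
 * clears the flag when it actually runs, so no request is ever lost. */
static void wakeup_fixed(struct md_thread_model *t)
{
	atomic_store(&t->wakeup_flag, true);
	wake(t);
}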
Fixes: 4ad23a976413 ("MD: use per-cpu counter for writes_pending") Signed-off-by: Guoqing Jiang Signed-off-by: Shaohua Li Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/md.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index 98ea86309ceb44..6bf093cef9584c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7468,8 +7468,8 @@ void md_wakeup_thread(struct md_thread *thread) { if (thread) { pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); - if (!test_and_set_bit(THREAD_WAKEUP, &thread->flags)) - wake_up(&thread->wqueue); + set_bit(THREAD_WAKEUP, &thread->flags); + wake_up(&thread->wqueue); } } EXPORT_SYMBOL(md_wakeup_thread); From 2f48fc1742a2da6196482695ec3bcd2438d24a13 Mon Sep 17 00:00:00 2001 From: William Tu Date: Thu, 5 Oct 2017 12:07:12 -0700 Subject: [PATCH 0925/3326] ip_gre: check packet length and mtu correctly in erspan tx [ Upstream commit f192970de860d3ab90aa9e2a22853201a57bde78 ] Similarly to early patch for erspan_xmit(), the ARPHDR_ETHER device is the length of the whole ether packet. So skb->len should subtract the dev->hard_header_len. Fixes: 1a66a836da63 ("gre: add collect_md mode to ERSPAN tunnel") Fixes: 84e54fe0a5ea ("gre: introduce native tunnel support for ERSPAN") Signed-off-by: William Tu Cc: Xin Long Cc: David Laight Reviewed-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv4/ip_gre.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 467e44d7587dcf..045331204097a6 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -579,8 +579,8 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, if (gre_handle_offloads(skb, false)) goto err_free_rt; - if (skb->len > dev->mtu) { - pskb_trim(skb, dev->mtu); + if (skb->len > dev->mtu + dev->hard_header_len) { + pskb_trim(skb, dev->mtu + dev->hard_header_len); truncate = true; } @@ -731,8 +731,8 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, if (skb_cow_head(skb, dev->needed_headroom)) goto free_skb; - if (skb->len - dev->hard_header_len > dev->mtu) { - pskb_trim(skb, dev->mtu); + if (skb->len > dev->mtu + dev->hard_header_len) { + pskb_trim(skb, dev->mtu + dev->hard_header_len); truncate = true; } From 9704f8147e88213f2fa580f713b42b08a4f1a7d2 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Fri, 6 Oct 2017 12:06:04 -0700 Subject: [PATCH 0926/3326] ipv6: grab rt->rt6i_ref before allocating pcpu rt [ Upstream commit a94b9367e044ba672c9f4105eb1516ff6ff4948a ] After rwlock is replaced with rcu and spinlock, ip6_pol_route() will be called with only rcu held. That means rt6 route deletion could happen simultaneously with rt6_make_pcpu_rt(). This could potentially cause memory leak if rt6_release() is called right before rt6_make_pcpu_rt() on the same route. This patch grabs rt->rt6i_ref safely before calling rt6_make_pcpu_rt() to make sure rt6_release() will not get triggered while rt6_make_pcpu_rt() is in progress. And rt6_release() is called after rt6_make_pcpu_rt() is finished. Note: As we are incrementing rt->rt6i_ref in ip6_pol_route(), there is a very slim chance that fib6_purge_rt() will be triggered unnecessarily when deleting a route if ip6_pol_route() running on another thread picks this route as well and tries to make pcpu cache for it. Signed-off-by: Wei Wang Signed-off-by: Martin KaFai Lau Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv6/route.c | 58 ++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 598efa8cfe250c..76b47682f77f27 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1055,7 +1055,6 @@ static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt) { - struct fib6_table *table = rt->rt6i_table; struct rt6_info *pcpu_rt, *prev, **p; pcpu_rt = ip6_rt_pcpu_alloc(rt); @@ -1066,28 +1065,20 @@ static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt) return net->ipv6.ip6_null_entry; } - read_lock_bh(&table->tb6_lock); - if (rt->rt6i_pcpu) { - p = this_cpu_ptr(rt->rt6i_pcpu); - prev = cmpxchg(p, NULL, pcpu_rt); - if (prev) { - /* If someone did it before us, return prev instead */ - dst_release_immediate(&pcpu_rt->dst); - pcpu_rt = prev; - } - } else { - /* rt has been removed from the fib6 tree - * before we have a chance to acquire the read_lock. - * In this case, don't brother to create a pcpu rt - * since rt is going away anyway. The next - * dst_check() will trigger a re-lookup. - */ + dst_hold(&pcpu_rt->dst); + p = this_cpu_ptr(rt->rt6i_pcpu); + prev = cmpxchg(p, NULL, pcpu_rt); + if (prev) { + /* If someone did it before us, return prev instead */ + /* release refcnt taken by ip6_rt_pcpu_alloc() */ dst_release_immediate(&pcpu_rt->dst); - pcpu_rt = rt; + /* release refcnt taken by above dst_hold() */ + dst_release_immediate(&pcpu_rt->dst); + dst_hold(&prev->dst); + pcpu_rt = prev; } - dst_hold(&pcpu_rt->dst); + rt6_dst_from_metrics_check(pcpu_rt); - read_unlock_bh(&table->tb6_lock); return pcpu_rt; } @@ -1177,19 +1168,28 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, if (pcpu_rt) { read_unlock_bh(&table->tb6_lock); } else { - /* We have to do the read_unlock first - * because rt6_make_pcpu_route() may trigger - * ip6_dst_gc() which will take the write_lock. - */ - dst_hold(&rt->dst); - read_unlock_bh(&table->tb6_lock); - pcpu_rt = rt6_make_pcpu_route(rt); - dst_release(&rt->dst); + /* atomic_inc_not_zero() is needed when using rcu */ + if (atomic_inc_not_zero(&rt->rt6i_ref)) { + /* We have to do the read_unlock first + * because rt6_make_pcpu_route() may trigger + * ip6_dst_gc() which will take the write_lock. + * + * No dst_hold() on rt is needed because grabbing + * rt->rt6i_ref makes sure rt can't be released. + */ + read_unlock_bh(&table->tb6_lock); + pcpu_rt = rt6_make_pcpu_route(rt); + rt6_release(rt); + } else { + /* rt is already removed from tree */ + read_unlock_bh(&table->tb6_lock); + pcpu_rt = net->ipv6.ip6_null_entry; + dst_hold(&pcpu_rt->dst); + } } trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6); return pcpu_rt; - } } EXPORT_SYMBOL_GPL(ip6_pol_route); From 44ee83c6d6e0b0913f32f3580270daa9c784cc0d Mon Sep 17 00:00:00 2001 From: Andrew Jeffery Date: Fri, 1 Sep 2017 15:08:58 +0930 Subject: [PATCH 0927/3326] leds: pca955x: Don't invert requested value in pca955x_gpio_set_value() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 52ca7d0f7bdad832b291ed979146443533ee79c0 ] The PCA9552 lines can be used either for driving LEDs or as GPIOs. The manual states that for LEDs, the operation is open-drain: The LSn LED select registers determine the source of the LED data. 
00 = output is set LOW (LED on) 01 = output is set high-impedance (LED off; default) 10 = output blinks at PWM0 rate 11 = output blinks at PWM1 rate For GPIOs it suggests a pull-up so that the open-case drives the line high: For use as output, connect external pull-up resistor to the pin and size it according to the DC recommended operating characteristics. LED output pin is HIGH when the output is programmed as high-impedance, and LOW when the output is programmed LOW through the ‘LED selector’ register. The output can be pulse-width controlled when PWM0 or PWM1 are used. Now, I have a hardware design that uses the LED controller to control LEDs. However, for $reasons, we're using the leds-gpio driver to drive the them. The reasons are here are a tangent but lead to the discovery of the inversion, which manifested as the LEDs being set to full brightness at boot when we expected them to be off. As we're driving the LEDs through leds-gpio, this means wending our way through the gpiochip abstractions. So with that in mind we need to describe an active-low GPIO configuration to drive the LEDs as though they were GPIOs. The set() gpiochip callback in leds-pca955x does the following: ... if (val) pca955x_led_set(&led->led_cdev, LED_FULL); else pca955x_led_set(&led->led_cdev, LED_OFF); ... Where LED_FULL = 255. pca955x_led_set() in turn does: ... switch (value) { case LED_FULL: ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON); break; ... Where PCA955X_LS_LED_ON is defined as: #define PCA955X_LS_LED_ON 0x0 /* Output LOW */ So here we have some type confusion: We've crossed domains from GPIO behaviour to LED behaviour without accounting for possible inversions in the process. Stepping back to leds-gpio for a moment, during probe() we call create_gpio_led(), which eventually executes: if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP) { state = gpiod_get_value_cansleep(led_dat->gpiod); if (state < 0) return state; } else { state = (template->default_state == LEDS_GPIO_DEFSTATE_ON); } ... ret = gpiod_direction_output(led_dat->gpiod, state); In the devicetree the GPIO is annotated as active-low, and gpiod_get_value_cansleep() handles this for us: int gpiod_get_value_cansleep(const struct gpio_desc *desc) { int value; might_sleep_if(extra_checks); VALIDATE_DESC(desc); value = _gpiod_get_raw_value(desc); if (value < 0) return value; if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; return value; } _gpiod_get_raw_value() in turn calls through the get() callback for the gpiochip implementation, so returning to our get() implementation in leds-pca955x we find we extract the raw value from hardware: static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset) { struct pca955x *pca955x = gpiochip_get_data(gc); struct pca955x_led *led = &pca955x->leds[offset]; u8 reg = pca955x_read_input(pca955x->client, led->led_num / 8); return !!(reg & (1 << (led->led_num % 8))); } This behaviour is not symmetric with that of set(), where the val is inverted by the driver. 
Closing the loop on the GPIO_ACTIVE_LOW inversions, gpiod_direction_output(), like gpiod_get_value_cansleep(), handles it for us: int gpiod_direction_output(struct gpio_desc *desc, int value) { VALIDATE_DESC(desc); if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; else value = !!value; return _gpiod_direction_output_raw(desc, value); } All-in-all, with a value of 'keep' for default-state property in a leds-gpio child node, the current state of the hardware will in-fact be inverted; precisely the opposite of what was intended. Rework leds-pca955x so that we avoid the incorrect inversion and clarify the semantics with respect to GPIO. Signed-off-by: Andrew Jeffery Reviewed-by: Cédric Le Goater Tested-by: Joel Stanley Tested-by: Matt Spinler Signed-off-by: Jacek Anaszewski Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/leds/leds-pca955x.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 905729191d3e9b..78183f90820eac 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c @@ -61,6 +61,10 @@ #define PCA955X_LS_BLINK0 0x2 /* Blink at PWM0 rate */ #define PCA955X_LS_BLINK1 0x3 /* Blink at PWM1 rate */ +#define PCA955X_GPIO_INPUT LED_OFF +#define PCA955X_GPIO_HIGH LED_OFF +#define PCA955X_GPIO_LOW LED_FULL + enum pca955x_type { pca9550, pca9551, @@ -329,9 +333,9 @@ static int pca955x_set_value(struct gpio_chip *gc, unsigned int offset, struct pca955x_led *led = &pca955x->leds[offset]; if (val) - return pca955x_led_set(&led->led_cdev, LED_FULL); - else - return pca955x_led_set(&led->led_cdev, LED_OFF); + return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_HIGH); + + return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_LOW); } static void pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset, @@ -355,8 +359,11 @@ static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset) static int pca955x_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) { - /* To use as input ensure pin is not driven */ - return pca955x_set_value(gc, offset, 0); + struct pca955x *pca955x = gpiochip_get_data(gc); + struct pca955x_led *led = &pca955x->leds[offset]; + + /* To use as input ensure pin is not driven. */ + return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_INPUT); } static int pca955x_gpio_direction_output(struct gpio_chip *gc, From 56ea88ec49042b34f6ba10280c1fddf0703b5e57 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 4 Oct 2017 20:43:35 +0200 Subject: [PATCH 0928/3326] Bluetooth: hci_uart_set_flow_control: Fix NULL deref when using serdev [ Upstream commit 7841d554809b518a22349e7e39b6b63f8a48d0fb ] Fix a NULL pointer deref (hu->tty) when calling hci_uart_set_flow_control on hci_uart-s using serdev. 
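The fix amounts to branching on which transport backs the HCI uart before any tty field is touched; on the serdev path the serdev core owns flow control, so the function can return early. A minimal standalone sketch of that guard, with the types reduced to what the control flow needs:

#include <stdbool.h>

struct hci_uart_model {
	void *serdev;	/* non-NULL when the device is driven through serdev */
	void *tty;	/* only valid on the tty/line-discipline path */
};

static void set_flow_control(struct hci_uart_model *hu, bool enable)
{
	if (hu->serdev) {
		/* let the serdev core set flow control and RTS, never touch tty */
		(void)enable;
		return;
	}
	/* tty path: hu->tty may safely be dereferenced from here on */
}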
Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/hci_ldisc.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 6e24038057842e..6aef3bde10d70e 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -298,6 +299,12 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) unsigned int set = 0; unsigned int clear = 0; + if (hu->serdev) { + serdev_device_set_flow_control(hu->serdev, !enable); + serdev_device_set_rts(hu->serdev, !enable); + return; + } + if (enable) { /* Disable hardware flow control */ ktermios = tty->termios; From da548d5a6f9eb188b4b0b07dd70f60d0b7ca05d3 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 4 Oct 2017 20:43:36 +0200 Subject: [PATCH 0929/3326] Bluetooth: hci_bcm: Fix setting of irq trigger type [ Upstream commit 227630cccdbb8f8a1b24ac26517b75079c9a69c9 ] This commit fixes 2 issues with host-wake irq trigger type handling in hci_bcm: 1) bcm_setup_sleep sets sleep_params.host_wake_active based on bcm_device.irq_polarity, but bcm_request_irq was always requesting IRQF_TRIGGER_RISING as trigger type independent of irq_polarity. This was a problem when the irq is described as a GpioInt rather then an Interrupt in the DSDT as for GpioInt-s the value passed to request_irq is honored. This commit fixes this by requesting the correct trigger type depending on bcm_device.irq_polarity. 2) bcm_device.irq_polarity was used to directly store an ACPI polarity value (ACPI_ACTIVE_*). This is undesirable because hci_bcm is also used with device-tree and checking for something like ACPI_ACTIVE_LOW in a non ACPI specific function like bcm_request_irq feels wrong. This commit fixes this by renaming irq_polarity to irq_active_low and changing its type to a bool. Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/hci_bcm.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index e2540113d0dac1..73d2d88ddc0398 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -68,7 +68,7 @@ struct bcm_device { u32 init_speed; u32 oper_speed; int irq; - u8 irq_polarity; + bool irq_active_low; #ifdef CONFIG_PM struct hci_uart *hu; @@ -213,7 +213,9 @@ static int bcm_request_irq(struct bcm_data *bcm) } err = devm_request_irq(&bdev->pdev->dev, bdev->irq, bcm_host_wake, - IRQF_TRIGGER_RISING, "host_wake", bdev); + bdev->irq_active_low ? IRQF_TRIGGER_FALLING : + IRQF_TRIGGER_RISING, + "host_wake", bdev); if (err) goto unlock; @@ -253,7 +255,7 @@ static int bcm_setup_sleep(struct hci_uart *hu) struct sk_buff *skb; struct bcm_set_sleep_mode sleep_params = default_sleep_params; - sleep_params.host_wake_active = !bcm->dev->irq_polarity; + sleep_params.host_wake_active = !bcm->dev->irq_active_low; skb = __hci_cmd_sync(hu->hdev, 0xfc27, sizeof(sleep_params), &sleep_params, HCI_INIT_TIMEOUT); @@ -690,10 +692,8 @@ static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = { }; #ifdef CONFIG_ACPI -static u8 acpi_active_low = ACPI_ACTIVE_LOW; - /* IRQ polarity of some chipsets are not defined correctly in ACPI table. 
*/ -static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = { +static const struct dmi_system_id bcm_active_low_irq_dmi_table[] = { { .ident = "Asus T100TA", .matches = { @@ -701,7 +701,6 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = { "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), }, - .driver_data = &acpi_active_low, }, { .ident = "Asus T100CHI", @@ -710,7 +709,6 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = { "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100CHI"), }, - .driver_data = &acpi_active_low, }, { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */ .ident = "Lenovo ThinkPad 8", @@ -718,7 +716,6 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"), }, - .driver_data = &acpi_active_low, }, { } }; @@ -733,13 +730,13 @@ static int bcm_resource(struct acpi_resource *ares, void *data) switch (ares->type) { case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: irq = &ares->data.extended_irq; - dev->irq_polarity = irq->polarity; + dev->irq_active_low = irq->polarity == ACPI_ACTIVE_LOW; break; case ACPI_RESOURCE_TYPE_GPIO: gpio = &ares->data.gpio; if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) - dev->irq_polarity = gpio->polarity; + dev->irq_active_low = gpio->polarity == ACPI_ACTIVE_LOW; break; case ACPI_RESOURCE_TYPE_SERIAL_BUS: @@ -834,11 +831,11 @@ static int bcm_acpi_probe(struct bcm_device *dev) return ret; acpi_dev_free_resource_list(&resources); - dmi_id = dmi_first_match(bcm_wrong_irq_dmi_table); + dmi_id = dmi_first_match(bcm_active_low_irq_dmi_table); if (dmi_id) { bt_dev_warn(dev, "%s: Overwriting IRQ polarity to active low", dmi_id->ident); - dev->irq_polarity = *(u8 *)dmi_id->driver_data; + dev->irq_active_low = true; } return 0; From cbd6b3694a4adec779cdb9266883a21254d5897c Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 29 Aug 2017 05:32:31 -0400 Subject: [PATCH 0930/3326] i40e/i40evf: spread CPU affinity hints across online CPUs only [ Upstream commit be664cbefc50977aaefc868ba6a1109ec9b7449d ] Currently, when setting up the IRQ for a q_vector, we set an affinity hint based on the v_idx of that q_vector. Meaning a loop iterates on v_idx, which is an incremental value, and the cpumask is created based on this value. This is a problem in systems with multiple logical CPUs per core (like in simultaneous multithreading (SMT) scenarios). If we disable some logical CPUs, by turning SMT off for example, we will end up with a sparse cpu_online_mask, i.e., only the first CPU in a core is online, and incremental filling in q_vector cpumask might lead to multiple offline CPUs being assigned to q_vectors. Example: if we have a system with 8 cores each one containing 8 logical CPUs (SMT == 8 in this case), we have 64 CPUs in total. But if SMT is disabled, only the 1st CPU in each core remains online, so the cpu_online_mask in this case would have only 8 bits set, in a sparse way. In general case, when SMT is off the cpu_online_mask has only C bits set: 0, 1*N, 2*N, ..., C*(N-1) where C == # of cores; N == # of logical CPUs per core. In our example, only bits 0, 8, 16, 24, 32, 40, 48, 56 would be set. Instead, we should only assign hints for CPUs which are online. Even better, the kernel already provides a function, cpumask_local_spread() which takes an index and returns a CPU, spreading the interrupts across local NUMA nodes first, and then remote ones if necessary. 
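Numerically, with the example above (8 cores, 8 threads each, SMT off) the online CPUs are 0, 8, 16, ..., 56, so a naive hint of CPU == v_idx points vectors 1 through 7 at offline CPUs, while spreading by index over the online set keeps every hint valid. A small standalone C demo of that index-to-online-CPU mapping, a simplified stand-in for cpumask_local_spread() that ignores NUMA locality:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 64

/* Return the idx-th online CPU, wrapping if idx exceeds the online count. */
static int spread_to_online(const bool *online, int nr_online, int idx)
{
	int i, seen = 0;

	idx %= nr_online;
	for (i = 0; i < NR_CPUS; i++)
		if (online[i] && seen++ == idx)
			return i;
	return 0;
}

int main(void)
{
	bool online[NR_CPUS] = { false };
	int v_idx;

	/* SMT disabled: only the first logical CPU of each 8-thread core is online. */
	for (v_idx = 0; v_idx < NR_CPUS; v_idx += 8)
		online[v_idx] = true;

	for (v_idx = 0; v_idx < 8; v_idx++)
		printf("vector %d -> naive CPU %d, spread CPU %d\n",
		       v_idx, v_idx, spread_to_online(online, 8, v_idx));
	return 0;
}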
Since we generally have a 1:1 mapping between vectors and CPUs, there is no real advantage to spreading vectors to local CPUs first. In order to avoid mismatch of the default XPS hints, we'll pass -1 so that it spreads across all CPUs without regard to the node locality. Note that we don't need to change the q_vector->affinity_mask as this is initialized to cpu_possible_mask, until an actual affinity is set and then notified back to us. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/i40e/i40e_main.c | 16 +++++++++++----- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 9 ++++++--- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ea20aacd5e1d24..b2cde9b16d8242 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2874,14 +2874,15 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; + int cpu; if (!ring->q_vector || !ring->netdev) return; if ((vsi->tc_config.numtc <= 1) && !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { - netif_set_xps_queue(ring->netdev, - get_cpu_mask(ring->q_vector->v_idx), + cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); + netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), ring->queue_index); } @@ -3471,6 +3472,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) int tx_int_idx = 0; int vector, err; int irq_num; + int cpu; for (vector = 0; vector < q_vectors; vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; @@ -3506,10 +3508,14 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) q_vector->affinity_notify.notify = i40e_irq_affinity_notify; q_vector->affinity_notify.release = i40e_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* get_cpu_mask returns a static constant mask with - * a permanent lifetime so it's ok to use here. + /* Spread affinity hints out across online CPUs. + * + * get_cpu_mask returns a static constant mask with + * a permanent lifetime so it's ok to pass to + * irq_set_affinity_hint without making a copy. */ - irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); } vsi->irqs_ready = true; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 1825d956bb005c..1ccad6f30ebf48 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -546,6 +546,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) unsigned int vector, q_vectors; unsigned int rx_int_idx = 0, tx_int_idx = 0; int irq_num, err; + int cpu; i40evf_irq_disable(adapter); /* Decrement for Other and TCP Timer vectors */ @@ -584,10 +585,12 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) q_vector->affinity_notify.release = i40evf_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* get_cpu_mask returns a static constant mask with - * a permanent lifetime so it's ok to use here. + /* Spread the IRQ affinity hints across online CPUs. 
Note that + * get_cpu_mask returns a mask with a permanent lifetime so + * it's safe to use as a hint for irq_set_affinity_hint. */ - irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); } return 0; From 45c911bb1814980da78a34423305310d5d327b39 Mon Sep 17 00:00:00 2001 From: Gabriele Paoloni Date: Thu, 28 Sep 2017 15:33:05 +0100 Subject: [PATCH 0931/3326] PCI/AER: Report non-fatal errors only to the affected endpoint [ Upstream commit 86acc790717fb60fb51ea3095084e331d8711c74 ] Previously, if an non-fatal error was reported by an endpoint, we called report_error_detected() for the endpoint, every sibling on the bus, and their descendents. If any of them did not implement the .error_detected() method, do_recovery() failed, leaving all these devices unrecovered. For example, the system described in the bugzilla below has two devices: 0000:74:02.0 [19e5:a230] SAS controller, driver has .error_detected() 0000:74:03.0 [19e5:a235] SATA controller, driver lacks .error_detected() When a device such as 74:02.0 reported a non-fatal error, do_recovery() failed because 74:03.0 lacked an .error_detected() method. But per PCIe r3.1, sec 6.2.2.2.2, such an error does not compromise the Link and does not affect 74:03.0: Non-fatal errors are uncorrectable errors which cause a particular transaction to be unreliable but the Link is otherwise fully functional. Isolating Non-fatal from Fatal errors provides Requester/Receiver logic in a device or system management software the opportunity to recover from the error without resetting the components on the Link and disturbing other transactions in progress. Devices not associated with the transaction in error are not impacted by the error. Report non-fatal errors only to the endpoint that reported them. We really want to check for AER_NONFATAL here, but the current code structure doesn't allow that. Looking for pci_channel_io_normal is the best we can do now. Link: https://bugzilla.kernel.org/show_bug.cgi?id=197055 Fixes: 6c2b374d7485 ("PCI-Express AER implemetation: AER core and aerdriver") Signed-off-by: Gabriele Paoloni Signed-off-by: Dongdong Liu [bhelgaas: changelog] Signed-off-by: Bjorn Helgaas Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/pcie/aer/aerdrv_core.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 890efcc574cbb9..744805232155cc 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -390,7 +390,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, * If the error is reported by an end point, we think this * error is related to the upstream link of the end point. */ - pci_walk_bus(dev->bus, cb, &result_data); + if (state == pci_channel_io_normal) + /* + * the error is non fatal so the bus is ok, just invoke + * the callback for the function that logged the error. 
+ */ + cb(dev, &result_data); + else + pci_walk_bus(dev->bus, cb, &result_data); } return result_data.result; From 16e1626e54f835cb009de675d1f6b5a0ff9183d9 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 22 Sep 2017 14:58:17 -0500 Subject: [PATCH 0932/3326] tracing: Exclude 'generic fields' from histograms [ Upstream commit a15f7fc20389a8827d5859907568b201234d4b79 ] There are a small number of 'generic fields' (comm/COMM/cpu/CPU) that are found by trace_find_event_field() but are only meant for filtering. Specifically, they unlike normal fields, they have a size of 0 and thus wreak havoc when used as a histogram key. Exclude these (return -EINVAL) when used as histogram keys. Link: http://lkml.kernel.org/r/956154cbc3e8a4f0633d619b886c97f0f0edf7b4.1506105045.git.tom.zanussi@linux.intel.com Signed-off-by: Tom Zanussi Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events_hist.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 1c21d0e2a145a6..7eb975a2d0e132 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -450,7 +450,7 @@ static int create_val_field(struct hist_trigger_data *hist_data, } field = trace_find_event_field(file->event_call, field_name); - if (!field) { + if (!field || !field->size) { ret = -EINVAL; goto out; } @@ -548,7 +548,7 @@ static int create_key_field(struct hist_trigger_data *hist_data, } field = trace_find_event_field(file->event_call, field_name); - if (!field) { + if (!field || !field->size) { ret = -EINVAL; goto out; } From f9e51fb046db69a2e83d7d7422e33f3402d1800b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-Fran=C3=A7ois=20T=C3=AAtu?= Date: Fri, 29 Sep 2017 16:19:44 -0400 Subject: [PATCH 0933/3326] ASoC: codecs: msm8916-wcd-analog: fix micbias level MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 664611e7e02f76fbc5470ef545b2657ed25c292b ] The macro used to set the microphone bias level causes the snd_soc_write() call to overwrite other fields in the CDC_A_MICB_1_VAL register. The macro also does not return the proper level value to use. This fixes this by preserving all bits from the register that are not the level while setting the level. 
Signed-off-by: Jean-François Têtu Acked-by: Srinivas Kandagatla Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/msm8916-wcd-analog.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index 549c269acc7dfd..a42f8ebb96701d 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -104,7 +104,7 @@ #define CDC_A_MICB_1_VAL (0xf141) #define MICB_MIN_VAL 1600 #define MICB_STEP_SIZE 50 -#define MICB_VOLTAGE_REGVAL(v) ((v - MICB_MIN_VAL)/MICB_STEP_SIZE) +#define MICB_VOLTAGE_REGVAL(v) (((v - MICB_MIN_VAL)/MICB_STEP_SIZE) << 3) #define MICB_1_VAL_MICB_OUT_VAL_MASK GENMASK(7, 3) #define MICB_1_VAL_MICB_OUT_VAL_V2P70V ((0x16) << 3) #define MICB_1_VAL_MICB_OUT_VAL_V1P80V ((0x4) << 3) @@ -349,8 +349,9 @@ static void pm8916_wcd_analog_micbias_enable(struct snd_soc_codec *codec) | MICB_1_CTL_EXT_PRECHARG_EN_ENABLE); if (wcd->micbias_mv) { - snd_soc_write(codec, CDC_A_MICB_1_VAL, - MICB_VOLTAGE_REGVAL(wcd->micbias_mv)); + snd_soc_update_bits(codec, CDC_A_MICB_1_VAL, + MICB_1_VAL_MICB_OUT_VAL_MASK, + MICB_VOLTAGE_REGVAL(wcd->micbias_mv)); /* * Special headset needs MICBIAS as 2.7V so wait for * 50 msec for the MICBIAS to reach 2.7 volts. From f5fec0590cd8732c810c0b6d9ae9e3a42021f2cb Mon Sep 17 00:00:00 2001 From: Ed Blake Date: Mon, 2 Oct 2017 11:00:33 +0100 Subject: [PATCH 0934/3326] ASoC: img-parallel-out: Add pm_runtime_get/put to set_fmt callback [ Upstream commit c70458890ff15d858bd347fa9f563818bcd6e457 ] Add pm_runtime_get_sync and pm_runtime_put calls to set_fmt callback function. This fixes a bus error during boot when CONFIG_SUSPEND is defined when this function gets called while the device is runtime disabled and device registers are accessed while the clock is disabled. Signed-off-by: Ed Blake Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/soc/img/img-parallel-out.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c index 23b0f0f6ec9cb6..2fc8a637220603 100644 --- a/sound/soc/img/img-parallel-out.c +++ b/sound/soc/img/img-parallel-out.c @@ -164,9 +164,11 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) return -EINVAL; } + pm_runtime_get_sync(prl->dev); reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL); reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set; img_prl_out_writel(prl, reg, IMG_PRL_OUT_CTL); + pm_runtime_put(prl->dev); return 0; } From 97f41b41c432e5a80c91445d92c2f4b729984d36 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 29 Sep 2017 13:29:40 +1000 Subject: [PATCH 0935/3326] powerpc/xmon: Avoid tripping SMP hardlockup watchdog [ Upstream commit 064996d62a33ffe10264b5af5dca92d54f60f806 ] The SMP hardlockup watchdog cross-checks other CPUs for lockups, which causes xmon headaches because it's assuming interrupts hard disabled means no watchdog troubles. Try to improve that by calling touch_nmi_watchdog() in obvious places where secondaries are spinning. Also annotate these spin loops with spin_begin/end calls. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/xmon/xmon.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index c008083fbc4f63..2c8b325591cc25 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -530,14 +530,19 @@ static int xmon_core(struct pt_regs *regs, int fromipi) waiting: secondary = 1; + spin_begin(); while (secondary && !xmon_gate) { if (in_xmon == 0) { - if (fromipi) + if (fromipi) { + spin_end(); goto leave; + } secondary = test_and_set_bit(0, &in_xmon); } - barrier(); + spin_cpu_relax(); + touch_nmi_watchdog(); } + spin_end(); if (!secondary && !xmon_gate) { /* we are the first cpu to come in */ @@ -568,21 +573,25 @@ static int xmon_core(struct pt_regs *regs, int fromipi) mb(); xmon_gate = 1; barrier(); + touch_nmi_watchdog(); } cmdloop: while (in_xmon) { if (secondary) { + spin_begin(); if (cpu == xmon_owner) { if (!test_and_set_bit(0, &xmon_taken)) { secondary = 0; + spin_end(); continue; } /* missed it */ while (cpu == xmon_owner) - barrier(); + spin_cpu_relax(); } - barrier(); + spin_cpu_relax(); + touch_nmi_watchdog(); } else { cmd = cmds(regs); if (cmd != 0) { From fa21a13d76a7eed4b1df1a5223752051dbfde948 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 29 Sep 2017 13:29:39 +1000 Subject: [PATCH 0936/3326] powerpc/watchdog: Do not trigger SMP crash from touch_nmi_watchdog [ Upstream commit 80e4d70b06863e0104e5a0dc78aa3710297fbd4b ] In xmon, touch_nmi_watchdog() is not expected to be checking that other CPUs have not touched the watchdog, so the code will just call touch_nmi_watchdog() once before re-enabling hard interrupts. Just update our CPU's state, and ignore apparently stuck SMP threads. Arguably touch_nmi_watchdog should check for SMP lockups, and callers should be fixed, but that's not trivial for the input code of xmon. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/watchdog.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 57190f384f633e..ce848ff84eddfc 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -276,9 +276,12 @@ void arch_touch_nmi_watchdog(void) { unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000; int cpu = smp_processor_id(); + u64 tb = get_tb(); - if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks) - watchdog_timer_interrupt(cpu); + if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) { + per_cpu(wd_timer_tb, cpu) = tb; + wd_smp_clear_cpu_pending(cpu, tb); + } } EXPORT_SYMBOL(arch_touch_nmi_watchdog); From 5d1b6695edb773d27f100eafc8cc9a34c58cb07f Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Tue, 3 Oct 2017 19:20:08 -0300 Subject: [PATCH 0937/3326] sctp: silence warns on sctp_stream_init allocations [ Upstream commit 1ae2eaaa229bc350b6f38fbf4ab9c873532aecfb ] As SCTP supports up to 65535 streams, that can lead to very large allocations in sctp_stream_init(). As Xin Long noticed, systems with small amounts of memory are more prone to not have enough memory and dump warnings on dmesg initiated by user actions. Thus, silence them. Also, if the reallocation of stream->out is not necessary, skip it and keep the memory we already have. 
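Both halves of the change are allocation policy rather than protocol logic: pass __GFP_NOWARN because the user-controlled, possibly huge size makes failure a normal outcome rather than something worth a dmesg splat, and do not free a correctly sized array only to allocate an identical one. A simplified standalone sketch of the second half, using plain calloc() and invented names:

#include <stdlib.h>

struct stream_model {
	struct { int state; } *out;
	unsigned short outcnt;
};

static int resize_out(struct stream_model *s, unsigned short outcnt)
{
	if (outcnt == s->outcnt)
		return 0;	/* already the right size, keep the existing array */

	free(s->out);
	s->out = calloc(outcnt, sizeof(*s->out));
	if (!s->out)
		return -1;

	s->outcnt = outcnt;
	return 0;
}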
Reported-by: Xin Long Tested-by: Xin Long Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sctp/stream.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/sctp/stream.c b/net/sctp/stream.c index fa8371ff05c43f..724adf2786a23b 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -40,9 +40,14 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, { int i; + gfp |= __GFP_NOWARN; + /* Initial stream->out size may be very big, so free it and alloc - * a new one with new outcnt to save memory. + * a new one with new outcnt to save memory if needed. */ + if (outcnt == stream->outcnt) + goto in; + kfree(stream->out); stream->out = kcalloc(outcnt, sizeof(*stream->out), gfp); @@ -53,6 +58,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, for (i = 0; i < stream->outcnt; i++) stream->out[i].state = SCTP_STREAM_OPEN; +in: if (!incnt) return 0; From c01b06d9ac357e1d6d95d7b62be60ecd69071c2b Mon Sep 17 00:00:00 2001 From: Nicolas Dechesne Date: Tue, 3 Oct 2017 11:49:51 +0200 Subject: [PATCH 0938/3326] ASoC: codecs: msm8916-wcd-analog: fix module autoload [ Upstream commit 46d69e141d479585c105a4d5b2337cd2ce6967e5 ] If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. Before this patch: $ modinfo snd_soc_msm8916_analog | grep alias $ After this patch: $ modinfo snd_soc_msm8916_analog | grep alias alias: of:N*T*Cqcom,pm8916-wcd-analog-codecC* alias: of:N*T*Cqcom,pm8916-wcd-analog-codec Signed-off-by: Nicolas Dechesne Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/msm8916-wcd-analog.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index a42f8ebb96701d..18933bf6473f47 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -1242,6 +1242,8 @@ static const struct of_device_id pm8916_wcd_analog_spmi_match_table[] = { { } }; +MODULE_DEVICE_TABLE(of, pm8916_wcd_analog_spmi_match_table); + static struct platform_driver pm8916_wcd_analog_spmi_driver = { .driver = { .name = "qcom,pm8916-wcd-spmi-codec", From 16ddeff35b7b84eefccddae23133f3eced68a8c0 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 11 Aug 2017 11:14:58 -0700 Subject: [PATCH 0939/3326] fm10k: fix mis-ordered parameters in declaration for .ndo_set_vf_bw [ Upstream commit 3e256ac5b1ec307e5dd5a4c99fbdbc651446c738 ] We've had support for setting both a minimum and maximum bandwidth via .ndo_set_vf_bw since commit 883a9ccbae56 ("fm10k: Add support for SR-IOV to driver", 2014-09-20). Likely because we do not support minimum rates, the declaration mis-ordered the "unused" parameter, which causes warnings when analyzed with cppcheck. Fix this warning by properly declaring the min_rate and max_rate variables in the declaration and definition (rather than using "unused"). Also rename "rate" to max_rate so as to clarify that we only support setting the maximum rate. 
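The underlying C pitfall is that parameter names in a prototype are documentation only; the compiler checks types, not names, so a declaration and a definition that bind the same position to different meanings compile silently and only tools like cppcheck notice. A minimal illustration with a hypothetical function (not the driver code):

/* The declaration reads as (vf, min, max)... */
int set_vf_bw(int vf_idx, int min_rate, int max_rate);

/* ...while the definition quietly binds the positions the other way
 * around; nothing in the language flags the mismatch. */
int set_vf_bw(int vf_idx, int max_rate, int min_rate)
{
	(void)min_rate;			/* position 3, which callers think is the max */
	return vf_idx + max_rate;	/* position 2, which callers think is the min */
}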
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/fm10k/fm10k.h | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 689c413b7782f3..d2f9a2dd76a222 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -526,8 +526,8 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid); int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac); int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, u8 qos, __be16 vlan_proto); -int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate, - int unused); +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, + int __always_unused min_rate, int max_rate); int fm10k_ndo_get_vf_config(struct net_device *netdev, int vf_idx, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 5f4dac0d36ef15..f919199944a05d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -482,7 +482,7 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, } int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, - int __always_unused unused, int rate) + int __always_unused min_rate, int max_rate) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_iov_data *iov_data = interface->iov_data; @@ -493,14 +493,15 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, return -EINVAL; /* rate limit cannot be less than 10Mbs or greater than link speed */ - if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX)) + if (max_rate && + (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX)) return -EINVAL; /* store values */ - iov_data->vf_info[vf_idx].rate = rate; + iov_data->vf_info[vf_idx].rate = max_rate; /* update hardware configuration */ - hw->iov.ops.configure_tc(hw, vf_idx, rate); + hw->iov.ops.configure_tc(hw, vf_idx, max_rate); return 0; } From 096232d99989e34c80bd3a15e343fc3bbdfa42d8 Mon Sep 17 00:00:00 2001 From: Dick Kennedy Date: Fri, 29 Sep 2017 17:34:42 -0700 Subject: [PATCH 0940/3326] scsi: lpfc: Fix secure firmware updates [ Upstream commit 184fc2b9a8bcbda9c14d0a1e7fbecfc028c7702e ] Firmware update fails with: status x17 add_status x56 on the final write If multiple DMA buffers are used for the download, some firmware revs have difficulty with signatures and crcs split across the dma buffer boundaries. Resolve by making all writes be a single 4k page in length. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_hw4.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1db0a38683f438..2b145966c73f4f 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -3636,7 +3636,7 @@ struct lpfc_mbx_get_port_name { #define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 #define MB_CQE_STATUS_DMA_FAILED 0x5 -#define LPFC_MBX_WR_CONFIG_MAX_BDE 8 +#define LPFC_MBX_WR_CONFIG_MAX_BDE 1 struct lpfc_mbx_wr_object { struct mbox_header header; union { From 6fe8e4f3e4e937ab762fe9114818c64ae215c882 Mon Sep 17 00:00:00 2001 From: Dick Kennedy Date: Fri, 29 Sep 2017 17:34:32 -0700 Subject: [PATCH 0941/3326] scsi: lpfc: PLOGI failures during NPIV testing [ Upstream commit e8bcf0ae4c0346fdc78ebefe0eefcaa6a6622d38 ] Local Reject/Invalid RPI errors seen during discovery. Temporary RPI cleanup was occurring regardless of SLI rev. It's only necessary on SLI-4. Adjust the test for whether cleanup is necessary. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_hbadisc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 499df9d1733913..d9a03beb76a4bb 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -4983,7 +4983,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_cancel_retry_delay_tmo(vport, ndlp); if ((ndlp->nlp_flag & NLP_DEFER_RM) && !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) && - !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { + !(ndlp->nlp_flag & NLP_RPI_REGISTERED) && + phba->sli_rev != LPFC_SLI_REV4) { /* For this case we need to cleanup the default rpi * allocated by the firmware. */ From 8da9104839c86b9f80fb45df60ec0beae9f103b1 Mon Sep 17 00:00:00 2001 From: Dick Kennedy Date: Fri, 29 Sep 2017 17:34:31 -0700 Subject: [PATCH 0942/3326] scsi: lpfc: Fix warning messages when NVME_TARGET_FC not defined [ Upstream commit 2299e4323d2bf6e0728fdc6b9e8e9704978d2dd7 ] Warning messages when NVME_TARGET_FC not defined on ppc builds The lpfc_nvmet_replenish_context() function is only meaningful when NVME target mode enabled. Surround the function body with ifdefs for target mode enablement. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reported-by: Stephen Rothwell Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_nvmet.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 3c5b054a56ac44..7ac1a067d78016 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1464,6 +1464,7 @@ static struct lpfc_nvmet_ctxbuf * lpfc_nvmet_replenish_context(struct lpfc_hba *phba, struct lpfc_nvmet_ctx_info *current_infop) { +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_ctxbuf *ctx_buf = NULL; struct lpfc_nvmet_ctx_info *get_infop; int i; @@ -1511,6 +1512,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba, get_infop = get_infop->nvmet_ctx_next_cpu; } +#endif /* Nothing found, all contexts for the MRQ are in-flight */ return NULL; } From 4297cf42691eb626ab5f72f416cf7d9ea126a410 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Tue, 22 Aug 2017 06:57:53 -0400 Subject: [PATCH 0943/3326] i40e: fix client notify of VF reset [ Upstream commit c53d11f669c0e7d0daf46a717b6712ad0b09de99 ] Currently there is a bug in which the PF driver fails to inform clients of a VF reset which then causes clients to leak resources. The bug exists because we were incorrectly checking the I40E_VF_STATE_PRE_ENABLE bit. When a VF is first init we go through a reset to initialize variables and allocate resources but we don't want to inform clients of this first reset since the client isn't fully enabled yet so we set a state bit signifying we're in a "pre-enabled" client state. During the first reset we should be clearing the bit, allowing all following resets to notify the client of the reset when the bit is not set. This patch fixes the issue by negating the 'test_and_clear_bit' check to accurately reflect the behavior we want. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index fa25ea512e43e5..e368b0237a1b64 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1008,8 +1008,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); /* Do not notify the client during VF init */ - if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, - &vf->vf_states)) + if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, + &vf->vf_states)) i40e_notify_client_of_vf_reset(pf, abs_vf_id); vf->num_vlan = 0; } From b27bbf1f5b9e62b7b0b8ce9a4f5e474c3119a9a9 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 2 Oct 2017 12:39:09 -0600 Subject: [PATCH 0944/3326] vfio/pci: Virtualize Maximum Payload Size [ Upstream commit 523184972b282cd9ca17a76f6ca4742394856818 ] With virtual PCI-Express chipsets, we now see userspace/guest drivers trying to match the physical MPS setting to a virtual downstream port. Of course a lone physical device surrounded by virtual interconnects cannot make a correct decision for a proper MPS setting. Instead, let's virtualize the MPS control register so that writes through to hardware are disallowed. Userspace drivers like QEMU assume they can write anything to the device and we'll filter out anything dangerous. 
Since mismatched MPS can lead to AER and other faults, let's add it to the kernel side rather than relying on userspace virtualization to handle it. Signed-off-by: Alex Williamson Reviewed-by: Eric Auger Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/vfio/pci/vfio_pci_config.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 5628fe114347a9..91335e6de88a24 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -849,11 +849,13 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm) /* * Allow writes to device control fields, except devctl_phantom, - * which could confuse IOMMU, and the ARI bit in devctl2, which + * which could confuse IOMMU, MPS, which can break communication + * with other physical devices, and the ARI bit in devctl2, which * is set at probe time. FLR gets virtualized via our writefn. */ p_setw(perm, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM); + PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD, + ~PCI_EXP_DEVCTL_PHANTOM); p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI); return 0; } From 2b401d9f1d4543e4e1ad23a46726376adb958831 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Mon, 2 Oct 2017 08:39:35 +0200 Subject: [PATCH 0945/3326] ARM: exynos_defconfig: Enable UAS support for Odroid HC1 board [ Upstream commit a99897f550de96841aecb811455a67ad7a4e39a7 ] Odroid HC1 board has built-in JMicron USB to SATA bridge, which supports UAS protocol. Compile-in support for it (instead of enabling it as module) to make sure that all built-in storage devices are available for rootfs. The bridge itself also supports fallback to standard USB Mass Storage protocol, but USB Mass Storage class doesn't bind to it when UAS is compiled as module and modules are not (yet) available. Signed-off-by: Marek Szyprowski Signed-off-by: Krzysztof Kozlowski Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/configs/exynos_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index 8c2a2619971b1e..f1d7834990ece6 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -244,7 +244,7 @@ CONFIG_USB_STORAGE_ONETOUCH=m CONFIG_USB_STORAGE_KARMA=m CONFIG_USB_STORAGE_CYPRESS_ATACB=m CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m +CONFIG_USB_UAS=y CONFIG_USB_DWC3=y CONFIG_USB_DWC2=y CONFIG_USB_HSIC_USB3503=y From fb383223d00ffa445b266c5fd54ed80fd8c57d17 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 2 Oct 2017 07:17:50 -0700 Subject: [PATCH 0946/3326] fm10k: ensure we process SM mbx when processing VF mbx [ Upstream commit 17a91809942ca32c70026d2d5ba3348a2c4fdf8f ] When we process VF mailboxes, the driver is likely going to also queue up messages to the switch manager. This process merely queues up the FIFO, but doesn't actually begin the transmission process. Because we hold the mailbox lock during this VF processing, the PF<->SM mailbox is not getting processed at this time. Ensure that we actually process the PF<->SM mailbox in between each PF<->VF mailbox. This should ensure prompt transmission of the messages queued up after each VF message is received and handled. 
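The ordering can be pictured with the small user-space sketch below. The function names are placeholders, not fm10k APIs; the point is only that the switch-manager mailbox is drained once per VF iteration, so whatever the previous VF handler queued goes out promptly.

#include <stdio.h>

struct hw { int queued; };

/* Placeholder: transmit whatever is queued toward the switch manager. */
static void process_sm_mailbox(struct hw *hw)
{
	if (hw->queued) {
		printf("flushing %d queued SM message(s)\n", hw->queued);
		hw->queued = 0;
	}
}

/* Placeholder: handle one VF; replying to a VF often queues SM messages. */
static void process_vf_mailbox(struct hw *hw, int vf)
{
	printf("handling VF %d\n", vf);
	hw->queued++;
}

int main(void)
{
	struct hw hw = { 0 };

	for (int vf = 0; vf < 3; vf++) {
		process_sm_mailbox(&hw);	/* drain before the next VF */
		process_vf_mailbox(&hw, vf);
	}
	process_sm_mailbox(&hw);		/* final drain */
	return 0;
}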
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index f919199944a05d..e72fd52bacfe0c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -126,6 +126,9 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface) struct fm10k_mbx_info *mbx = &vf_info->mbx; u16 glort = vf_info->glort; + /* process the SM mailbox first to drain outgoing messages */ + hw->mbx.ops.process(hw, &hw->mbx); + /* verify port mapping is valid, if not reset port */ if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) hw->iov.ops.reset_lport(hw, vf_info); From a58a3af86a4e8a2bd1d2d041341efa28818c6e89 Mon Sep 17 00:00:00 2001 From: Mick Tarsel Date: Thu, 28 Sep 2017 13:53:18 -0700 Subject: [PATCH 0947/3326] ibmvnic: Set state UP [ Upstream commit e876a8a7e9dd89dc88c12ca2e81beb478dbe9897 ] State is initially reported as UNKNOWN. Before register call netif_carrier_off(). Once the device is opened, call netif_carrier_on() in order to set the state to UP. Signed-off-by: Mick Tarsel Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ibm/ibmvnic.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c66abd476023a4..3b0db01ead1f42 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -927,6 +927,7 @@ static int ibmvnic_open(struct net_device *netdev) } rc = __ibmvnic_open(netdev); + netif_carrier_on(netdev); mutex_unlock(&adapter->reset_lock); return rc; @@ -3899,6 +3900,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) if (rc) goto ibmvnic_init_fail; + netif_carrier_off(netdev); rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); From 0006d8c76b0cbfc1dfefe41ac93881f1f8c15432 Mon Sep 17 00:00:00 2001 From: Mike Manning Date: Mon, 25 Sep 2017 22:01:36 +0100 Subject: [PATCH 0948/3326] net: ipv6: send NS for DAD when link operationally up [ Upstream commit 1f372c7bfb23286d2bf4ce0423ab488e86b74bb2 ] The NS for DAD are sent on admin up as long as a valid qdisc is found. A race condition exists by which these packets will not egress the interface if the operational state of the lower device is not yet up. The solution is to delay DAD until the link is operationally up according to RFC2863. Rather than only doing this, follow the existing code checks by deferring IPv6 device initialization altogether. The fix allows DAD on devices like tunnels that are controlled by userspace control plane. The fix has no impact on regular deployments, but means that there is no IPv6 connectivity until the port has been opened in the case of port-based network access control, which should be desirable. Signed-off-by: Mike Manning Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv6/addrconf.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8a1c846d3df949..2ec39404c44928 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -303,10 +303,10 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .disable_policy = 0, }; -/* Check if a valid qdisc is available */ -static inline bool addrconf_qdisc_ok(const struct net_device *dev) +/* Check if link is ready: is it up and is a valid qdisc available */ +static inline bool addrconf_link_ready(const struct net_device *dev) { - return !qdisc_tx_is_noop(dev); + return netif_oper_up(dev) && !qdisc_tx_is_noop(dev); } static void addrconf_del_rs_timer(struct inet6_dev *idev) @@ -451,7 +451,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) ndev->token = in6addr_any; - if (netif_running(dev) && addrconf_qdisc_ok(dev)) + if (netif_running(dev) && addrconf_link_ready(dev)) ndev->if_flags |= IF_READY; ipv6_mc_init_dev(ndev); @@ -3404,7 +3404,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, /* restore routes for permanent addresses */ addrconf_permanent_addr(dev); - if (!addrconf_qdisc_ok(dev)) { + if (!addrconf_link_ready(dev)) { /* device is not ready yet. */ pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", dev->name); @@ -3419,7 +3419,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, run_pending = 1; } } else if (event == NETDEV_CHANGE) { - if (!addrconf_qdisc_ok(dev)) { + if (!addrconf_link_ready(dev)) { /* device is still not ready. */ break; } From 0fbdd907e4b3836b025b8690823c7271f939a938 Mon Sep 17 00:00:00 2001 From: "Wei Hu(Xavier)" Date: Fri, 29 Sep 2017 23:10:12 +0800 Subject: [PATCH 0949/3326] RDMA/hns: Avoid NULL pointer exception [ Upstream commit 5e437b1d7e8d31ff9a4b8e898eb3a6cee309edd9 ] After the loop in hns_roce_v1_mr_free_work_fn function, it is possible that all qps will have been freed (in which case ne will be 0). If that happens, then later in the function when we dereference hr_qp we will get an exception. Check ne is not 0 to make sure we actually have an hr_qp left to work on. This patch fixes the smatch error as below: drivers/infiniband/hw/hns/hns_roce_hw_v1.c:1009 hns_roce_v1_mr_free_work_fn() error: we previously assumed 'hr_qp' could be null Signed-off-by: Wei Hu (Xavier) Signed-off-by: Lijun Ou Signed-off-by: Shaobo Xu Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 747efd1ae5a6c4..8208c30f03c5ae 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1001,6 +1001,11 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) } } + if (!ne) { + dev_err(dev, "Reseved loop qp is absent!\n"); + goto free_work; + } + do { ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); if (ret < 0) { From ae35e16e0a57da2a7aa8631c8929dd79bac74bde Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 23 Sep 2017 13:25:30 +0530 Subject: [PATCH 0950/3326] staging: greybus: light: Release memory obtained by kasprintf [ Upstream commit 04820da21050b35eed68aa046115d810163ead0c ] Free memory region, if gb_lights_channel_config is not successful. 
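The ownership rule being fixed follows a common kernel pattern: a name allocated with kasprintf() must be released on every unwind path. The sketch below is an illustration of that pattern, not the greybus code itself.

/* Minimal sketch of the kasprintf()/kfree() ownership rule; illustrative
 * only, not drivers/staging/greybus/light.c. */
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/slab.h>

static int example_register_led(struct device *parent,
				struct led_classdev *cdev, int id)
{
	int ret;

	cdev->name = kasprintf(GFP_KERNEL, "channel%d", id);
	if (!cdev->name)
		return -ENOMEM;

	ret = led_classdev_register(parent, cdev);
	if (ret) {
		kfree(cdev->name);	/* don't leak the name on failure */
		cdev->name = NULL;
	}
	return ret;
}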
Signed-off-by: Arvind Yadav Reviewed-by: Rui Miguel Silva Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/greybus/light.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index 3f4148c92308e6..0f538b8c3a0762 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -925,6 +925,8 @@ static void __gb_lights_led_unregister(struct gb_channel *channel) return; led_classdev_unregister(cdev); + kfree(cdev->name); + cdev->name = NULL; channel->led = NULL; } From 71e51e4d488d56345d72dc3c89e67af015afea22 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Fri, 29 Sep 2017 16:22:54 +0800 Subject: [PATCH 0951/3326] clk: sunxi-ng: sun6i: Rename HDMI DDC clock to avoid name collision [ Upstream commit 7f3ed79188f2f094d0ee366fa858857fb7f511ba ] The HDMI DDC clock found in the CCU is the parent of the actual DDC clock within the HDMI controller. That clock is also named "hdmi-ddc". Rename the one in the CCU to "ddc". This makes more sense than renaming the one in the HDMI controller to something else. Fixes: c6e6c96d8fa6 ("clk: sunxi-ng: Add A31/A31s clocks") Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 8af434815fba9f..241fb13f1c0612 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents, 0x150, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT); -static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0); +static SUNXI_CCU_GATE(hdmi_ddc_clk, "ddc", "osc24M", 0x150, BIT(30), 0); static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); From 0aaff15c10132eb36ab16b4785cd207075e5943b Mon Sep 17 00:00:00 2001 From: Hoang Tran Date: Wed, 27 Sep 2017 18:30:58 +0200 Subject: [PATCH 0952/3326] tcp: fix under-evaluated ssthresh in TCP Vegas [ Upstream commit cf5d74b85ef40c202c76d90959db4d850f301b95 ] With the commit 76174004a0f19785 (tcp: do not slow start when cwnd equals ssthresh), the comparison to the reduced cwnd in tcp_vegas_ssthresh() would under-evaluate the ssthresh. Signed-off-by: Hoang Tran Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_vegas.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index 218cfcc7765000..ee113ff15fd02f 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) { - return min(tp->snd_ssthresh, tp->snd_cwnd-1); + return min(tp->snd_ssthresh, tp->snd_cwnd); } static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) From 051c3df7d6b8d5f1948503637c69fc939db69035 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Thu, 28 Sep 2017 13:53:27 +0200 Subject: [PATCH 0953/3326] rtc: set the alarm to the next expiring timer [ Upstream commit 74717b28cb32e1ad3c1042cafd76b264c8c0f68d ] If there is any non expired timer in the queue, the RTC alarm is never set. This is an issue when adding a timer that expires before the next non expired timer. 
Ensure the RTC alarm is set in that case. Fixes: 2b2f5ff00f63 ("rtc: interface: ignore expired timers when enqueuing new timers") Signed-off-by: Alexandre Belloni Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/rtc/interface.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 8cec9a02c0b893..9eb32ead63dbea 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -779,7 +779,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) } timerqueue_add(&rtc->timerqueue, &timer->node); - if (!next) { + if (!next || ktime_before(timer->node.expires, next->expires)) { struct rtc_wkalrm alarm; int err; alarm.time = rtc_ktime_to_tm(timer->node.expires); From 80879ecb4624d58e10cfe03550056c34483187a5 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 1 Sep 2017 14:29:56 +1000 Subject: [PATCH 0954/3326] cpuidle: fix broadcast control when broadcast can not be entered [ Upstream commit f187851b9b4a76952b1158b86434563dd2031103 ] When failing to enter broadcast timer mode for an idle state that requires it, a new state is selected that does not require broadcast, but the broadcast variable remains set. This causes tick_broadcast_exit to be called despite not having entered broadcast mode. This causes the WARN_ON_ONCE(!irqs_disabled()) to trigger in some cases. It does not appear to cause problems for code today, but seems to violate the interface so should be fixed. Signed-off-by: Nicholas Piggin Reviewed-by: Thomas Gleixner Signed-off-by: Rafael J. Wysocki Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/cpuidle/cpuidle.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 484cc8909d5c36..ed4df58a855e14 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -208,6 +208,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, return -EBUSY; } target_state = &drv->states[index]; + broadcast = false; } /* Take note of the planned idle state. */ From 0d74c05ca7ef083cbc676aacfd147ff60de54c95 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 15 Aug 2017 16:47:19 -0700 Subject: [PATCH 0955/3326] drm/vc4: Avoid using vrefresh==0 mode in DSI htotal math. [ Upstream commit af2eca53206c59ce9308a4f5f46c4a104a179b6b ] The incoming mode might have a missing vrefresh field if it came from drmModeSetCrtc(), which the kernel is supposed to calculate using drm_mode_vrefresh(). We could either use that or the adjusted_mode's original vrefresh value. However, we can maintain a more exact vrefresh value (not just the integer approximation), by scaling by the ratio of our clocks. v2: Use math suggested by Andrzej Hajda instead. v3: Simplify math now that adjusted_mode->clock isn't padded. v4: Drop some parens. 
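The ratio math can be checked in user space. In the sketch below the timing numbers are made up for illustration: since vrefresh = clock / (htotal * vtotal), scaling htotal by adjusted_clock/original_clock keeps the refresh rate close to the original without ever reading the integer vrefresh field.

#include <stdio.h>

int main(void)
{
	int clock = 148500, htotal = 2200, vtotal = 1125;	/* kHz, pixels, lines */
	int adj_clock = 150000;					/* padded pixel clock */
	int adj_htotal = adj_clock * htotal / clock;		/* 2222 */

	printf("before: %.4f Hz\n", clock * 1000.0 / (htotal * vtotal));
	printf("after:  %.4f Hz\n", adj_clock * 1000.0 / (adj_htotal * vtotal));
	return 0;
}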
Signed-off-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/20170815234722.20700-2-eric@anholt.net Reviewed-by: Andrzej Hajda Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vc4/vc4_dsi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index d1e0dc90804824..04796d7d0fdbb4 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -866,7 +866,8 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder, adjusted_mode->clock = pixel_clock_hz / 1000 + 1; /* Given the new pixel clock, adjust HFP to keep vrefresh the same. */ - adjusted_mode->htotal = pixel_clock_hz / (mode->vrefresh * mode->vtotal); + adjusted_mode->htotal = adjusted_mode->clock * mode->htotal / + mode->clock; adjusted_mode->hsync_end += adjusted_mode->htotal - mode->htotal; adjusted_mode->hsync_start += adjusted_mode->htotal - mode->htotal; From ecffae11228f96c045eb38fa60bdf17dc677b94b Mon Sep 17 00:00:00 2001 From: Scott Franco Date: Tue, 26 Sep 2017 06:44:13 -0700 Subject: [PATCH 0956/3326] IB/opa_vnic: Properly clear Mac Table Digest [ Upstream commit 4bbdfe25600c1909c26747d0b5c39fd0e409bb87 ] Clear the MAC table digest when the MAC table is freed. Reviewed-by: Niranjana Vishwanathapura Signed-off-by: Scott Franco Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c index afa938bd26d611..a72278e9cd274f 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c @@ -139,6 +139,7 @@ void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter) rcu_assign_pointer(adapter->mactbl, NULL); synchronize_rcu(); opa_vnic_free_mac_tbl(mactbl); + adapter->info.vport.mac_tbl_digest = 0; mutex_unlock(&adapter->mactbl_lock); } From 0da9db57c0423f281db8ae2fb1a67e88d3fecd34 Mon Sep 17 00:00:00 2001 From: Niranjana Vishwanathapura Date: Tue, 26 Sep 2017 06:44:07 -0700 Subject: [PATCH 0957/3326] IB/opa_vnic: Properly return the total MACs in UC MAC list [ Upstream commit b77eb45e0d9c324245d165656ab3b38b6f386436 ] Do not include EM specified MAC address in total MACs of the UC MAC list. 
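The accounting rule is simple: an address that is skipped while building the list must also be subtracted from the advertised total, otherwise the consumer waits for entries that will never be sent. The helper below is illustrative only, not opa_vnic code.

#include <string.h>

static int reported_macs(const unsigned char (*macs)[6], int n,
			 const unsigned char *excluded)
{
	int i, skipped = 0;

	for (i = 0; i < n; i++)
		if (!memcmp(macs[i], excluded, 6))
			skipped++;

	return n - skipped;	/* count that matches what is actually listed */
}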
Reviewed-by: Sudeep Dutt Signed-off-by: Niranjana Vishwanathapura Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c index c2733964379ca6..9655cc3aa3a07a 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c @@ -348,7 +348,7 @@ void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter, void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, struct opa_veswport_iface_macs *macs) { - u16 start_idx, tot_macs, num_macs, idx = 0, count = 0; + u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0; struct netdev_hw_addr *ha; start_idx = be16_to_cpu(macs->start_idx); @@ -359,8 +359,10 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, /* Do not include EM specified MAC address */ if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr, - ARRAY_SIZE(adapter->info.vport.base_mac_addr))) + ARRAY_SIZE(adapter->info.vport.base_mac_addr))) { + em_macs++; continue; + } if (start_idx > idx++) continue; @@ -383,7 +385,7 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, } tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) + - netdev_uc_count(adapter->netdev); + netdev_uc_count(adapter->netdev) - em_macs; macs->tot_macs_in_lst = cpu_to_be16(tot_macs); macs->num_macs_in_msg = cpu_to_be16(count); macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count); From 7254834c43bd32842e96fd856c9c9793a29e6fe2 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 19 Oct 2017 19:05:43 +0200 Subject: [PATCH 0958/3326] thermal/drivers/hisi: Fix missing interrupt enablement commit c176b10b025acee4dc8f2ab1cd64eb73b5ccef53 upstream. The interrupt for the temperature threshold is not enabled at the end of the probe function, enable it after the setup is complete. On the other side, the irq_enabled is not correctly set as we are checking if the interrupt is masked where 'yes' means irq_enabled=false. irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED, &data->irq_enabled); As we are always enabling the interrupt, it is pointless to check if the interrupt is masked or not, just set irq_enabled to 'true'. 
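The polarity that tripped up the original code is worth spelling out: IRQCHIP_STATE_MASKED reports whether the line is masked, so a driver tracking an "enabled" flag has to invert the result. The helper below is an illustration of that inversion, not part of the hisi driver.

#include <linux/interrupt.h>
#include <linux/irq.h>

static bool irq_line_enabled(unsigned int irq)
{
	bool masked = false;

	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_MASKED, &masked))
		return false;		/* treat a lookup failure as "off" */

	return !masked;			/* masked == true means disabled */
}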
Signed-off-by: Daniel Lezcano Reviewed-by: Leo Yan Tested-by: Leo Yan Signed-off-by: Eduardo Valentin Signed-off-by: Kevin Wangtao Signed-off-by: Greg Kroah-Hartman --- drivers/thermal/hisi_thermal.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index bd3572c41585ca..8381696241d65c 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -345,8 +345,7 @@ static int hisi_thermal_probe(struct platform_device *pdev) } hisi_thermal_enable_bind_irq_sensor(data); - irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED, - &data->irq_enabled); + data->irq_enabled = true; for (i = 0; i < HISI_MAX_SENSORS; ++i) { ret = hisi_thermal_register_sensor(pdev, data, @@ -358,6 +357,8 @@ static int hisi_thermal_probe(struct platform_device *pdev) hisi_thermal_toggle_sensor(&data->sensors[i], true); } + enable_irq(data->irq); + return 0; } From cf826c57785384341c3f0cf2cfbfe86c02eb6b9b Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 19 Oct 2017 19:05:45 +0200 Subject: [PATCH 0959/3326] thermal/drivers/hisi: Fix kernel panic on alarm interrupt commit 2cb4de785c40d4a2132cfc13e63828f5a28c3351 upstream. The threaded interrupt for the alarm interrupt is requested before the temperature controller is setup. This one can fire an interrupt immediately leading to a kernel panic as the sensor data is not initialized. In order to prevent that, move the threaded irq after the Tsensor is setup. Signed-off-by: Daniel Lezcano Reviewed-by: Leo Yan Tested-by: Leo Yan Signed-off-by: Eduardo Valentin Signed-off-by: Kevin Wangtao Signed-off-by: Greg Kroah-Hartman --- drivers/thermal/hisi_thermal.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index 8381696241d65c..26bb8e2a482f57 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -317,15 +317,6 @@ static int hisi_thermal_probe(struct platform_device *pdev) if (data->irq < 0) return data->irq; - ret = devm_request_threaded_irq(&pdev->dev, data->irq, - hisi_thermal_alarm_irq, - hisi_thermal_alarm_irq_thread, - 0, "hisi_thermal", data); - if (ret < 0) { - dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret); - return ret; - } - platform_set_drvdata(pdev, data); data->clk = devm_clk_get(&pdev->dev, "thermal_clk"); @@ -357,6 +348,15 @@ static int hisi_thermal_probe(struct platform_device *pdev) hisi_thermal_toggle_sensor(&data->sensors[i], true); } + ret = devm_request_threaded_irq(&pdev->dev, data->irq, + hisi_thermal_alarm_irq, + hisi_thermal_alarm_irq_thread, + 0, "hisi_thermal", data); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret); + return ret; + } + enable_irq(data->irq); return 0; From 02c17c0f825c2d22de05c18e97982796b634eff3 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 19 Oct 2017 19:05:46 +0200 Subject: [PATCH 0960/3326] thermal/drivers/hisi: Simplify the temperature/step computation commit 48880b979cdc9ef5a70af020f42b8ba1e51dbd34 upstream. The step and the base temperature are fixed values, we can simplify the computation by converting the base temperature to milli celsius and use a pre-computed step value. That saves us a lot of mult + div for nothing at runtime. Take also the opportunity to change the function names to be consistent with the rest of the code. 
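A worked example of the step math, runnable in user space: step = (temp - base) / 784 and temp = base + step * 784, with the base expressed in millidegrees (-60000). A 65000 m°C trip point lands on step 159, i.e. 64656 m°C in hardware, which is the rounded value the next patch has to account for.

#include <stdio.h>

#define HISI_TEMP_BASE	(-60000)
#define HISI_TEMP_STEP	784

int main(void)
{
	int trip = 65000;
	int step = (trip - HISI_TEMP_BASE) / HISI_TEMP_STEP;	 /* 159 */
	int programmed = HISI_TEMP_BASE + step * HISI_TEMP_STEP; /* 64656 */

	printf("trip %d m°C -> step %d -> programmed %d m°C\n",
	       trip, step, programmed);
	return 0;
}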
Signed-off-by: Daniel Lezcano Reviewed-by: Leo Yan Tested-by: Leo Yan Signed-off-by: Eduardo Valentin Signed-off-by: Kevin Wangtao Signed-off-by: Greg Kroah-Hartman --- drivers/thermal/hisi_thermal.c | 41 +++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index 26bb8e2a482f57..ea77aac97ad3bd 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -35,8 +35,9 @@ #define TEMP0_RST_MSK (0x1C) #define TEMP0_VALUE (0x28) -#define HISI_TEMP_BASE (-60) +#define HISI_TEMP_BASE (-60000) #define HISI_TEMP_RESET (100000) +#define HISI_TEMP_STEP (784) #define HISI_MAX_SENSORS 4 @@ -61,19 +62,32 @@ struct hisi_thermal_data { void __iomem *regs; }; -/* in millicelsius */ -static inline int _step_to_temp(int step) +/* + * The temperature computation on the tsensor is as follow: + * Unit: millidegree Celsius + * Step: 255/200 (0.7843) + * Temperature base: -60°C + * + * The register is programmed in temperature steps, every step is 784 + * millidegree and begins at -60 000 m°C + * + * The temperature from the steps: + * + * Temp = TempBase + (steps x 784) + * + * and the steps from the temperature: + * + * steps = (Temp - TempBase) / 784 + * + */ +static inline int hisi_thermal_step_to_temp(int step) { - /* - * Every step equals (1 * 200) / 255 celsius, and finally - * need convert to millicelsius. - */ - return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255)); + return HISI_TEMP_BASE + (step * HISI_TEMP_STEP); } -static inline long _temp_to_step(long temp) +static inline long hisi_thermal_temp_to_step(long temp) { - return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000; + return (temp - HISI_TEMP_BASE) / HISI_TEMP_STEP; } static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, @@ -99,7 +113,7 @@ static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, usleep_range(3000, 5000); val = readl(data->regs + TEMP0_VALUE); - val = _step_to_temp(val); + val = hisi_thermal_step_to_temp(val); mutex_unlock(&data->thermal_lock); @@ -126,10 +140,11 @@ static void hisi_thermal_enable_bind_irq_sensor writel((sensor->id << 12), data->regs + TEMP0_CFG); /* enable for interrupt */ - writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00, + writel(hisi_thermal_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00, data->regs + TEMP0_TH); - writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH); + writel(hisi_thermal_temp_to_step(HISI_TEMP_RESET), + data->regs + TEMP0_RST_TH); /* enable module */ writel(0x1, data->regs + TEMP0_RST_MSK); From 5431aef93678a1f91e8b3ab41076f2cd1be34353 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 19 Oct 2017 19:05:47 +0200 Subject: [PATCH 0961/3326] thermal/drivers/hisi: Fix multiple alarm interrupts firing commit db2b0332608c8e648ea1e44727d36ad37cdb56cb upstream. The DT specifies a threshold of 65000, we setup the register with a value in the temperature resolution for the controller, 64656. When we reach 64656, the interrupt fires, the interrupt is disabled. Then the irq thread runs and calls thermal_zone_device_update() which will call in turn hisi_thermal_get_temp(). The function will look if the temperature decreased, assuming it was more than 65000, but that is not the case because the current temperature is 64656 (because of the rounding when setting the threshold). This condition being true, we re-enable the interrupt which fires immediately after exiting the irq thread. 
That happens again and again until the temperature goes to more than 65000. Potentially, there is here an interrupt storm if the temperature stabilizes at this temperature. A very unlikely case but possible. In any case, it does not make sense to handle dozens of alarm interrupt for nothing. Fix this by rounding the threshold value to the controller resolution so the check against the threshold is consistent with the one set in the controller. Signed-off-by: Daniel Lezcano Reviewed-by: Leo Yan Tested-by: Leo Yan Signed-off-by: Eduardo Valentin Signed-off-by: Kevin Wangtao Signed-off-by: Greg Kroah-Hartman --- drivers/thermal/hisi_thermal.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index ea77aac97ad3bd..6d8906d6547638 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -90,6 +90,12 @@ static inline long hisi_thermal_temp_to_step(long temp) return (temp - HISI_TEMP_BASE) / HISI_TEMP_STEP; } +static inline long hisi_thermal_round_temp(int temp) +{ + return hisi_thermal_step_to_temp( + hisi_thermal_temp_to_step(temp)); +} + static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, struct hisi_thermal_sensor *sensor) { @@ -245,7 +251,7 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev) sensor = &data->sensors[data->irq_bind_sensor]; dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n", - sensor->thres_temp / 1000); + sensor->thres_temp); mutex_unlock(&data->thermal_lock); for (i = 0; i < HISI_MAX_SENSORS; i++) { @@ -284,7 +290,7 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev, for (i = 0; i < of_thermal_get_ntrips(sensor->tzd); i++) { if (trip[i].type == THERMAL_TRIP_PASSIVE) { - sensor->thres_temp = trip[i].temperature; + sensor->thres_temp = hisi_thermal_round_temp(trip[i].temperature); break; } } From 0237a0a456563d461814111db15ae26666ce730e Mon Sep 17 00:00:00 2001 From: Peter Hutterer Date: Mon, 4 Dec 2017 10:26:17 +1000 Subject: [PATCH 0962/3326] platform/x86: asus-wireless: send an EV_SYN/SYN_REPORT between state changes commit bff5bf9db1c9453ffd0a78abed3e2d040c092fd9 upstream. Sending the switch state change twice within the same frame is invalid evdev protocol and only works if the client handles keys immediately as well. Processing events immediately is incorrect, it forces a fake order of events that does not exist on the device. Recent versions of libinput changed to only process the device state and SYN_REPORT time, so now the key event is lost. https://bugs.freedesktop.org/show_bug.cgi?id=104041 Signed-off-by: Peter Hutterer Signed-off-by: Darren Hart (VMware) Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/asus-wireless.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c index f3796164329efb..d4aeac3477f550 100644 --- a/drivers/platform/x86/asus-wireless.c +++ b/drivers/platform/x86/asus-wireless.c @@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event) return; } input_report_key(data->idev, KEY_RFKILL, 1); + input_sync(data->idev); input_report_key(data->idev, KEY_RFKILL, 0); input_sync(data->idev); } From 7d7545295e714751fa236c6284167342b066d825 Mon Sep 17 00:00:00 2001 From: "Kirill A. 
Shutemov" Date: Tue, 7 Nov 2017 11:33:37 +0300 Subject: [PATCH 0963/3326] mm/sparsemem: Fix ARM64 boot crash when CONFIG_SPARSEMEM_EXTREME=y commit 629a359bdb0e0652a8227b4ff3125431995fec6e upstream. Since commit: 83e3c48729d9 ("mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y") we allocate the mem_section array dynamically in sparse_memory_present_with_active_regions(), but some architectures, like arm64, don't call the routine to initialize sparsemem. Let's move the initialization into memory_present() it should cover all architectures. Reported-and-tested-by: Sudeep Holla Tested-by: Bjorn Andersson Signed-off-by: Kirill A. Shutemov Acked-by: Will Deacon Cc: Andrew Morton Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Fixes: 83e3c48729d9 ("mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y") Link: http://lkml.kernel.org/r/20171107083337.89952-1-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar Cc: Dan Rue Cc: Naresh Kamboju Signed-off-by: Greg Kroah-Hartman --- mm/page_alloc.c | 10 ---------- mm/sparse.c | 10 ++++++++++ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 02cf30bd01cd9d..d51c2087c498c6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5651,16 +5651,6 @@ void __init sparse_memory_present_with_active_regions(int nid) unsigned long start_pfn, end_pfn; int i, this_nid; -#ifdef CONFIG_SPARSEMEM_EXTREME - if (!mem_section) { - unsigned long size, align; - - size = sizeof(struct mem_section) * NR_SECTION_ROOTS; - align = 1 << (INTERNODE_CACHE_SHIFT); - mem_section = memblock_virt_alloc(size, align); - } -#endif - for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) memory_present(this_nid, start_pfn, end_pfn); } diff --git a/mm/sparse.c b/mm/sparse.c index 044138852bafce..60805abf98af71 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -207,6 +207,16 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) { unsigned long pfn; +#ifdef CONFIG_SPARSEMEM_EXTREME + if (unlikely(!mem_section)) { + unsigned long size, align; + + size = sizeof(struct mem_section) * NR_SECTION_ROOTS; + align = 1 << (INTERNODE_CACHE_SHIFT); + mem_section = memblock_virt_alloc(size, align); + } +#endif + start &= PAGE_SECTION_MASK; mminit_validate_memmodel_limits(&start, &end); for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { From 2b3ea8ceb2bb71e9e58527661261dba127137d9b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:22:59 +0100 Subject: [PATCH 0964/3326] bpf: fix branch pruning logic From: Alexei Starovoitov [ Upstream commit c131187db2d3fa2f8bf32fdf4e9a4ef805168467 ] when the verifier detects that register contains a runtime constant and it's compared with another constant it will prune exploration of the branch that is guaranteed not to be taken at runtime. This is all correct, but malicious program may be constructed in such a way that it always has a constant comparison and the other branch is never taken under any conditions. In this case such path through the program will not be explored by the verifier. It won't be taken at run-time either, but since all instructions are JITed the malicious program may cause JITs to complain about using reserved fields, etc. To fix the issue we have to track the instructions explored by the verifier and sanitize instructions that are dead at run time with NOPs. 
We cannot reject such dead code, since llvm generates it for valid C code, since it doesn't do as much data flow analysis as the verifier does. Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)") Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- include/linux/bpf_verifier.h | 2 +- kernel/bpf/verifier.c | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index b8d200f60a4090..5d6de3b57758c7 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -110,7 +110,7 @@ struct bpf_insn_aux_data { struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ }; int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ - int converted_op_size; /* the valid value width after perceived conversion */ + bool seen; /* this insn was processed by the verifier */ }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c48ca2a34b5e13..201d6b965811f2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3665,6 +3665,7 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; + env->insn_aux_data[insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) @@ -3855,6 +3856,7 @@ static int do_check(struct bpf_verifier_env *env) return err; insn_idx++; + env->insn_aux_data[insn_idx].seen = true; } else { verbose("invalid BPF_LD mode\n"); return -EINVAL; @@ -4035,6 +4037,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; + int i; if (cnt == 1) return 0; @@ -4044,6 +4047,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); + for (i = off; i < off + cnt - 1; i++) + new_data[i].seen = true; env->insn_aux_data = new_data; vfree(old_data); return 0; @@ -4062,6 +4067,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of return new_prog; } +/* The verifier does more data flow analysis than llvm and will not explore + * branches that are dead at run time. Malicious programs can have dead code + * too. Therefore replace all dead at-run-time code with nops. 
+ */ +static void sanitize_dead_code(struct bpf_verifier_env *env) +{ + struct bpf_insn_aux_data *aux_data = env->insn_aux_data; + struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); + struct bpf_insn *insn = env->prog->insnsi; + const int insn_cnt = env->prog->len; + int i; + + for (i = 0; i < insn_cnt; i++) { + if (aux_data[i].seen) + continue; + memcpy(insn + i, &nop, sizeof(nop)); + } +} + /* convert load instructions that access fields of 'struct __sk_buff' * into sequence of instructions that access fields of 'struct sk_buff' */ @@ -4378,6 +4402,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) while (pop_stack(env, NULL) >= 0); free_states(env); + if (ret == 0) + sanitize_dead_code(env); + if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); From a23244e8845f510ce3ba8b77b32cdd3d3d8ae853 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:00 +0100 Subject: [PATCH 0965/3326] bpf: fix corruption on concurrent perf_event_output calls [ Upstream commit 283ca526a9bd75aed7350220d7b1f8027d99c3fd ] When tracing and networking programs are both attached in the system and both use event-output helpers that eventually call into perf_event_output(), then we could end up in a situation where the tracing attached program runs in user context while a cls_bpf program is triggered on that same CPU out of softirq context. Since both rely on the same per-cpu perf_sample_data, we could potentially corrupt it. This can only ever happen in a combination of the two types; all tracing programs use a bpf_prog_active counter to bail out in case a program is already running on that CPU out of a different context. XDP and cls_bpf programs by themselves don't have this issue as they run in the same context only. Therefore, split both perf_sample_data so they cannot be accessed from each other. 
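The shape of the change is one scratch buffer per entry path instead of a shared one; the names below are illustrative, the real identifiers are in the diff that follows.

/* Conceptual sketch: a networking program firing from softirq cannot
 * overwrite the sample data a tracing program is still filling in on
 * the same CPU, because each path owns its own per-CPU buffer. */
static DEFINE_PER_CPU(struct perf_sample_data, trace_sd);	/* task context    */
static DEFINE_PER_CPU(struct perf_sample_data, net_sd);		/* softirq context */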
Fixes: 20b9d7ac4852 ("bpf: avoid excessive stack usage for perf_sample_data") Reported-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Tested-by: Song Liu Acked-by: Alexei Starovoitov Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- kernel/trace/bpf_trace.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index dc498b605d5dd3..6350f64d5aa40e 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -293,14 +293,13 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = { .arg2_type = ARG_ANYTHING, }; -static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd); +static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd); static __always_inline u64 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, - u64 flags, struct perf_raw_record *raw) + u64 flags, struct perf_sample_data *sd) { struct bpf_array *array = container_of(map, struct bpf_array, map); - struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd); unsigned int cpu = smp_processor_id(); u64 index = flags & BPF_F_INDEX_MASK; struct bpf_event_entry *ee; @@ -323,8 +322,6 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, if (unlikely(event->oncpu != cpu)) return -EOPNOTSUPP; - perf_sample_data_init(sd, 0, 0); - sd->raw = raw; perf_event_output(event, sd, regs); return 0; } @@ -332,6 +329,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, u64, flags, void *, data, u64, size) { + struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd); struct perf_raw_record raw = { .frag = { .size = size, @@ -342,7 +340,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, if (unlikely(flags & ~(BPF_F_INDEX_MASK))) return -EINVAL; - return __bpf_perf_event_output(regs, map, flags, &raw); + perf_sample_data_init(sd, 0, 0); + sd->raw = &raw; + + return __bpf_perf_event_output(regs, map, flags, sd); } static const struct bpf_func_proto bpf_perf_event_output_proto = { @@ -357,10 +358,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = { }; static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); +static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd); u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) { + struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd); struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); struct perf_raw_frag frag = { .copy = ctx_copy, @@ -378,8 +381,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, }; perf_fetch_caller_regs(regs); + perf_sample_data_init(sd, 0, 0); + sd->raw = &raw; - return __bpf_perf_event_output(regs, map, flags, &raw); + return __bpf_perf_event_output(regs, map, flags, sd); } BPF_CALL_0(bpf_get_current_task) From 83ab155d144922cb7421fb975e500901185e7644 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:01 +0100 Subject: [PATCH 0966/3326] bpf, s390x: do not reload skb pointers in non-skb context [ Upstream commit 6d59b7dbf72ed20d0138e2f9b75ca3d4a9d4faca ] The assumption of unconditionally reloading skb pointers on BPF helper calls where bpf_helper_changes_pkt_data() holds true is wrong. There can be different contexts where the BPF helper would enforce a reload such as in case of XDP. 
Here, we do have a struct xdp_buff instead of struct sk_buff as context, thus this will access garbage. JITs only ever need to deal with cached skb pointer reload when ld_abs/ind was seen, therefore guard the reload behind SEEN_SKB only. Tested on s390x. Fixes: 9db7f2b81880 ("s390/bpf: recache skb->data/hlen for skb_vlan_push/pop") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Cc: Michael Holzheu Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- arch/s390/net/bpf_jit_comp.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index b15cd2f0320f81..33e2785f684280 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -55,8 +55,7 @@ struct bpf_jit { #define SEEN_LITERAL 8 /* code uses literals */ #define SEEN_FUNC 16 /* calls C functions */ #define SEEN_TAIL_CALL 32 /* code uses tail calls */ -#define SEEN_SKB_CHANGE 64 /* code changes skb data */ -#define SEEN_REG_AX 128 /* code uses constant blinding */ +#define SEEN_REG_AX 64 /* code uses constant blinding */ #define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB) /* @@ -448,12 +447,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit) EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, 152); } - if (jit->seen & SEEN_SKB) + if (jit->seen & SEEN_SKB) { emit_load_skb_data_hlen(jit); - if (jit->seen & SEEN_SKB_CHANGE) /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, STK_OFF_SKBP); + } } /* @@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT2(0x0d00, REG_14, REG_W1); /* lgr %b0,%r2: load return value into %b0 */ EMIT4(0xb9040000, BPF_REG_0, REG_2); - if (bpf_helper_changes_pkt_data((void *)func)) { - jit->seen |= SEEN_SKB_CHANGE; + if ((jit->seen & SEEN_SKB) && + bpf_helper_changes_pkt_data((void *)func)) { /* lg %b1,ST_OFF_SKBP(%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0, REG_15, STK_OFF_SKBP); From 8a681dfd8fb253cd3cac03a59e2867f9baad7934 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:02 +0100 Subject: [PATCH 0967/3326] bpf, ppc64: do not reload skb pointers in non-skb context [ Upstream commit 87338c8e2cbb317b5f757e6172f94e2e3799cd20 ] The assumption of unconditionally reloading skb pointers on BPF helper calls where bpf_helper_changes_pkt_data() holds true is wrong. There can be different contexts where the helper would enforce a reload such as in case of XDP. Here, we do have a struct xdp_buff instead of struct sk_buff as context, thus this will access garbage. JITs only ever need to deal with cached skb pointer reload when ld_abs/ind was seen, therefore guard the reload behind SEEN_SKB. Fixes: 156d0e290e96 ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF") Signed-off-by: Daniel Borkmann Reviewed-by: Naveen N. 
Rao Acked-by: Alexei Starovoitov Tested-by: Sandipan Das Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/net/bpf_jit_comp64.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index a66e64b0b251f7..5d115bd3253915 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -762,7 +762,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, func = (u8 *) __bpf_call_base + imm; /* Save skb pointer if we need to re-cache skb data */ - if (bpf_helper_changes_pkt_data(func)) + if ((ctx->seen & SEEN_SKB) && + bpf_helper_changes_pkt_data(func)) PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx)); bpf_jit_emit_func_call(image, ctx, (u64)func); @@ -771,7 +772,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_MR(b2p[BPF_REG_0], 3); /* refresh skb cache */ - if (bpf_helper_changes_pkt_data(func)) { + if ((ctx->seen & SEEN_SKB) && + bpf_helper_changes_pkt_data(func)) { /* reload skb pointer to r3 */ PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx)); bpf_jit_emit_skb_loads(image, ctx); From 82a9d62f603f0cb5549c4ca554f06e70510b7296 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:03 +0100 Subject: [PATCH 0968/3326] bpf, sparc: fix usage of wrong reg for load_skb_regs after call [ Upstream commit 07aee94394547721ac168cbf4e1c09c14a5fe671 ] When LD_ABS/IND is used in the program, and we have a BPF helper call that changes packet data (bpf_helper_changes_pkt_data() returns true), then in case of sparc JIT, we try to reload cached skb data from bpf2sparc[BPF_REG_6]. However, there is no such guarantee or assumption that skb sits in R6 at this point, all helpers changing skb data only have a guarantee that skb sits in R1. Therefore, store BPF R1 in L7 temporarily and after procedure call use L7 to reload cached skb data. skb sitting in R6 is only true at the time when LD_ABS/IND is executed. Fixes: 7a12b5031c6b ("sparc64: Add eBPF JIT.") Signed-off-by: Daniel Borkmann Acked-by: David S. Miller Acked-by: Alexei Starovoitov Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- arch/sparc/net/bpf_jit_comp_64.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 5765e7e711f782..ff5f9cb3039af1 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) u8 *func = ((u8 *)__bpf_call_base) + imm; ctx->saw_call = true; + if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func)) + emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx); emit_call((u32 *)func, ctx); emit_nop(ctx); emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx); - if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind) - load_skb_regs(ctx, bpf2sparc[BPF_REG_6]); + if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func)) + load_skb_regs(ctx, L7); break; } From 4d54f7df5131d67f653f674003ec5f52c9818b53 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:04 +0100 Subject: [PATCH 0969/3326] bpf/verifier: fix bounds calculation on BPF_RSH From: Edward Cree [ Upstream commit 4374f256ce8182019353c0c639bb8d0695b4c941 ] Incorrect signed bounds were being computed. 
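A quick numeric illustration of the hazard: a signed range that crosses zero says nothing useful about an unsigned right shift, because a small negative value becomes a huge positive one after the cast.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t v = -1;			/* e.g. inside a range like [-2, 3] */
	uint64_t r = (uint64_t)v >> 1;	/* 0x7fffffffffffffff == S64_MAX */

	printf("(u64)-1 >> 1 = %llu\n", (unsigned long long)r);
	return 0;
}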
If the old upper signed bound was positive and the old lower signed bound was negative, this could cause the new upper signed bound to be too low, leading to security issues. Fixes: b03c9f9fdc37 ("bpf/verifier: track signed and unsigned min/max values") Reported-by: Jann Horn Signed-off-by: Edward Cree Acked-by: Alexei Starovoitov [jannh@google.com: changed description to reflect bug impact] Signed-off-by: Jann Horn Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 201d6b965811f2..c6ec04f9452555 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2157,20 +2157,22 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, mark_reg_unknown(regs, insn->dst_reg); break; } - /* BPF_RSH is an unsigned shift, so make the appropriate casts */ - if (dst_reg->smin_value < 0) { - if (umin_val) { - /* Sign bit will be cleared */ - dst_reg->smin_value = 0; - } else { - /* Lost sign bit information */ - dst_reg->smin_value = S64_MIN; - dst_reg->smax_value = S64_MAX; - } - } else { - dst_reg->smin_value = - (u64)(dst_reg->smin_value) >> umax_val; - } + /* BPF_RSH is an unsigned shift. If the value in dst_reg might + * be negative, then either: + * 1) src_reg might be zero, so the sign bit of the result is + * unknown, so we lose our signed bounds + * 2) it's known negative, thus the unsigned bounds capture the + * signed bounds + * 3) the signed bounds cross zero, so they tell us nothing + * about the result + * If the value in dst_reg is known nonnegative, then again the + * unsigned bounts capture the signed bounds. + * Thus, in all cases it suffices to blow away our signed bounds + * and rely on inferring new ones from the unsigned bounds and + * var_off of the result. + */ + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; if (src_known) dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); From 6e12ea4fb45ca86cdd7425276b6993455fee947a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:05 +0100 Subject: [PATCH 0970/3326] bpf: fix incorrect sign extension in check_alu_op() From: Jann Horn [ Upstream commit 95a762e2c8c942780948091f8f2a4f32fce1ac6f ] Distinguish between BPF_ALU64|BPF_MOV|BPF_K (load 32-bit immediate, sign-extended to 64-bit) and BPF_ALU|BPF_MOV|BPF_K (load 32-bit immediate, zero-padded to 64-bit); only perform sign extension in the first case. Starting with v4.14, this is exploitable by unprivileged users as long as the unprivileged_bpf_disabled sysctl isn't set. Debian assigned CVE-2017-16995 for this issue. 
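The two MOV_K semantics the fix distinguishes can be shown with plain C casts: the 64-bit form sign-extends the 32-bit immediate, the 32-bit form zero-extends it.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t imm = -1;

	printf("BPF_ALU64|BPF_MOV|BPF_K: %#llx\n",
	       (unsigned long long)(uint64_t)(int64_t)imm);	/* 0xffffffffffffffff */
	printf("BPF_ALU  |BPF_MOV|BPF_K: %#llx\n",
	       (unsigned long long)(uint32_t)imm);		/* 0xffffffff */
	return 0;
}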
v3: - add CVE number (Ben Hutchings) Fixes: 484611357c19 ("bpf: allow access into map value arrays") Signed-off-by: Jann Horn Acked-by: Edward Cree Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c6ec04f9452555..2811ced3c2a991 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2374,7 +2374,13 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; - __mark_reg_known(regs + insn->dst_reg, insn->imm); + if (BPF_CLASS(insn->code) == BPF_ALU64) { + __mark_reg_known(regs + insn->dst_reg, + insn->imm); + } else { + __mark_reg_known(regs + insn->dst_reg, + (u32)insn->imm); + } } } else if (opcode > BPF_END) { From bf5ee24e87e39548bf30d4e18e479e61a5a98336 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:06 +0100 Subject: [PATCH 0971/3326] bpf: fix incorrect tracking of register size truncation From: Jann Horn [ Upstream commit 0c17d1d2c61936401f4702e1846e2c19b200f958 ] Properly handle register truncation to a smaller size. The old code first mirrors the clearing of the high 32 bits in the bitwise tristate representation, which is correct. But then, it computes the new arithmetic bounds as the intersection between the old arithmetic bounds and the bounds resulting from the bitwise tristate representation. Therefore, when coerce_reg_to_32() is called on a number with bounds [0xffff'fff8, 0x1'0000'0007], the verifier computes [0xffff'fff8, 0xffff'ffff] as bounds of the truncated number. This is incorrect: The truncated number could also be in the range [0, 7], and no meaningful arithmetic bounds can be computed in that case apart from the obvious [0, 0xffff'ffff]. Starting with v4.14, this is exploitable by unprivileged users as long as the unprivileged_bpf_disabled sysctl isn't set. Debian assigned CVE-2017-16996 for this issue. 
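Working the example above through the replacement helper by hand (this arithmetic is not part of the patch, it just applies the coerce_reg_to_size() logic from the hunk below):

	umin = 0xffff'fff8, umax = 0x1'0000'0007, size = 4
	mask = ((u64)1 << 32) - 1 = 0xffff'ffff
	umin & ~mask = 0x0'0000'0000, umax & ~mask = 0x1'0000'0000  -> they differ
	=> bounds become [0x0, 0xffff'ffff] instead of the bogus
	   [0xffff'fff8, 0xffff'ffff] produced by the old intersection approach.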
v2: - flip the mask during arithmetic bounds calculation (Ben Hutchings) v3: - add CVE number (Ben Hutchings) Fixes: b03c9f9fdc37 ("bpf/verifier: track signed and unsigned min/max values") Signed-off-by: Jann Horn Acked-by: Edward Cree Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 44 ++++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 2811ced3c2a991..1e2afb264c31fe 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1068,6 +1068,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, return check_generic_ptr_alignment(reg, pointer_desc, off, size, strict); } +/* truncate register to smaller size (in bytes) + * must be called with size < BPF_REG_SIZE + */ +static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) +{ + u64 mask; + + /* clear high bits in bit representation */ + reg->var_off = tnum_cast(reg->var_off, size); + + /* fix arithmetic bounds */ + mask = ((u64)1 << (size * 8)) - 1; + if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { + reg->umin_value &= mask; + reg->umax_value &= mask; + } else { + reg->umin_value = 0; + reg->umax_value = mask; + } + reg->smin_value = reg->umin_value; + reg->smax_value = reg->umax_value; +} + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -1200,9 +1223,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && state->regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ - state->regs[value_regno].var_off = tnum_cast( - state->regs[value_regno].var_off, size); - __update_reg_bounds(&state->regs[value_regno]); + coerce_reg_to_size(&state->regs[value_regno], size); } return err; } @@ -1742,14 +1763,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) return 0; } -static void coerce_reg_to_32(struct bpf_reg_state *reg) -{ - /* clear high 32 bits */ - reg->var_off = tnum_cast(reg->var_off, 4); - /* Update bounds */ - __update_reg_bounds(reg); -} - static bool signed_add_overflows(s64 a, s64 b) { /* Do the add in u64, where overflow is well-defined */ @@ -1984,8 +1997,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->64 */ - coerce_reg_to_32(dst_reg); - coerce_reg_to_32(&src_reg); + coerce_reg_to_size(dst_reg, 4); + coerce_reg_to_size(&src_reg, 4); } smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; @@ -2364,10 +2377,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EACCES; } mark_reg_unknown(regs, insn->dst_reg); - /* high 32 bits are known zero. 
*/ - regs[insn->dst_reg].var_off = tnum_cast( - regs[insn->dst_reg].var_off, 4); - __update_reg_bounds(&regs[insn->dst_reg]); + coerce_reg_to_size(&regs[insn->dst_reg], 4); } } else { /* case: R = imm From 6c8e098d0324412d4ae9e06c7e611a96b87faf80 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:07 +0100 Subject: [PATCH 0972/3326] bpf: fix 32-bit ALU op verification From: Jann Horn [ Upstream commit 468f6eafa6c44cb2c5d8aad35e12f06c240a812a ] 32-bit ALU ops operate on 32-bit values and have 32-bit outputs. Adjust the verifier accordingly. Fixes: f1174f77b50c ("bpf/verifier: rework value tracking") Signed-off-by: Jann Horn Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 1e2afb264c31fe..0c7e4c8a2b8a51 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1984,6 +1984,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, return 0; } +/* WARNING: This function does calculations on 64-bit values, but the actual + * execution may occur on 32-bit values. Therefore, things like bitshifts + * need extra checks in the 32-bit case. + */ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, @@ -1994,12 +1998,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; + u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; - if (BPF_CLASS(insn->code) != BPF_ALU64) { - /* 32-bit ALU ops are (32,32)->64 */ - coerce_reg_to_size(dst_reg, 4); - coerce_reg_to_size(&src_reg, 4); - } smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; @@ -2135,9 +2135,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, __update_reg_bounds(dst_reg); break; case BPF_LSH: - if (umax_val > 63) { - /* Shifts greater than 63 are undefined. This includes - * shifts by a negative number. + if (umax_val >= insn_bitness) { + /* Shifts greater than 31 or 63 are undefined. + * This includes shifts by a negative number. */ mark_reg_unknown(regs, insn->dst_reg); break; @@ -2163,9 +2163,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, __update_reg_bounds(dst_reg); break; case BPF_RSH: - if (umax_val > 63) { - /* Shifts greater than 63 are undefined. This includes - * shifts by a negative number. + if (umax_val >= insn_bitness) { + /* Shifts greater than 31 or 63 are undefined. + * This includes shifts by a negative number. */ mark_reg_unknown(regs, insn->dst_reg); break; @@ -2201,6 +2201,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, break; } + if (BPF_CLASS(insn->code) != BPF_ALU64) { + /* 32-bit ALU ops are (32,32)->32 */ + coerce_reg_to_size(dst_reg, 4); + coerce_reg_to_size(&src_reg, 4); + } + __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; From 2120fca0ecfb4552d27608d409ebd3403ce02ce4 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:08 +0100 Subject: [PATCH 0973/3326] bpf: fix missing error return in check_stack_boundary() From: Jann Horn Prevent indirect stack accesses at non-constant addresses, which would permit reading and corrupting spilled pointers.
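The shape of the bug, sketched in illustrative C (not the exact kernel source): the error path printed a diagnostic but fell through, so the variable-offset stack access was still accepted.

	if (!tnum_is_const(regs[regno].var_off)) {
		verbose("invalid variable stack read ...\n");
		/* before this fix there was no return here, so checking
		 * simply continued as if the access were fine */
		return -EACCES;		/* the one-line fix below */
	}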
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking") Signed-off-by: Jann Horn Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 0c7e4c8a2b8a51..8aa98a0591d6f4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1303,6 +1303,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off); verbose("invalid variable stack read R%d var_off=%s\n", regno, tn_buf); + return -EACCES; } off = regs[regno].off + regs[regno].var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || From c90268f7cbee0781331b96d1423d0f28a6183889 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:09 +0100 Subject: [PATCH 0974/3326] bpf: force strict alignment checks for stack pointers From: Jann Horn [ Upstream commit a5ec6ae161d72f01411169a938fa5f8baea16e8f ] Force strict alignment checks for stack pointers because the tracking of stack spills relies on it; unaligned stack accesses can lead to corruption of spilled registers, which is exploitable. Fixes: f1174f77b50c ("bpf/verifier: rework value tracking") Signed-off-by: Jann Horn Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8aa98a0591d6f4..8c353554628e23 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1061,6 +1061,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, break; case PTR_TO_STACK: pointer_desc = "stack "; + /* The stack spill tracking logic in check_stack_write() + * and check_stack_read() relies on stack accesses being + * aligned. + */ + strict = true; break; default: break; From cb56cc1b292b8b3f787fad89f1208f8e98d12c7d Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:10 +0100 Subject: [PATCH 0975/3326] bpf: don't prune branches when a scalar is replaced with a pointer From: Jann Horn [ Upstream commit 179d1c5602997fef5a940c6ddcf31212cbfebd14 ] This could be made safe by passing through a reference to env and checking for env->allow_ptr_leaks, but it would only work one way and is probably not worth the hassle - not doing it will not directly lead to program rejection. Fixes: f1174f77b50c ("bpf/verifier: rework value tracking") Signed-off-by: Jann Horn Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8c353554628e23..5a30eda17c4fe2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3337,15 +3337,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { - /* if we knew anything about the old value, we're not - * equal, because we can't know anything about the - * scalar value of the pointer in the new value. + /* We're trying to use a pointer in place of a scalar. + * Even if the scalar was unbounded, this could lead to + * pointer leaks because scalars are allowed to leak + * while pointers are not. 
We could make this safe in + * special cases if root is calling us, but it's + * probably not worth the hassle. */ - return rold->umin_value == 0 && - rold->umax_value == U64_MAX && - rold->smin_value == S64_MIN && - rold->smax_value == S64_MAX && - tnum_is_unknown(rold->var_off); + return false; } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and From de31796c052e47c99b1bb342bc70aa826733e862 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:11 +0100 Subject: [PATCH 0976/3326] bpf: fix integer overflows From: Alexei Starovoitov [ Upstream commit bb7f0f989ca7de1153bd128a40a71709e339fa03 ] There were various issues related to the limited size of integers used in the verifier: - `off + size` overflow in __check_map_access() - `off + reg->off` overflow in check_mem_access() - `off + reg->var_off.value` overflow or 32-bit truncation of `reg->var_off.value` in check_mem_access() - 32-bit truncation in check_stack_boundary() Make sure that any integer math cannot overflow by not allowing pointer math with large values. Also reduce the scope of "scalar op scalar" tracking. Fixes: f1174f77b50c ("bpf/verifier: rework value tracking") Reported-by: Jann Horn Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- include/linux/bpf_verifier.h | 4 +-- kernel/bpf/verifier.c | 48 ++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 5d6de3b57758c7..73bec75b74c806 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -15,11 +15,11 @@ * In practice this is far bigger than any realistic pointer offset; this limit * ensures that umax_value + (int)off + (int)size cannot overflow a u64. */ -#define BPF_MAX_VAR_OFF (1ULL << 31) +#define BPF_MAX_VAR_OFF (1 << 29) /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures * that converting umax_value to int cannot overflow. */ -#define BPF_MAX_VAR_SIZ INT_MAX +#define BPF_MAX_VAR_SIZ (1 << 29) /* Liveness marks, used for registers and spilled-regs (in stack slots). * Read marks propagate upwards until they find a write mark; they record that diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5a30eda17c4fe2..c5ff809e86d0d3 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1789,6 +1789,41 @@ static bool signed_sub_overflows(s64 a, s64 b) return res > a; } +static bool check_reg_sane_offset(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, + enum bpf_reg_type type) +{ + bool known = tnum_is_const(reg->var_off); + s64 val = reg->var_off.value; + s64 smin = reg->smin_value; + + if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { + verbose("math between %s pointer and %lld is not allowed\n", + reg_type_str[type], val); + return false; + } + + if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { + verbose("%s pointer offset %d is not allowed\n", + reg_type_str[type], reg->off); + return false; + } + + if (smin == S64_MIN) { + verbose("math between %s pointer and register with unbounded min value is not allowed\n", + reg_type_str[type]); + return false; + } + + if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { + verbose("value %lld makes %s pointer be out of bounds\n", + smin, reg_type_str[type]); + return false; + } + + return true; +} + /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 
* Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a @@ -1854,6 +1889,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; + if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || + !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) + return -EINVAL; + switch (opcode) { case BPF_ADD: /* We can take a fixed offset as long as it doesn't overflow @@ -1984,6 +2023,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, return -EACCES; } + if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) + return -EINVAL; + __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); @@ -2013,6 +2055,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); + if (!src_known && + opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { + __mark_reg_unknown(dst_reg); + return 0; + } + switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || From d605778b613a535d258c1e6a931bc2153aa63185 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 22 Dec 2017 16:23:12 +0100 Subject: [PATCH 0977/3326] selftests/bpf: add tests for recent bugfixes From: Jann Horn [ Upstream commit 2255f8d520b0a318fc6d387d0940854b2f522a7f ] These tests should cover the following cases: - MOV with both zero-extended and sign-extended immediates - implicit truncation of register contents via ALU32/MOV32 - implicit 32-bit truncation of ALU32 output - oversized register source operand for ALU32 shift - right-shift of a number that could be positive or negative - map access where adding the operation size to the offset causes signed 32-bit overflow - direct stack access at a ~4GiB offset Also remove the F_LOAD_WITH_STRICT_ALIGNMENT flag from a bunch of tests that should fail independent of what flags userspace passes. 
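(All of the new cases land in tools/testing/selftests/bpf/test_verifier.c, per the diffstat below; the usual way to exercise them is to build that selftest directory and run the resulting test_verifier binary as root, though the exact make invocation can differ between trees.)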
Signed-off-by: Jann Horn Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/bpf/test_verifier.c | 549 +++++++++++++++++++- 1 file changed, 533 insertions(+), 16 deletions(-) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 64ae21f644896a..7a2d221c4702a7 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -606,7 +606,6 @@ static struct bpf_test tests[] = { }, .errstr = "misaligned stack access", .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "invalid map_fd for function call", @@ -1797,7 +1796,6 @@ static struct bpf_test tests[] = { }, .result = REJECT, .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "PTR_TO_STACK store/load - bad alignment on reg", @@ -1810,7 +1808,6 @@ static struct bpf_test tests[] = { }, .result = REJECT, .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "PTR_TO_STACK store/load - out of bounds low", @@ -6115,7 +6112,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "R0 min value is negative", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6139,7 +6136,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "R0 min value is negative", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6165,7 +6162,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "R8 invalid mem access 'inv'", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6190,7 +6187,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "R8 invalid mem access 'inv'", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6238,7 +6235,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "R0 min value is negative", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6309,7 +6306,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "R0 min value is negative", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6360,7 +6357,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "R0 min value is negative", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6387,7 +6384,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "R0 min value is negative", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6413,7 +6410,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "R0 min value is negative", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6442,7 +6439,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "R0 min value is negative", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6472,7 +6469,7 @@ static struct bpf_test tests[] = { BPF_JMP_IMM(BPF_JA, 0, 0, -7), }, .fixup_map1 = { 4 }, - .errstr = "R0 min value is negative", + .errstr = "unbounded min value", .result = REJECT, }, { @@ -6500,8 +6497,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer comparison prohibited", - .errstr = "R0 min value is negative", + .errstr = 
"unbounded min value", .result = REJECT, .result_unpriv = REJECT, }, @@ -6556,6 +6552,462 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result = REJECT, }, + { + "bounds check based on zero-extended MOV", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + /* r2 = 0x0000'0000'ffff'ffff */ + BPF_MOV32_IMM(BPF_REG_2, 0xffffffff), + /* r2 = 0 */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), + /* no-op */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + /* access at offset 0 */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .result = ACCEPT + }, + { + "bounds check based on sign-extended MOV. test1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + /* r2 = 0xffff'ffff'ffff'ffff */ + BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), + /* r2 = 0xffff'ffff */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), + /* r0 = */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + /* access to OOB pointer */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .errstr = "map_value pointer and 4294967295", + .result = REJECT + }, + { + "bounds check based on sign-extended MOV. test2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + /* r2 = 0xffff'ffff'ffff'ffff */ + BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), + /* r2 = 0xfff'ffff */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36), + /* r0 = */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + /* access to OOB pointer */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .errstr = "R0 min value is outside of the array range", + .result = REJECT + }, + { + "bounds check based on reg_off + var_off + insn_off. test1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 4 }, + .errstr = "value_size=8 off=1073741825", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "bounds check based on reg_off + var_off + insn_off. 
test2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 4 }, + .errstr = "value 1073741823", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "bounds check after truncation of non-boundary-crossing range", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + /* r1 = [0x00, 0xff] */ + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_2, 1), + /* r2 = 0x10'0000'0000 */ + BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36), + /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), + /* r1 = [0x00, 0xff] */ + BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff), + /* r1 = 0 */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), + /* no-op */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* access at offset 0 */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .result = ACCEPT + }, + { + "bounds check after truncation of boundary-crossing range (1)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + /* r1 = [0x00, 0xff] */ + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0xffff'ff80, 0x1'0000'007f] */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0xffff'ff80, 0xffff'ffff] or + * [0x0000'0000, 0x0000'007f] + */ + BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0x00, 0xff] or + * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] + */ + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = 0 or + * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] + */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), + /* no-op or OOB pointer computation */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* potentially OOB access */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + /* not actually fully unbounded, but the bound is very high */ + .errstr = "R0 unbounded memory access", + .result = REJECT + }, + { + "bounds check after truncation of boundary-crossing range (2)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + 
BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + /* r1 = [0x00, 0xff] */ + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0xffff'ff80, 0x1'0000'007f] */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0xffff'ff80, 0xffff'ffff] or + * [0x0000'0000, 0x0000'007f] + * difference to previous test: truncation via MOV32 + * instead of ALU32. + */ + BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0x00, 0xff] or + * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] + */ + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = 0 or + * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] + */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), + /* no-op or OOB pointer computation */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* potentially OOB access */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + /* not actually fully unbounded, but the bound is very high */ + .errstr = "R0 unbounded memory access", + .result = REJECT + }, + { + "bounds check after wrapping 32-bit addition", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + /* r1 = 0x7fff'ffff */ + BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff), + /* r1 = 0xffff'fffe */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), + /* r1 = 0 */ + BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2), + /* no-op */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* access at offset 0 */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .result = ACCEPT + }, + { + "bounds check after shift with oversized count operand", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_IMM(BPF_REG_2, 32), + BPF_MOV64_IMM(BPF_REG_1, 1), + /* r1 = (u32)1 << (u32)32 = ? 
*/ + BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2), + /* r1 = [0x0000, 0xffff] */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff), + /* computes unknown pointer, potentially OOB */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* potentially OOB access */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .errstr = "R0 max value is outside of the array range", + .result = REJECT + }, + { + "bounds check after right shift of maybe-negative number", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + /* r1 = [0x00, 0xff] */ + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + /* r1 = [-0x01, 0xfe] */ + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), + /* r1 = 0 or 0xff'ffff'ffff'ffff */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), + /* r1 = 0 or 0xffff'ffff'ffff */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), + /* computes unknown pointer, potentially OOB */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* potentially OOB access */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .errstr = "R0 unbounded memory access", + .result = REJECT + }, + { + "bounds check map access with off+size signed 32bit overflow. test1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_JMP_A(0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .errstr = "map_value pointer and 2147483646", + .result = REJECT + }, + { + "bounds check map access with off+size signed 32bit overflow. test2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_JMP_A(0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .errstr = "pointer offset 1073741822", + .result = REJECT + }, + { + "bounds check map access with off+size signed 32bit overflow. test3", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), + BPF_JMP_A(0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .errstr = "pointer offset -1073741822", + .result = REJECT + }, + { + "bounds check map access with off+size signed 32bit overflow. 
test4", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_1, 1000000), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), + BPF_JMP_A(0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .errstr = "map_value pointer and 1000000000000", + .result = REJECT + }, + { + "pointer/scalar confusion in state equality check (way 1)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_JMP_A(1), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + BPF_JMP_A(0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr as return value" + }, + { + "pointer/scalar confusion in state equality check (way 2)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + BPF_JMP_A(1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr as return value" + }, { "variable-offset ctx access", .insns = { @@ -6597,6 +7049,71 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_LWT_IN, }, + { + "indirect variable-offset stack access", + .insns = { + /* Fill the top 8 bytes of the stack */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + /* Get an unknown value */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* Make it small and 4-byte aligned */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), + /* add it to fp. We now have either fp-4 or fp-8, but + * we don't know which + */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), + /* dereference it indirectly */ + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 5 }, + .errstr = "variable stack read R2", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, + }, + { + "direct stack access with 32-bit wraparound. test1", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_EXIT_INSN() + }, + .errstr = "fp pointer and 2147483647", + .result = REJECT + }, + { + "direct stack access with 32-bit wraparound. 
test2", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_EXIT_INSN() + }, + .errstr = "fp pointer and 1073741823", + .result = REJECT + }, + { + "direct stack access with 32-bit wraparound. test3", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_EXIT_INSN() + }, + .errstr = "fp pointer offset 1073741822", + .result = REJECT + }, { "liveness pruning and write screening", .insns = { From a9772285a7246c2a816da7f50c8c1e94b264770d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 24 Oct 2017 11:22:46 +0100 Subject: [PATCH 0978/3326] linux/compiler.h: Split into compiler.h and compiler_types.h commit d15155824c5014803d91b829736d249c500bdda6 upstream. linux/compiler.h is included indirectly by linux/types.h via uapi/linux/types.h -> uapi/linux/posix_types.h -> linux/stddef.h -> uapi/linux/stddef.h and is needed to provide a proper definition of offsetof. Unfortunately, compiler.h requires a definition of smp_read_barrier_depends() for defining lockless_dereference() and soon for defining READ_ONCE(), which means that all users of READ_ONCE() will need to include asm/barrier.h to avoid splats such as: In file included from include/uapi/linux/stddef.h:1:0, from include/linux/stddef.h:4, from arch/h8300/kernel/asm-offsets.c:11: include/linux/list.h: In function 'list_empty': >> include/linux/compiler.h:343:2: error: implicit declaration of function 'smp_read_barrier_depends' [-Werror=implicit-function-declaration] smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ ^ A better alternative is to include asm/barrier.h in linux/compiler.h, but this requires a type definition for "bool" on some architectures (e.g. x86), which is defined later by linux/types.h. Type "bool" is also used directly in linux/compiler.h, so the whole thing is pretty fragile. This patch splits compiler.h in two: compiler_types.h contains type annotations, definitions and the compiler-specific parts, whereas compiler.h #includes compiler-types.h and additionally defines macros such as {READ,WRITE.ACCESS}_ONCE(). uapi/linux/stddef.h and linux/linkage.h are then moved over to include linux/compiler_types.h, which fixes the build for h8 and blackfin. Signed-off-by: Will Deacon Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1508840570-22169-2-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/ptrace.h | 3 +- arch/sparc/include/asm/ptrace.h | 1 + arch/um/include/shared/init.h | 2 +- include/linux/compiler-clang.h | 2 +- include/linux/compiler-gcc.h | 2 +- include/linux/compiler-intel.h | 2 +- include/linux/compiler.h | 265 +----------------------------- include/linux/compiler_types.h | 274 ++++++++++++++++++++++++++++++++ include/linux/linkage.h | 2 +- include/uapi/linux/stddef.h | 2 +- scripts/headers_install.sh | 2 +- 11 files changed, 286 insertions(+), 271 deletions(-) create mode 100644 include/linux/compiler_types.h diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index e9c9a117bd25d5..c7cdbb43ae7c4b 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -126,8 +126,7 @@ extern unsigned long profile_pc(struct pt_regs *regs); /* * kprobe-based event tracer support */ -#include -#include +#include #define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0)) extern int regs_query_register_offset(const char *name); diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h index 6a339a78f4f42b..71dd82b43cc57d 100644 --- a/arch/sparc/include/asm/ptrace.h +++ b/arch/sparc/include/asm/ptrace.h @@ -7,6 +7,7 @@ #if defined(__sparc__) && defined(__arch64__) #ifndef __ASSEMBLY__ +#include #include #include diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h index 390572daa40de1..b3f5865a92c911 100644 --- a/arch/um/include/shared/init.h +++ b/arch/um/include/shared/init.h @@ -41,7 +41,7 @@ typedef int (*initcall_t)(void); typedef void (*exitcall_t)(void); -#include +#include /* These are for everybody (although not all archs will actually discard it in modules) */ diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 780b1242bf24de..3b609edffa8fb6 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_COMPILER_H +#ifndef __LINUX_COMPILER_TYPES_H #error "Please don't include directly, include instead." #endif diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index bb78e5bdff2633..2272ded07496d6 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_COMPILER_H +#ifndef __LINUX_COMPILER_TYPES_H #error "Please don't include directly, include instead." #endif diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 523d1b74550f20..bfa08160db3a46 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_COMPILER_H +#ifndef __LINUX_COMPILER_TYPES_H #error "Please don't include directly, include instead." 
#endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 712cd8bb00b4b9..fab5dc250c61a8 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -2,111 +2,12 @@ #ifndef __LINUX_COMPILER_H #define __LINUX_COMPILER_H -#ifndef __ASSEMBLY__ +#include -#ifdef __CHECKER__ -# define __user __attribute__((noderef, address_space(1))) -# define __kernel __attribute__((address_space(0))) -# define __safe __attribute__((safe)) -# define __force __attribute__((force)) -# define __nocast __attribute__((nocast)) -# define __iomem __attribute__((noderef, address_space(2))) -# define __must_hold(x) __attribute__((context(x,1,1))) -# define __acquires(x) __attribute__((context(x,0,1))) -# define __releases(x) __attribute__((context(x,1,0))) -# define __acquire(x) __context__(x,1) -# define __release(x) __context__(x,-1) -# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(3))) -# define __rcu __attribute__((noderef, address_space(4))) -# define __private __attribute__((noderef)) -extern void __chk_user_ptr(const volatile void __user *); -extern void __chk_io_ptr(const volatile void __iomem *); -# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) -#else /* __CHECKER__ */ -# ifdef STRUCTLEAK_PLUGIN -# define __user __attribute__((user)) -# else -# define __user -# endif -# define __kernel -# define __safe -# define __force -# define __nocast -# define __iomem -# define __chk_user_ptr(x) (void)0 -# define __chk_io_ptr(x) (void)0 -# define __builtin_warning(x, y...) (1) -# define __must_hold(x) -# define __acquires(x) -# define __releases(x) -# define __acquire(x) (void)0 -# define __release(x) (void)0 -# define __cond_lock(x,c) (c) -# define __percpu -# define __rcu -# define __private -# define ACCESS_PRIVATE(p, member) ((p)->member) -#endif /* __CHECKER__ */ - -/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ -#define ___PASTE(a,b) a##b -#define __PASTE(a,b) ___PASTE(a,b) +#ifndef __ASSEMBLY__ #ifdef __KERNEL__ -#ifdef __GNUC__ -#include -#endif - -#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) -#define notrace __attribute__((hotpatch(0,0))) -#else -#define notrace __attribute__((no_instrument_function)) -#endif - -/* Intel compiler defines __GNUC__. So we will overwrite implementations - * coming from above header files here - */ -#ifdef __INTEL_COMPILER -# include -#endif - -/* Clang compiler defines __GNUC__. So we will overwrite implementations - * coming from above header files here - */ -#ifdef __clang__ -#include -#endif - -/* - * Generic compiler-dependent macros required for kernel - * build go below this comment. Actual compiler/compiler version - * specific implementations come from the above header files - */ - -struct ftrace_branch_data { - const char *func; - const char *file; - unsigned line; - union { - struct { - unsigned long correct; - unsigned long incorrect; - }; - struct { - unsigned long miss; - unsigned long hit; - }; - unsigned long miss_hit[2]; - }; -}; - -struct ftrace_likely_data { - struct ftrace_branch_data data; - unsigned long constant; -}; - /* * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code * to disable branch tracing on a per file basis. @@ -333,6 +234,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * with an explicit memory barrier or atomic instruction that provides the * required ordering. 
*/ +#include #define __READ_ONCE(x, check) \ ({ \ @@ -364,167 +266,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #endif /* __ASSEMBLY__ */ -#ifdef __KERNEL__ -/* - * Allow us to mark functions as 'deprecated' and have gcc emit a nice - * warning for each use, in hopes of speeding the functions removal. - * Usage is: - * int __deprecated foo(void) - */ -#ifndef __deprecated -# define __deprecated /* unimplemented */ -#endif - -#ifdef MODULE -#define __deprecated_for_modules __deprecated -#else -#define __deprecated_for_modules -#endif - -#ifndef __must_check -#define __must_check -#endif - -#ifndef CONFIG_ENABLE_MUST_CHECK -#undef __must_check -#define __must_check -#endif -#ifndef CONFIG_ENABLE_WARN_DEPRECATED -#undef __deprecated -#undef __deprecated_for_modules -#define __deprecated -#define __deprecated_for_modules -#endif - -#ifndef __malloc -#define __malloc -#endif - -/* - * Allow us to avoid 'defined but not used' warnings on functions and data, - * as well as force them to be emitted to the assembly file. - * - * As of gcc 3.4, static functions that are not marked with attribute((used)) - * may be elided from the assembly file. As of gcc 3.4, static data not so - * marked will not be elided, but this may change in a future gcc version. - * - * NOTE: Because distributions shipped with a backported unit-at-a-time - * compiler in gcc 3.3, we must define __used to be __attribute__((used)) - * for gcc >=3.3 instead of 3.4. - * - * In prior versions of gcc, such functions and data would be emitted, but - * would be warned about except with attribute((unused)). - * - * Mark functions that are referenced only in inline assembly as __used so - * the code is emitted even though it appears to be unreferenced. - */ -#ifndef __used -# define __used /* unimplemented */ -#endif - -#ifndef __maybe_unused -# define __maybe_unused /* unimplemented */ -#endif - -#ifndef __always_unused -# define __always_unused /* unimplemented */ -#endif - -#ifndef noinline -#define noinline -#endif - -/* - * Rather then using noinline to prevent stack consumption, use - * noinline_for_stack instead. For documentation reasons. - */ -#define noinline_for_stack noinline - -#ifndef __always_inline -#define __always_inline inline -#endif - -#endif /* __KERNEL__ */ - -/* - * From the GCC manual: - * - * Many functions do not examine any values except their arguments, - * and have no effects except the return value. Basically this is - * just slightly more strict class than the `pure' attribute above, - * since function is not allowed to read global memory. - * - * Note that a function that has pointer arguments and examines the - * data pointed to must _not_ be declared `const'. Likewise, a - * function that calls a non-`const' function usually must not be - * `const'. It does not make sense for a `const' function to return - * `void'. - */ -#ifndef __attribute_const__ -# define __attribute_const__ /* unimplemented */ -#endif - -#ifndef __designated_init -# define __designated_init -#endif - -#ifndef __latent_entropy -# define __latent_entropy -#endif - -#ifndef __randomize_layout -# define __randomize_layout __designated_init -#endif - -#ifndef __no_randomize_layout -# define __no_randomize_layout -#endif - -#ifndef randomized_struct_fields_start -# define randomized_struct_fields_start -# define randomized_struct_fields_end -#endif - -/* - * Tell gcc if a function is cold. The compiler will assume any path - * directly leading to the call is unlikely. 
- */ - -#ifndef __cold -#define __cold -#endif - -/* Simple shorthand for a section definition */ -#ifndef __section -# define __section(S) __attribute__ ((__section__(#S))) -#endif - -#ifndef __visible -#define __visible -#endif - -#ifndef __nostackprotector -# define __nostackprotector -#endif - -/* - * Assume alignment of return value. - */ -#ifndef __assume_aligned -#define __assume_aligned(a, ...) -#endif - - -/* Are two types/vars the same type (ignoring qualifiers)? */ -#ifndef __same_type -# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) -#endif - -/* Is this type a native word size -- useful for atomic operations */ -#ifndef __native_word -# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) -#endif - /* Compile time object size, -1 for unknown */ #ifndef __compiletime_object_size # define __compiletime_object_size(obj) -1 diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h new file mode 100644 index 00000000000000..6b79a9bba9a763 --- /dev/null +++ b/include/linux/compiler_types.h @@ -0,0 +1,274 @@ +#ifndef __LINUX_COMPILER_TYPES_H +#define __LINUX_COMPILER_TYPES_H + +#ifndef __ASSEMBLY__ + +#ifdef __CHECKER__ +# define __user __attribute__((noderef, address_space(1))) +# define __kernel __attribute__((address_space(0))) +# define __safe __attribute__((safe)) +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __iomem __attribute__((noderef, address_space(2))) +# define __must_hold(x) __attribute__((context(x,1,1))) +# define __acquires(x) __attribute__((context(x,0,1))) +# define __releases(x) __attribute__((context(x,1,0))) +# define __acquire(x) __context__(x,1) +# define __release(x) __context__(x,-1) +# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) +# define __percpu __attribute__((noderef, address_space(3))) +# define __rcu __attribute__((noderef, address_space(4))) +# define __private __attribute__((noderef)) +extern void __chk_user_ptr(const volatile void __user *); +extern void __chk_io_ptr(const volatile void __iomem *); +# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) +#else /* __CHECKER__ */ +# ifdef STRUCTLEAK_PLUGIN +# define __user __attribute__((user)) +# else +# define __user +# endif +# define __kernel +# define __safe +# define __force +# define __nocast +# define __iomem +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +# define __builtin_warning(x, y...) (1) +# define __must_hold(x) +# define __acquires(x) +# define __releases(x) +# define __acquire(x) (void)0 +# define __release(x) (void)0 +# define __cond_lock(x,c) (c) +# define __percpu +# define __rcu +# define __private +# define ACCESS_PRIVATE(p, member) ((p)->member) +#endif /* __CHECKER__ */ + +/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ +#define ___PASTE(a,b) a##b +#define __PASTE(a,b) ___PASTE(a,b) + +#ifdef __KERNEL__ + +#ifdef __GNUC__ +#include +#endif + +#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) +#define notrace __attribute__((hotpatch(0,0))) +#else +#define notrace __attribute__((no_instrument_function)) +#endif + +/* Intel compiler defines __GNUC__. So we will overwrite implementations + * coming from above header files here + */ +#ifdef __INTEL_COMPILER +# include +#endif + +/* Clang compiler defines __GNUC__. 
So we will overwrite implementations + * coming from above header files here + */ +#ifdef __clang__ +#include +#endif + +/* + * Generic compiler-dependent macros required for kernel + * build go below this comment. Actual compiler/compiler version + * specific implementations come from the above header files + */ + +struct ftrace_branch_data { + const char *func; + const char *file; + unsigned line; + union { + struct { + unsigned long correct; + unsigned long incorrect; + }; + struct { + unsigned long miss; + unsigned long hit; + }; + unsigned long miss_hit[2]; + }; +}; + +struct ftrace_likely_data { + struct ftrace_branch_data data; + unsigned long constant; +}; + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +#ifdef __KERNEL__ +/* + * Allow us to mark functions as 'deprecated' and have gcc emit a nice + * warning for each use, in hopes of speeding the functions removal. + * Usage is: + * int __deprecated foo(void) + */ +#ifndef __deprecated +# define __deprecated /* unimplemented */ +#endif + +#ifdef MODULE +#define __deprecated_for_modules __deprecated +#else +#define __deprecated_for_modules +#endif + +#ifndef __must_check +#define __must_check +#endif + +#ifndef CONFIG_ENABLE_MUST_CHECK +#undef __must_check +#define __must_check +#endif +#ifndef CONFIG_ENABLE_WARN_DEPRECATED +#undef __deprecated +#undef __deprecated_for_modules +#define __deprecated +#define __deprecated_for_modules +#endif + +#ifndef __malloc +#define __malloc +#endif + +/* + * Allow us to avoid 'defined but not used' warnings on functions and data, + * as well as force them to be emitted to the assembly file. + * + * As of gcc 3.4, static functions that are not marked with attribute((used)) + * may be elided from the assembly file. As of gcc 3.4, static data not so + * marked will not be elided, but this may change in a future gcc version. + * + * NOTE: Because distributions shipped with a backported unit-at-a-time + * compiler in gcc 3.3, we must define __used to be __attribute__((used)) + * for gcc >=3.3 instead of 3.4. + * + * In prior versions of gcc, such functions and data would be emitted, but + * would be warned about except with attribute((unused)). + * + * Mark functions that are referenced only in inline assembly as __used so + * the code is emitted even though it appears to be unreferenced. + */ +#ifndef __used +# define __used /* unimplemented */ +#endif + +#ifndef __maybe_unused +# define __maybe_unused /* unimplemented */ +#endif + +#ifndef __always_unused +# define __always_unused /* unimplemented */ +#endif + +#ifndef noinline +#define noinline +#endif + +/* + * Rather then using noinline to prevent stack consumption, use + * noinline_for_stack instead. For documentation reasons. + */ +#define noinline_for_stack noinline + +#ifndef __always_inline +#define __always_inline inline +#endif + +#endif /* __KERNEL__ */ + +/* + * From the GCC manual: + * + * Many functions do not examine any values except their arguments, + * and have no effects except the return value. Basically this is + * just slightly more strict class than the `pure' attribute above, + * since function is not allowed to read global memory. + * + * Note that a function that has pointer arguments and examines the + * data pointed to must _not_ be declared `const'. Likewise, a + * function that calls a non-`const' function usually must not be + * `const'. It does not make sense for a `const' function to return + * `void'. 
+ */ +#ifndef __attribute_const__ +# define __attribute_const__ /* unimplemented */ +#endif + +#ifndef __designated_init +# define __designated_init +#endif + +#ifndef __latent_entropy +# define __latent_entropy +#endif + +#ifndef __randomize_layout +# define __randomize_layout __designated_init +#endif + +#ifndef __no_randomize_layout +# define __no_randomize_layout +#endif + +#ifndef randomized_struct_fields_start +# define randomized_struct_fields_start +# define randomized_struct_fields_end +#endif + +/* + * Tell gcc if a function is cold. The compiler will assume any path + * directly leading to the call is unlikely. + */ + +#ifndef __cold +#define __cold +#endif + +/* Simple shorthand for a section definition */ +#ifndef __section +# define __section(S) __attribute__ ((__section__(#S))) +#endif + +#ifndef __visible +#define __visible +#endif + +#ifndef __nostackprotector +# define __nostackprotector +#endif + +/* + * Assume alignment of return value. + */ +#ifndef __assume_aligned +#define __assume_aligned(a, ...) +#endif + + +/* Are two types/vars the same type (ignoring qualifiers)? */ +#ifndef __same_type +# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) +#endif + +/* Is this type a native word size -- useful for atomic operations */ +#ifndef __native_word +# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) +#endif + +#endif /* __LINUX_COMPILER_TYPES_H */ diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 2e6f90bd52aa62..f68db9e450eb3f 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -2,7 +2,7 @@ #ifndef _LINUX_LINKAGE_H #define _LINUX_LINKAGE_H -#include +#include #include #include #include diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index f65b92e0e1f914..ee8220f8dcf5f5 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#include +#include #ifndef __always_inline #define __always_inline inline diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh index 4d1ea96e8794c1..a18bca72099573 100755 --- a/scripts/headers_install.sh +++ b/scripts/headers_install.sh @@ -34,7 +34,7 @@ do sed -r \ -e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \ -e 's/__attribute_const__([ \t]|$)/\1/g' \ - -e 's@^#include @@' \ + -e 's@^#include @@' \ -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \ -e 's/(^|[ \t(])(inline|asm|volatile)([ \t(]|$)/\1__\2__\3/g' \ -e 's@#(ifndef|define|endif[ \t]*/[*])[ \t]*_UAPI@#\1 @' \ From dad5c1402c570cd07a80113784bc20a7f930c8ae Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 25 Dec 2017 14:26:48 +0100 Subject: [PATCH 0979/3326] Linux 4.14.9 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 41632ea2505513..ed2132c6d28606 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 8 +SUBLEVEL = 9 EXTRAVERSION = NAME = Petit Gorille From add9f2a47035fe94603b8ffe8d1d323b5fb0429f Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 27 Dec 2017 13:53:08 +0100 Subject: [PATCH 0980/3326] Revert "ipv6: grab rt->rt6i_ref before allocating pcpu rt" This reverts commit 9704f8147e88213f2fa580f713b42b08a4f1a7d2 which was upstream commit a94b9367e044ba672c9f4105eb1516ff6ff4948a. 
Shouldn't have been here, sorry about that. Reported-by: Chris Rankin Reported-by: Willy Tarreau Cc: Ido Schimmel Cc: Ozgur Cc: Wei Wang Cc: Martin KaFai Lau Cc: Eric Dumazet Cc: David S. Miller Cc: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv6/route.c | 58 ++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 76b47682f77f27..598efa8cfe250c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1055,6 +1055,7 @@ static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt) { + struct fib6_table *table = rt->rt6i_table; struct rt6_info *pcpu_rt, *prev, **p; pcpu_rt = ip6_rt_pcpu_alloc(rt); @@ -1065,20 +1066,28 @@ static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt) return net->ipv6.ip6_null_entry; } - dst_hold(&pcpu_rt->dst); - p = this_cpu_ptr(rt->rt6i_pcpu); - prev = cmpxchg(p, NULL, pcpu_rt); - if (prev) { - /* If someone did it before us, return prev instead */ - /* release refcnt taken by ip6_rt_pcpu_alloc() */ - dst_release_immediate(&pcpu_rt->dst); - /* release refcnt taken by above dst_hold() */ + read_lock_bh(&table->tb6_lock); + if (rt->rt6i_pcpu) { + p = this_cpu_ptr(rt->rt6i_pcpu); + prev = cmpxchg(p, NULL, pcpu_rt); + if (prev) { + /* If someone did it before us, return prev instead */ + dst_release_immediate(&pcpu_rt->dst); + pcpu_rt = prev; + } + } else { + /* rt has been removed from the fib6 tree + * before we have a chance to acquire the read_lock. + * In this case, don't brother to create a pcpu rt + * since rt is going away anyway. The next + * dst_check() will trigger a re-lookup. + */ dst_release_immediate(&pcpu_rt->dst); - dst_hold(&prev->dst); - pcpu_rt = prev; + pcpu_rt = rt; } - + dst_hold(&pcpu_rt->dst); rt6_dst_from_metrics_check(pcpu_rt); + read_unlock_bh(&table->tb6_lock); return pcpu_rt; } @@ -1168,28 +1177,19 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, if (pcpu_rt) { read_unlock_bh(&table->tb6_lock); } else { - /* atomic_inc_not_zero() is needed when using rcu */ - if (atomic_inc_not_zero(&rt->rt6i_ref)) { - /* We have to do the read_unlock first - * because rt6_make_pcpu_route() may trigger - * ip6_dst_gc() which will take the write_lock. - * - * No dst_hold() on rt is needed because grabbing - * rt->rt6i_ref makes sure rt can't be released. - */ - read_unlock_bh(&table->tb6_lock); - pcpu_rt = rt6_make_pcpu_route(rt); - rt6_release(rt); - } else { - /* rt is already removed from tree */ - read_unlock_bh(&table->tb6_lock); - pcpu_rt = net->ipv6.ip6_null_entry; - dst_hold(&pcpu_rt->dst); - } + /* We have to do the read_unlock first + * because rt6_make_pcpu_route() may trigger + * ip6_dst_gc() which will take the write_lock. + */ + dst_hold(&rt->dst); + read_unlock_bh(&table->tb6_lock); + pcpu_rt = rt6_make_pcpu_route(rt); + dst_release(&rt->dst); } trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6); return pcpu_rt; + } } EXPORT_SYMBOL_GPL(ip6_pol_route); From 62c37437a110cfec0d2b46f5d28113a631187978 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 6 Nov 2017 07:21:50 -0600 Subject: [PATCH 0981/3326] objtool: Move synced files to their original relative locations commit 6a77cff819ae3e31992bde6432c9b5720748a89b upstream. This will enable more straightforward comparisons, and it also makes the files 100% identical. 
Suggested-by: Ingo Molnar Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/407b2aaa317741f48fcf821592c0e96ab3be1890.1509974346.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/.gitignore | 2 +- tools/objtool/Makefile | 22 ++++++++++--------- tools/objtool/arch/x86/Build | 10 ++++----- tools/objtool/arch/x86/decode.c | 6 ++--- .../arch/x86/{insn => include/asm}/inat.h | 2 +- .../x86/{insn => include/asm}/inat_types.h | 0 .../arch/x86/{insn => include/asm}/insn.h | 2 +- .../{ => arch/x86/include/asm}/orc_types.h | 0 tools/objtool/arch/x86/{insn => lib}/inat.c | 2 +- tools/objtool/arch/x86/{insn => lib}/insn.c | 4 ++-- .../arch/x86/{insn => lib}/x86-opcode-map.txt | 0 .../x86/{insn => tools}/gen-insn-attr-x86.awk | 0 tools/objtool/orc.h | 2 +- 13 files changed, 27 insertions(+), 25 deletions(-) rename tools/objtool/arch/x86/{insn => include/asm}/inat.h (99%) rename tools/objtool/arch/x86/{insn => include/asm}/inat_types.h (100%) rename tools/objtool/arch/x86/{insn => include/asm}/insn.h (99%) rename tools/objtool/{ => arch/x86/include/asm}/orc_types.h (100%) rename tools/objtool/arch/x86/{insn => lib}/inat.c (99%) rename tools/objtool/arch/x86/{insn => lib}/insn.c (99%) rename tools/objtool/arch/x86/{insn => lib}/x86-opcode-map.txt (100%) rename tools/objtool/arch/x86/{insn => tools}/gen-insn-attr-x86.awk (100%) diff --git a/tools/objtool/.gitignore b/tools/objtool/.gitignore index d3102c865a95e0..914cff12899b65 100644 --- a/tools/objtool/.gitignore +++ b/tools/objtool/.gitignore @@ -1,3 +1,3 @@ -arch/x86/insn/inat-tables.c +arch/x86/lib/inat-tables.c objtool fixdep diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 424b1965d06f2f..c6a19d946ec1be 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -25,7 +25,9 @@ OBJTOOL_IN := $(OBJTOOL)-in.o all: $(OBJTOOL) -INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi +INCLUDES := -I$(srctree)/tools/include \ + -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ + -I$(srctree)/tools/objtool/arch/$(HOSTARCH)/include WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) LDFLAGS += -lelf $(LIBSUBCMD) @@ -46,16 +48,16 @@ $(OBJTOOL_IN): fixdep FORCE $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN) @(diff -I 2>&1 | grep -q 'option requires an argument' && \ test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \ - diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \ - diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \ - diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \ - diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \ - diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \ - diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \ - diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \ + diff arch/x86/lib/insn.c ../../arch/x86/lib/insn.c >/dev/null && \ + diff arch/x86/lib/inat.c ../../arch/x86/lib/inat.c >/dev/null && \ + diff arch/x86/lib/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \ + diff arch/x86/tools/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null 
&& \ + diff arch/x86/include/asm/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \ + diff arch/x86/include/asm/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \ + diff arch/x86/include/asm/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \ || echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true @(test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \ - diff ../../arch/x86/include/asm/orc_types.h orc_types.h >/dev/null) \ + diff ../../arch/x86/include/asm/orc_types.h arch/x86/include/asm/orc_types.h >/dev/null) \ || echo "warning: objtool: orc_types.h differs from kernel" >&2 )) || true $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@ @@ -66,7 +68,7 @@ $(LIBSUBCMD): fixdep FORCE clean: $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL) $(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete - $(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep + $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep FORCE: diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build index debbdb0b5c430b..b998412c017d91 100644 --- a/tools/objtool/arch/x86/Build +++ b/tools/objtool/arch/x86/Build @@ -1,12 +1,12 @@ objtool-y += decode.o -inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk -inat_tables_maps = arch/x86/insn/x86-opcode-map.txt +inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk +inat_tables_maps = arch/x86/lib/x86-opcode-map.txt -$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) +$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) $(call rule_mkdir) $(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ -$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c +$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c -CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn +CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 34a579f806e390..8acfc47af70efd 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -19,9 +19,9 @@ #include #define unlikely(cond) (cond) -#include "insn/insn.h" -#include "insn/inat.c" -#include "insn/insn.c" +#include +#include "lib/inat.c" +#include "lib/insn.c" #include "../../elf.h" #include "../../arch.h" diff --git a/tools/objtool/arch/x86/insn/inat.h b/tools/objtool/arch/x86/include/asm/inat.h similarity index 99% rename from tools/objtool/arch/x86/insn/inat.h rename to tools/objtool/arch/x86/include/asm/inat.h index 125ecd2a300d78..02aff08672115d 100644 --- a/tools/objtool/arch/x86/insn/inat.h +++ b/tools/objtool/arch/x86/include/asm/inat.h @@ -20,7 +20,7 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ -#include "inat_types.h" +#include /* * Internal bits. 
Don't use bitmasks directly, because these bits are diff --git a/tools/objtool/arch/x86/insn/inat_types.h b/tools/objtool/arch/x86/include/asm/inat_types.h similarity index 100% rename from tools/objtool/arch/x86/insn/inat_types.h rename to tools/objtool/arch/x86/include/asm/inat_types.h diff --git a/tools/objtool/arch/x86/insn/insn.h b/tools/objtool/arch/x86/include/asm/insn.h similarity index 99% rename from tools/objtool/arch/x86/insn/insn.h rename to tools/objtool/arch/x86/include/asm/insn.h index e23578c7b1be9d..b3e32b010ab194 100644 --- a/tools/objtool/arch/x86/insn/insn.h +++ b/tools/objtool/arch/x86/include/asm/insn.h @@ -21,7 +21,7 @@ */ /* insn_attr_t is defined in inat.h */ -#include "inat.h" +#include struct insn_field { union { diff --git a/tools/objtool/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h similarity index 100% rename from tools/objtool/orc_types.h rename to tools/objtool/arch/x86/include/asm/orc_types.h diff --git a/tools/objtool/arch/x86/insn/inat.c b/tools/objtool/arch/x86/lib/inat.c similarity index 99% rename from tools/objtool/arch/x86/insn/inat.c rename to tools/objtool/arch/x86/lib/inat.c index e4bf28e6f4c7a5..c1f01a8e9f65ec 100644 --- a/tools/objtool/arch/x86/insn/inat.c +++ b/tools/objtool/arch/x86/lib/inat.c @@ -18,7 +18,7 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ -#include "insn.h" +#include /* Attribute tables are generated from opcode map */ #include "inat-tables.c" diff --git a/tools/objtool/arch/x86/insn/insn.c b/tools/objtool/arch/x86/lib/insn.c similarity index 99% rename from tools/objtool/arch/x86/insn/insn.c rename to tools/objtool/arch/x86/lib/insn.c index ca983e2bea8b2d..1088eb8f3a5fea 100644 --- a/tools/objtool/arch/x86/insn/insn.c +++ b/tools/objtool/arch/x86/lib/insn.c @@ -23,8 +23,8 @@ #else #include #endif -#include "inat.h" -#include "insn.h" +#include +#include /* Verify next sizeof(t) bytes can be on the same instruction */ #define validate_next(t, insn, n) \ diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt similarity index 100% rename from tools/objtool/arch/x86/insn/x86-opcode-map.txt rename to tools/objtool/arch/x86/lib/x86-opcode-map.txt diff --git a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk similarity index 100% rename from tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk rename to tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk diff --git a/tools/objtool/orc.h b/tools/objtool/orc.h index a4139e386ef374..b0e92a6d0903b0 100644 --- a/tools/objtool/orc.h +++ b/tools/objtool/orc.h @@ -18,7 +18,7 @@ #ifndef _ORC_H #define _ORC_H -#include "orc_types.h" +#include struct objtool_file; From 2845aee45c363e15c1e677b1f4d0043f361c102b Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 6 Nov 2017 07:21:51 -0600 Subject: [PATCH 0982/3326] objtool: Move kernel headers/code sync check to a script commit 3bd51c5a371de917e4e7401c9df006b5998579df upstream. Replace the nasty diff checks in the objtool Makefile with a clean bash script, and make the warnings more specific. Heavily inspired by tools/perf/check-headers.sh. 
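As a usage sketch (not part of the patch), the new check can also be run by hand; it only prints something when a synced copy has drifted from its kernel-tree original:

  # run from the objtool source directory, since the script diffs against ../../
  cd tools/objtool && ./sync-check.sh
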
Suggested-by: Ingo Molnar Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/ab015f15ccd8c0c6008493c3c6ee3d495eaf2927.1509974346.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/Makefile | 16 +--------------- tools/objtool/sync-check.sh | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 15 deletions(-) create mode 100755 tools/objtool/sync-check.sh diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index c6a19d946ec1be..6aaed251b4ed8a 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -43,22 +43,8 @@ include $(srctree)/tools/build/Makefile.include $(OBJTOOL_IN): fixdep FORCE @$(MAKE) $(build)=objtool -# Busybox's diff doesn't have -I, avoid warning in that case -# $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN) - @(diff -I 2>&1 | grep -q 'option requires an argument' && \ - test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \ - diff arch/x86/lib/insn.c ../../arch/x86/lib/insn.c >/dev/null && \ - diff arch/x86/lib/inat.c ../../arch/x86/lib/inat.c >/dev/null && \ - diff arch/x86/lib/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \ - diff arch/x86/tools/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \ - diff arch/x86/include/asm/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \ - diff arch/x86/include/asm/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \ - diff arch/x86/include/asm/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \ - || echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true - @(test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \ - diff ../../arch/x86/include/asm/orc_types.h arch/x86/include/asm/orc_types.h >/dev/null) \ - || echo "warning: objtool: orc_types.h differs from kernel" >&2 )) || true + @./sync-check.sh $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@ diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh new file mode 100755 index 00000000000000..1470e74e9d6611 --- /dev/null +++ b/tools/objtool/sync-check.sh @@ -0,0 +1,29 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +FILES=' +arch/x86/lib/insn.c +arch/x86/lib/inat.c +arch/x86/lib/x86-opcode-map.txt +arch/x86/tools/gen-insn-attr-x86.awk +arch/x86/include/asm/insn.h +arch/x86/include/asm/inat.h +arch/x86/include/asm/inat_types.h +arch/x86/include/asm/orc_types.h +' + +check() +{ + local file=$1 + + diff $file ../../$file > /dev/null || + echo "Warning: synced file at 'tools/objtool/$file' differs from latest kernel version at '$file'" +} + +if [ ! -d ../../kernel ] || [ ! -d ../../tools ] || [ ! -d ../objtool ]; then + exit 0 +fi + +for i in $FILES; do + check $i +done From 3033f9e601f45f372d48bd266629abd681cea116 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Tue, 7 Nov 2017 21:01:52 -0600 Subject: [PATCH 0983/3326] objtool: Fix cross-build commit 9eb719855f6c9b21eb5889d9ac2ca1c60527ad89 upstream. Stephen Rothwell reported this cross-compilation build failure: | In file included from orc_dump.c:19:0: | orc.h:21:10: fatal error: asm/orc_types.h: No such file or directory | ... Caused by: 6a77cff819ae ("objtool: Move synced files to their original relative locations") Use the proper arch header files location, not the host-arch location. 
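A hedged reproduction sketch (the cross toolchain prefix below is an assumption, not taken from the report): the failure shows up when an x86_64 kernel, and with it objtool, is built from a non-x86 host, because only the target's tools/objtool/arch/ directory exists there:

  # objtool is still compiled with the host compiler, but its arch headers
  # must come from the target architecture's include directory
  make ARCH=x86_64 CROSS_COMPILE=x86_64-linux-gnu- defconfig
  make ARCH=x86_64 CROSS_COMPILE=x86_64-linux-gnu-
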
Bisected-by: Stephen Rothwell Reported-by: Stephen Rothwell Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Linux-Next Mailing List Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20171108030152.bd76eahiwjwjt3kp@treble Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 6aaed251b4ed8a..0f94af3ccaaa25 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -27,7 +27,7 @@ all: $(OBJTOOL) INCLUDES := -I$(srctree)/tools/include \ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ - -I$(srctree)/tools/objtool/arch/$(HOSTARCH)/include + -I$(srctree)/tools/objtool/arch/$(ARCH)/include WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) LDFLAGS += -lelf $(LIBSUBCMD) From 6a8f7688094c05e02b392b98e6977497a5096e20 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 14 Nov 2017 07:24:22 +0100 Subject: [PATCH 0984/3326] tools/headers: Sync objtool UAPI header commit a356d2ae50790f49858ebed35da9e206336fafee upstream. objtool grew this new warning: Warning: synced file at 'tools/objtool/arch/x86/include/asm/inat.h' differs from latest kernel version at 'arch/x86/include/asm/inat.h' which upstream header grew new INAT_SEG_* definitions. Sync up the tooling version of the header. Reported-by: Linus Torvalds Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/arch/x86/include/asm/inat.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/objtool/arch/x86/include/asm/inat.h b/tools/objtool/arch/x86/include/asm/inat.h index 02aff08672115d..1c78580e58bea3 100644 --- a/tools/objtool/arch/x86/include/asm/inat.h +++ b/tools/objtool/arch/x86/include/asm/inat.h @@ -97,6 +97,16 @@ #define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) #define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) +/* Identifiers for segment registers */ +#define INAT_SEG_REG_IGNORE 0 +#define INAT_SEG_REG_DEFAULT 1 +#define INAT_SEG_REG_CS 2 +#define INAT_SEG_REG_SS 3 +#define INAT_SEG_REG_DS 4 +#define INAT_SEG_REG_ES 5 +#define INAT_SEG_REG_FS 6 +#define INAT_SEG_REG_GS 7 + /* Attribute search APIs */ extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); extern int inat_get_last_prefix_id(insn_byte_t last_pfx); From 76358c8d90c73c896000ccee3375b709bc955bc5 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Sat, 2 Dec 2017 16:17:44 -0600 Subject: [PATCH 0985/3326] objtool: Fix 64-bit build on 32-bit host commit 14c47b54b0d9389e3ca0718e805cdd90c5a4303a upstream. The new ORC unwinder breaks the build of a 64-bit kernel on a 32-bit host. 
Building the kernel on a i386 or x32 host fails with: orc_dump.c: In function 'orc_dump': orc_dump.c:105:26: error: passing argument 2 of 'elf_getshdrnum' from incompatible pointer type [-Werror=incompatible-pointer-types] if (elf_getshdrnum(elf, &nr_sections)) { ^ In file included from /usr/local/include/gelf.h:32:0, from elf.h:22, from warn.h:26, from orc_dump.c:20: /usr/local/include/libelf.h:304:12: note: expected 'size_t * {aka unsigned int *}' but argument is of type 'long unsigned int *' extern int elf_getshdrnum (Elf *__elf, size_t *__dst); ^~~~~~~~~~~~~~ orc_dump.c:190:17: error: format '%lx' expects argument of type 'long unsigned int', but argument 3 has type 'Elf64_Sxword {aka long long int}' [-Werror=format=] printf("%s+%lx:", name, rela.r_addend); ~~^ ~~~~~~~~~~~~~ %llx Fix the build failure. Another problem is that if the user specifies HOSTCC or HOSTLD variables, they are ignored in the objtool makefile. Change the Makefile to respect these variables. Signed-off-by: Mikulas Patocka Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sven Joachim Cc: Thomas Gleixner Fixes: 627fce14809b ("objtool: Add ORC unwind table generation") Link: http://lkml.kernel.org/r/19f0e64d8e07e30a7b307cd010eb780c404fe08d.1512252895.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/Makefile | 8 +++++--- tools/objtool/orc_dump.c | 7 ++++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 0f94af3ccaaa25..ae0272f9a09184 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -7,9 +7,11 @@ ARCH := x86 endif # always use the host compiler -CC = gcc -LD = ld -AR = ar +HOSTCC ?= gcc +HOSTLD ?= ld +CC = $(HOSTCC) +LD = $(HOSTLD) +AR = ar ifeq ($(srctree),) srctree := $(patsubst %/,%,$(dir $(CURDIR))) diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c index 36c5bf6a267514..c3343820916a6d 100644 --- a/tools/objtool/orc_dump.c +++ b/tools/objtool/orc_dump.c @@ -76,7 +76,8 @@ int orc_dump(const char *_objname) int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0; struct orc_entry *orc = NULL; char *name; - unsigned long nr_sections, orc_ip_addr = 0; + size_t nr_sections; + Elf64_Addr orc_ip_addr = 0; size_t shstrtab_idx; Elf *elf; Elf_Scn *scn; @@ -187,10 +188,10 @@ int orc_dump(const char *_objname) return -1; } - printf("%s+%lx:", name, rela.r_addend); + printf("%s+%llx:", name, (unsigned long long)rela.r_addend); } else { - printf("%lx:", orc_ip_addr + (i * sizeof(int)) + orc_ip[i]); + printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i])); } From da8eb8ad0e695ed34b114df28505d4dd5b4ea693 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 11 Dec 2017 10:38:36 -0800 Subject: [PATCH 0986/3326] x86/decoder: Fix and update the opcodes map commit f5b5fab1780c98b74526dbac527574bd02dc16f8 upstream Update x86-opcode-map.txt based on the October 2017 Intel SDM publication. Fix INVPID to INVVPID. Add UD0 and UD1 instruction opcodes. Also sync the objtool and perf tooling copies of this file. 
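For context, a minimal sketch of how the updated map is consumed (it mirrors the awk rule in the objtool arch/x86 Build file earlier in this series; invoking it by hand like this is only illustrative):

  # regenerate the decoder attribute tables from the opcode map
  awk -f arch/x86/tools/gen-insn-attr-x86.awk \
      arch/x86/lib/x86-opcode-map.txt > inat-tables.c
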
Signed-off-by: Randy Dunlap Acked-by: Masami Hiramatsu Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/aac062d7-c0f6-96e3-5c92-ed299e2bd3da@infradead.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/x86-opcode-map.txt | 13 +++++++++++-- tools/objtool/arch/x86/lib/x86-opcode-map.txt | 15 ++++++++++++--- .../perf/util/intel-pt-decoder/x86-opcode-map.txt | 15 ++++++++++++--- 3 files changed, 35 insertions(+), 8 deletions(-) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index c4d55919fac19e..e0b85930dd773e 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) -ff: +ff: UD0 EndTable Table: 3-byte opcode 1 (0x0f 0x38) @@ -717,7 +717,7 @@ AVXcode: 2 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 80: INVEPT Gy,Mdq (66) -81: INVPID Gy,Mdq (66) +81: INVVPID Gy,Mdq (66) 82: INVPCID Gy,Mdq (66) 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) 88: vexpandps/d Vpd,Wpd (66),(ev) @@ -970,6 +970,15 @@ GrpTable: Grp9 EndTable GrpTable: Grp10 +# all are UD1 +0: UD1 +1: UD1 +2: UD1 +3: UD1 +4: UD1 +5: UD1 +6: UD1 +7: UD1 EndTable # Grp11A and Grp11B are expressed as Grp11 in Intel SDM diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt index 12e377184ee4ad..e0b85930dd773e 100644 --- a/tools/objtool/arch/x86/lib/x86-opcode-map.txt +++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt @@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) -ff: +ff: UD0 EndTable Table: 3-byte opcode 1 (0x0f 0x38) @@ -717,7 +717,7 @@ AVXcode: 2 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 80: INVEPT Gy,Mdq (66) -81: INVPID Gy,Mdq (66) +81: INVVPID Gy,Mdq (66) 82: INVPCID Gy,Mdq (66) 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) 88: vexpandps/d Vpd,Wpd (66),(ev) @@ -896,7 +896,7 @@ EndTable GrpTable: Grp3_1 0: TEST Eb,Ib -1: +1: TEST Eb,Ib 2: NOT Eb 3: NEG Eb 4: MUL AL,Eb @@ -970,6 +970,15 @@ GrpTable: Grp9 EndTable GrpTable: Grp10 +# all are UD1 +0: UD1 +1: UD1 +2: UD1 +3: UD1 +4: UD1 +5: UD1 +6: UD1 +7: UD1 EndTable # Grp11A and Grp11B are expressed as Grp11 in Intel SDM diff --git a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt index 12e377184ee4ad..e0b85930dd773e 100644 --- a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt +++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt @@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) -ff: +ff: UD0 EndTable Table: 3-byte opcode 1 (0x0f 0x38) @@ -717,7 +717,7 @@ AVXcode: 2 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 80: INVEPT Gy,Mdq (66) -81: INVPID Gy,Mdq (66) +81: INVVPID Gy,Mdq (66) 82: INVPCID Gy,Mdq (66) 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) 88: vexpandps/d Vpd,Wpd (66),(ev) @@ -896,7 +896,7 @@ EndTable GrpTable: Grp3_1 0: TEST Eb,Ib -1: +1: TEST Eb,Ib 2: NOT Eb 3: NEG Eb 4: MUL AL,Eb @@ -970,6 +970,15 @@ GrpTable: Grp9 EndTable GrpTable: Grp10 +# all are UD1 +0: 
UD1 +1: UD1 +2: UD1 +3: UD1 +4: UD1 +5: UD1 +6: UD1 +7: UD1 EndTable # Grp11A and Grp11B are expressed as Grp11 in Intel SDM From adc37e209d954ffa235d036c1b71ca84691ae4f7 Mon Sep 17 00:00:00 2001 From: Ricardo Neri Date: Fri, 27 Oct 2017 13:25:40 -0700 Subject: [PATCH 0987/3326] x86/insn-eval: Add utility functions to get segment selector commit 32d0b95300db03c2b23b2ea2c94769a4a138e79d upstream. [note, only the inat.h portion, to get objtool back in sync - gregkh] b0caa8c8c6bbc422bc3c32b64852d6d618f32b49 Mon Sep 17 00:00:00 2001 When computing a linear address and segmentation is used, we need to know the base address of the segment involved in the computation. In most of the cases, the segment base address will be zero as in USER_DS/USER32_DS. However, it may be possible that a user space program defines its own segments via a local descriptor table. In such a case, the segment base address may not be zero. Thus, the segment base address is needed to calculate correctly the linear address. If running in protected mode, the segment selector to be used when computing a linear address is determined by either any of segment override prefixes in the instruction or inferred from the registers involved in the computation of the effective address; in that order. Also, there are cases when the segment override prefixes shall be ignored (i.e., code segments are always selected by the CS segment register; string instructions always use the ES segment register when using rDI register as operand). In long mode, segment registers are ignored, except for FS and GS. In these two cases, base addresses are obtained from the respective MSRs. For clarity, this process can be split into four steps (and an equal number of functions): determine if segment prefixes overrides can be used; parse the segment override prefixes, and use them if found; if not found or cannot be used, use the default segment registers associated with the operand registers. Once the segment register to use has been identified, read its value to obtain the segment selector. The method to obtain the segment selector depends on several factors. In 32-bit builds, segment selectors are saved into a pt_regs structure when switching to kernel mode. The same is also true for virtual-8086 mode. In 64-bit builds, segmentation is mostly ignored, except when running a program in 32-bit legacy mode. In this case, CS and SS can be obtained from pt_regs. DS, ES, FS and GS can be read directly from the respective segment registers. In order to identify the segment registers, a new set of #defines is introduced. It also includes two special identifiers. One of them indicates when the default segment register associated with instruction operands shall be used. Another one indicates that the contents of the segment register shall be ignored; this identifier is used when in long mode. Improvements-by: Borislav Petkov Signed-off-by: Ricardo Neri Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: "Michael S. Tsirkin" Cc: Peter Zijlstra Cc: Dave Hansen Cc: ricardo.neri@intel.com Cc: Adrian Hunter Cc: Paul Gortmaker Cc: Huang Rui Cc: Qiaowei Ren Cc: Shuah Khan Cc: Kees Cook Cc: Jonathan Corbet Cc: Jiri Slaby Cc: Dmitry Vyukov Cc: "Ravi V. 
Shankar" Cc: Chris Metcalf Cc: Brian Gerst Cc: Arnaldo Carvalho de Melo Cc: Andy Lutomirski Cc: Colin Ian King Cc: Chen Yucong Cc: Adam Buchbinder Cc: Vlastimil Babka Cc: Lorenzo Stoakes Cc: Masami Hiramatsu Cc: Paolo Bonzini Cc: Andrew Morton Cc: Thomas Garnier Link: https://lkml.kernel.org/r/1509135945-13762-14-git-send-email-ricardo.neri-calderon@linux.intel.com Cc: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/inat.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h index 02aff08672115d..1c78580e58bea3 100644 --- a/arch/x86/include/asm/inat.h +++ b/arch/x86/include/asm/inat.h @@ -97,6 +97,16 @@ #define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) #define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) +/* Identifiers for segment registers */ +#define INAT_SEG_REG_IGNORE 0 +#define INAT_SEG_REG_DEFAULT 1 +#define INAT_SEG_REG_CS 2 +#define INAT_SEG_REG_SS 3 +#define INAT_SEG_REG_DS 4 +#define INAT_SEG_REG_ES 5 +#define INAT_SEG_REG_FS 6 +#define INAT_SEG_REG_GS 7 + /* Attribute search APIs */ extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); extern int inat_get_last_prefix_id(insn_byte_t last_pfx); From 662fd946aa4637a822bff0ec02afd59a8e2c2ba2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 20 Dec 2017 18:02:34 +0100 Subject: [PATCH 0988/3326] x86/Kconfig: Limit NR_CPUS on 32-bit to a sane amount commit 7bbcbd3d1cdcbacd0f9f8dc4c98d550972f1ca30 upstream. The recent cpu_entry_area changes fail to compile on 32-bit when BIGSMP=y and NR_CPUS=512, because the fixmap area becomes too big. Limit the number of CPUs with BIGSMP to 64, which is already way to big for 32-bit, but it's at least a working limitation. We performed a quick survey of 32-bit-only machines that might be affected by this change negatively, but found none. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 48646160eb8369..592c974d455887 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -925,7 +925,8 @@ config MAXSMP config NR_CPUS int "Maximum number of CPUs" if SMP && !MAXSMP range 2 8 if SMP && X86_32 && !X86_BIGSMP - range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK + range 2 64 if SMP && X86_32 && X86_BIGSMP + range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64 range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64 default "1" if !SMP default "8192" if MAXSMP From c4bc398080d80008e7d4a8cff528b8f3e8d8fb63 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 16 Dec 2017 01:14:39 +0100 Subject: [PATCH 0989/3326] x86/mm/dump_pagetables: Check PAGE_PRESENT for real commit c05344947b37f7cda726e802457370bc6eac4d26 upstream. The check for a present page in printk_prot(): if (!pgprot_val(prot)) { /* Not present */ is bogus. If a PTE is set to PAGE_NONE then the pgprot_val is not zero and the entry is decoded in bogus ways, e.g. as RX GLB. That is confusing when analyzing mapping correctness. Check for the present bit to make an informed decision. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/dump_pagetables.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 5e3ac6fe6c9e32..1014cfb21c2c79 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -140,7 +140,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg) static const char * const level_name[] = { "cr3", "pgd", "p4d", "pud", "pmd", "pte" }; - if (!pgprot_val(prot)) { + if (!(pr & _PAGE_PRESENT)) { /* Not present */ pt_dump_cont_printf(m, dmsg, " "); } else { From 7b45ad6e0fd7710cb5a5fb9a6bdd0632f510bf50 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 20 Dec 2017 18:07:42 +0100 Subject: [PATCH 0990/3326] x86/mm/dump_pagetables: Make the address hints correct and readable commit 146122e24bdf208015d629babba673e28d090709 upstream. The address hints are a trainwreck. The array entry numbers have to kept magically in sync with the actual hints, which is doomed as some of the array members are initialized at runtime via the entry numbers. Designated initializers have been around before this code was implemented.... Use the entry numbers to populate the address hints array and add the missing bits and pieces. Split 32 and 64 bit for readability sake. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/dump_pagetables.c | 90 +++++++++++++++++++++-------------- 1 file changed, 53 insertions(+), 37 deletions(-) diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 1014cfb21c2c79..fdf09d8f98da21 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -44,10 +44,12 @@ struct addr_marker { unsigned long max_lines; }; -/* indices for address_markers; keep sync'd w/ address_markers below */ +/* Address space markers hints */ + +#ifdef CONFIG_X86_64 + enum address_markers_idx { USER_SPACE_NR = 0, -#ifdef CONFIG_X86_64 KERNEL_SPACE_NR, LOW_KERNEL_NR, VMALLOC_START_NR, @@ -56,56 +58,70 @@ enum address_markers_idx { KASAN_SHADOW_START_NR, KASAN_SHADOW_END_NR, #endif -# ifdef CONFIG_X86_ESPFIX64 +#ifdef CONFIG_X86_ESPFIX64 ESPFIX_START_NR, -# endif +#endif +#ifdef CONFIG_EFI + EFI_END_NR, +#endif HIGH_KERNEL_NR, MODULES_VADDR_NR, MODULES_END_NR, -#else + FIXADDR_START_NR, + END_OF_SPACE_NR, +}; + +static struct addr_marker address_markers[] = { + [USER_SPACE_NR] = { 0, "User Space" }, + [KERNEL_SPACE_NR] = { (1UL << 63), "Kernel Space" }, + [LOW_KERNEL_NR] = { 0UL, "Low Kernel Mapping" }, + [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" }, + [VMEMMAP_START_NR] = { 0UL, "Vmemmap" }, +#ifdef CONFIG_KASAN + [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, + [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, +#endif +#ifdef CONFIG_X86_ESPFIX64 + [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, +#endif +#ifdef CONFIG_EFI + [EFI_END_NR] = { EFI_VA_END, "EFI Runtime Services" }, +#endif + [HIGH_KERNEL_NR] = { __START_KERNEL_map, "High Kernel Mapping" }, + [MODULES_VADDR_NR] = { MODULES_VADDR, "Modules" }, + [MODULES_END_NR] = { MODULES_END, "End Modules" }, + [FIXADDR_START_NR] = { 
FIXADDR_START, "Fixmap Area" }, + [END_OF_SPACE_NR] = { -1, NULL } +}; + +#else /* CONFIG_X86_64 */ + +enum address_markers_idx { + USER_SPACE_NR = 0, KERNEL_SPACE_NR, VMALLOC_START_NR, VMALLOC_END_NR, -# ifdef CONFIG_HIGHMEM +#ifdef CONFIG_HIGHMEM PKMAP_BASE_NR, -# endif - FIXADDR_START_NR, #endif + FIXADDR_START_NR, + END_OF_SPACE_NR, }; -/* Address space markers hints */ static struct addr_marker address_markers[] = { - { 0, "User Space" }, -#ifdef CONFIG_X86_64 - { 0x8000000000000000UL, "Kernel Space" }, - { 0/* PAGE_OFFSET */, "Low Kernel Mapping" }, - { 0/* VMALLOC_START */, "vmalloc() Area" }, - { 0/* VMEMMAP_START */, "Vmemmap" }, -#ifdef CONFIG_KASAN - { KASAN_SHADOW_START, "KASAN shadow" }, - { KASAN_SHADOW_END, "KASAN shadow end" }, + [USER_SPACE_NR] = { 0, "User Space" }, + [KERNEL_SPACE_NR] = { PAGE_OFFSET, "Kernel Mapping" }, + [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" }, + [VMALLOC_END_NR] = { 0UL, "vmalloc() End" }, +#ifdef CONFIG_HIGHMEM + [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, #endif -# ifdef CONFIG_X86_ESPFIX64 - { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, -# endif -# ifdef CONFIG_EFI - { EFI_VA_END, "EFI Runtime Services" }, -# endif - { __START_KERNEL_map, "High Kernel Mapping" }, - { MODULES_VADDR, "Modules" }, - { MODULES_END, "End Modules" }, -#else - { PAGE_OFFSET, "Kernel Mapping" }, - { 0/* VMALLOC_START */, "vmalloc() Area" }, - { 0/*VMALLOC_END*/, "vmalloc() End" }, -# ifdef CONFIG_HIGHMEM - { 0/*PKMAP_BASE*/, "Persistent kmap() Area" }, -# endif - { 0/*FIXADDR_START*/, "Fixmap Area" }, -#endif - { -1, NULL } /* End of list */ + [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, + [END_OF_SPACE_NR] = { -1, NULL } }; +#endif /* !CONFIG_X86_64 */ + /* Multipliers for offsets within the PTEs */ #define PTE_LEVEL_MULT (PAGE_SIZE) #define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT) From beb899c4bcdfb006293a593ffe31c9f13ec7c66b Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sun, 10 Dec 2017 22:47:19 -0800 Subject: [PATCH 0991/3326] x86/vsyscall/64: Explicitly set _PAGE_USER in the pagetable hierarchy commit 49275fef986abfb8b476e4708aaecc07e7d3e087 upstream. The kernel is very erratic as to which pagetables have _PAGE_USER set. The vsyscall page gets lucky: it seems that all of the relevant pagetables are among the apparently arbitrary ones that set _PAGE_USER. Rather than relying on chance, just explicitly set _PAGE_USER. This will let us clean up pagetable setup to stop setting _PAGE_USER. The added code can also be reused by pagetable isolation to manage the _PAGE_USER bit in the usermode tables. [ tglx: Folded paravirt fix from Juergen Gross ] Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/vsyscall/vsyscall_64.c | 34 ++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index f279ba2643dc89..daad57c76e4263 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -37,6 +37,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include "vsyscall_trace.h" @@ -329,16 +330,47 @@ int in_gate_area_no_mm(unsigned long addr) return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR; } +/* + * The VSYSCALL page is the only user-accessible page in the kernel address + * range. Normally, the kernel page tables can have _PAGE_USER clear, but + * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls + * are enabled. + * + * Some day we may create a "minimal" vsyscall mode in which we emulate + * vsyscalls but leave the page not present. If so, we skip calling + * this. + */ +static void __init set_vsyscall_pgtable_user_bits(void) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset_k(VSYSCALL_ADDR); + set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); + p4d = p4d_offset(pgd, VSYSCALL_ADDR); +#if CONFIG_PGTABLE_LEVELS >= 5 + p4d->p4d |= _PAGE_USER; +#endif + pud = pud_offset(p4d, VSYSCALL_ADDR); + set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER)); + pmd = pmd_offset(pud, VSYSCALL_ADDR); + set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER)); +} + void __init map_vsyscall(void) { extern char __vsyscall_page; unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); - if (vsyscall_mode != NONE) + if (vsyscall_mode != NONE) { __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, vsyscall_mode == NATIVE ? PAGE_KERNEL_VSYSCALL : PAGE_KERNEL_VVAR); + set_vsyscall_pgtable_user_bits(); + } BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != (unsigned long)VSYSCALL_ADDR); From 49c01662d3177e1814ceb52337add473fb4d264b Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sun, 10 Dec 2017 22:47:20 -0800 Subject: [PATCH 0992/3326] x86/vsyscall/64: Warn and fail vsyscall emulation in NATIVE mode commit 4831b779403a836158917d59a7ca880483c67378 upstream. If something goes wrong with pagetable setup, vsyscall=native will accidentally fall back to emulation. Make it warn and fail so that we notice. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/vsyscall/vsyscall_64.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index daad57c76e4263..1faf40f2dda9a8 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -139,6 +139,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) WARN_ON_ONCE(address != regs->ip); + /* This should be unreachable in NATIVE mode. 
*/ + if (WARN_ON(vsyscall_mode == NATIVE)) + return false; + if (vsyscall_mode == NONE) { warn_bad_vsyscall(KERN_INFO, regs, "vsyscall attempted with vsyscall=none"); From ee8e8b2df6d3f2a4ce25182d18964f891760ac65 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 14 Dec 2017 12:27:29 +0100 Subject: [PATCH 0993/3326] arch, mm: Allow arch_dup_mmap() to fail commit c10e83f598d08046dd1ebc8360d4bb12d802d51b upstream. In order to sanitize the LDT initialization on x86 arch_dup_mmap() must be allowed to fail. Fix up all instances. Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Cc: Andy Lutomirski Cc: Andy Lutomirsky Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: dan.j.williams@intel.com Cc: hughd@google.com Cc: keescook@google.com Cc: kirill.shutemov@linux.intel.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/mmu_context.h | 5 +++-- arch/um/include/asm/mmu_context.h | 3 ++- arch/unicore32/include/asm/mmu_context.h | 5 +++-- arch/x86/include/asm/mmu_context.h | 4 ++-- include/asm-generic/mm_hooks.h | 5 +++-- kernel/fork.c | 3 +-- 6 files changed, 14 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 492d8140a395fe..44fdf4786638b1 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -114,9 +114,10 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, #endif } -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { + return 0; } static inline void arch_exit_mmap(struct mm_struct *mm) diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h index b668e351fd6c2e..fca34b2177e28a 100644 --- a/arch/um/include/asm/mmu_context.h +++ b/arch/um/include/asm/mmu_context.h @@ -15,9 +15,10 @@ extern void uml_setup_stubs(struct mm_struct *mm); /* * Needed since we do not use the asm-generic/mm_hooks.h: */ -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { uml_setup_stubs(mm); + return 0; } extern void arch_exit_mmap(struct mm_struct *mm); static inline void arch_unmap(struct mm_struct *mm, diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h index 59b06b48f27d7a..5c205a9cb5a6a4 100644 --- a/arch/unicore32/include/asm/mmu_context.h +++ b/arch/unicore32/include/asm/mmu_context.h @@ -81,9 +81,10 @@ do { \ } \ } while (0) -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { + return 0; } static inline void arch_unmap(struct mm_struct *mm, diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 6d16d15d09a0da..c76162439c8a29 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -176,10 +176,10 @@ do { \ } while (0) #endif -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, 
struct mm_struct *mm) { paravirt_arch_dup_mmap(oldmm, mm); + return 0; } static inline void arch_exit_mmap(struct mm_struct *mm) diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h index ea189d88a3cc76..8ac4e68a12f08e 100644 --- a/include/asm-generic/mm_hooks.h +++ b/include/asm-generic/mm_hooks.h @@ -7,9 +7,10 @@ #ifndef _ASM_GENERIC_MM_HOOKS_H #define _ASM_GENERIC_MM_HOOKS_H -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { + return 0; } static inline void arch_exit_mmap(struct mm_struct *mm) diff --git a/kernel/fork.c b/kernel/fork.c index 07cc743698d366..500ce64517d93e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -721,8 +721,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, goto out; } /* a new mm has just been created */ - arch_dup_mmap(oldmm, mm); - retval = 0; + retval = arch_dup_mmap(oldmm, mm); out: up_write(&mm->mmap_sem); flush_tlb_mm(oldmm); From b17459342c55a209014a783d74bff6a0a5374339 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 14 Dec 2017 12:27:30 +0100 Subject: [PATCH 0994/3326] x86/ldt: Rework locking commit c2b3496bb30bd159e9de42e5c952e1f1f33c9a77 upstream. The LDT is duplicated on fork() and on exec(), which is wrong as exec() should start from a clean state, i.e. without LDT. To fix this the LDT duplication code will be moved into arch_dup_mmap() which is only called for fork(). This introduces a locking problem. arch_dup_mmap() holds mmap_sem of the parent process, but the LDT duplication code needs to acquire mm->context.lock to access the LDT data safely, which is the reverse lock order of write_ldt() where mmap_sem nests into context.lock. Solve this by introducing a new rw semaphore which serializes the read/write_ldt() syscall operations and use context.lock to protect the actual installment of the LDT descriptor. So context.lock stabilizes mm->context.ldt and can nest inside of the new semaphore or mmap_sem. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Andy Lutomirsky Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: dan.j.williams@intel.com Cc: hughd@google.com Cc: keescook@google.com Cc: kirill.shutemov@linux.intel.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mmu.h | 4 +++- arch/x86/include/asm/mmu_context.h | 2 ++ arch/x86/kernel/ldt.c | 33 +++++++++++++++++++----------- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 9ea26f16749706..5ff3e8af2c2056 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -3,6 +3,7 @@ #define _ASM_X86_MMU_H #include +#include #include #include @@ -27,7 +28,8 @@ typedef struct { atomic64_t tlb_gen; #ifdef CONFIG_MODIFY_LDT_SYSCALL - struct ldt_struct *ldt; + struct rw_semaphore ldt_usr_sem; + struct ldt_struct *ldt; #endif #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index c76162439c8a29..4fdbe5efe535f4 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -132,6 +132,8 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + mutex_init(&mm->context.lock); + mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id); atomic64_set(&mm->context.tlb_gen, 0); diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 1c1eae9613406b..1600aebc1ec78f 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -5,6 +5,11 @@ * Copyright (C) 2002 Andi Kleen * * This handles calls from both 32bit and 64bit mode. + * + * Lock order: + * contex.ldt_usr_sem + * mmap_sem + * context.lock */ #include @@ -42,7 +47,7 @@ static void refresh_ldt_segments(void) #endif } -/* context.lock is held for us, so we don't need any locking. */ +/* context.lock is held by the task which issued the smp function call */ static void flush_ldt(void *__mm) { struct mm_struct *mm = __mm; @@ -99,15 +104,17 @@ static void finalize_ldt_struct(struct ldt_struct *ldt) paravirt_alloc_ldt(ldt->entries, ldt->nr_entries); } -/* context.lock is held */ -static void install_ldt(struct mm_struct *current_mm, - struct ldt_struct *ldt) +static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt) { + mutex_lock(&mm->context.lock); + /* Synchronizes with READ_ONCE in load_mm_ldt. */ - smp_store_release(¤t_mm->context.ldt, ldt); + smp_store_release(&mm->context.ldt, ldt); - /* Activate the LDT for all CPUs using current_mm. */ - on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); + /* Activate the LDT for all CPUs using currents mm. 
*/ + on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true); + + mutex_unlock(&mm->context.lock); } static void free_ldt_struct(struct ldt_struct *ldt) @@ -133,7 +140,8 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) struct mm_struct *old_mm; int retval = 0; - mutex_init(&mm->context.lock); + init_rwsem(&mm->context.ldt_usr_sem); + old_mm = current->mm; if (!old_mm) { mm->context.ldt = NULL; @@ -180,7 +188,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount) unsigned long entries_size; int retval; - mutex_lock(&mm->context.lock); + down_read(&mm->context.ldt_usr_sem); if (!mm->context.ldt) { retval = 0; @@ -209,7 +217,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount) retval = bytecount; out_unlock: - mutex_unlock(&mm->context.lock); + up_read(&mm->context.ldt_usr_sem); return retval; } @@ -269,7 +277,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) ldt.avl = 0; } - mutex_lock(&mm->context.lock); + if (down_write_killable(&mm->context.ldt_usr_sem)) + return -EINTR; old_ldt = mm->context.ldt; old_nr_entries = old_ldt ? old_ldt->nr_entries : 0; @@ -291,7 +300,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) error = 0; out_unlock: - mutex_unlock(&mm->context.lock); + up_write(&mm->context.ldt_usr_sem); out: return error; } From 2c8e9099aecec2baaac8d34c7b823493f2d0eeed Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 14 Dec 2017 12:27:31 +0100 Subject: [PATCH 0995/3326] x86/ldt: Prevent LDT inheritance on exec commit a4828f81037f491b2cc986595e3a969a6eeb2fb5 upstream. The LDT is inherited across fork() or exec(), but that makes no sense at all because exec() is supposed to start the process clean. The reason why this happens is that init_new_context_ldt() is called from init_new_context() which obviously needs to be called for both fork() and exec(). It would be surprising if anything relies on that behaviour, so it seems to be safe to remove that misfeature. Split the context initialization into two parts. Clear the LDT pointer and initialize the mutex from the general context init and move the LDT duplication to arch_dup_mmap() which is only called on fork(). Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra Cc: Andy Lutomirski Cc: Andy Lutomirsky Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Will Deacon Cc: aliguori@amazon.com Cc: dan.j.williams@intel.com Cc: hughd@google.com Cc: keescook@google.com Cc: kirill.shutemov@linux.intel.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mmu_context.h | 21 ++++++++++++++------- arch/x86/kernel/ldt.c | 18 +++++------------- tools/testing/selftests/x86/ldt_gdt.c | 9 +++------ 3 files changed, 22 insertions(+), 26 deletions(-) diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 4fdbe5efe535f4..5e25423bf9bb6d 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -57,11 +57,17 @@ struct ldt_struct { /* * Used for LDT copy/destruction. 
*/ -int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm); +static inline void init_new_context_ldt(struct mm_struct *mm) +{ + mm->context.ldt = NULL; + init_rwsem(&mm->context.ldt_usr_sem); +} +int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm); void destroy_context_ldt(struct mm_struct *mm); #else /* CONFIG_MODIFY_LDT_SYSCALL */ -static inline int init_new_context_ldt(struct task_struct *tsk, - struct mm_struct *mm) +static inline void init_new_context_ldt(struct mm_struct *mm) { } +static inline int ldt_dup_context(struct mm_struct *oldmm, + struct mm_struct *mm) { return 0; } @@ -137,15 +143,16 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id); atomic64_set(&mm->context.tlb_gen, 0); - #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS if (cpu_feature_enabled(X86_FEATURE_OSPKE)) { /* pkey 0 is the default and always allocated */ mm->context.pkey_allocation_map = 0x1; /* -1 means unallocated or invalid */ mm->context.execute_only_pkey = -1; } - #endif - return init_new_context_ldt(tsk, mm); +#endif + init_new_context_ldt(mm); + return 0; } static inline void destroy_context(struct mm_struct *mm) { @@ -181,7 +188,7 @@ do { \ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { paravirt_arch_dup_mmap(oldmm, mm); - return 0; + return ldt_dup_context(oldmm, mm); } static inline void arch_exit_mmap(struct mm_struct *mm) diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 1600aebc1ec78f..a6b5d62f45a737 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -131,28 +131,20 @@ static void free_ldt_struct(struct ldt_struct *ldt) } /* - * we do not have to muck with descriptors here, that is - * done in switch_mm() as needed. + * Called on fork from arch_dup_mmap(). Just copy the current LDT state, + * the new task is not running, so nothing can be installed. */ -int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) +int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm) { struct ldt_struct *new_ldt; - struct mm_struct *old_mm; int retval = 0; - init_rwsem(&mm->context.ldt_usr_sem); - - old_mm = current->mm; - if (!old_mm) { - mm->context.ldt = NULL; + if (!old_mm) return 0; - } mutex_lock(&old_mm->context.lock); - if (!old_mm->context.ldt) { - mm->context.ldt = NULL; + if (!old_mm->context.ldt) goto out_unlock; - } new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries); if (!new_ldt) { diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index 66e5ce5b91f008..0304ffb714f272 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -627,13 +627,10 @@ static void do_multicpu_tests(void) static int finish_exec_test(void) { /* - * In a sensible world, this would be check_invalid_segment(0, 1); - * For better or for worse, though, the LDT is inherited across exec. - * We can probably change this safely, but for now we test it. + * Older kernel versions did inherit the LDT on exec() which is + * wrong because exec() starts from a clean state. */ - check_valid_segment(0, 1, - AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB, - 42, true); + check_invalid_segment(0, 1); return nerrs ? 
1 : 0; } From 88569f5e3a5648331066b49dd6918b8c70067ae7 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Tue, 12 Dec 2017 07:56:43 -0800 Subject: [PATCH 0996/3326] x86/mm/64: Improve the memory map documentation commit 5a7ccf4754fb3660569a6de52ba7f7fc3dfaf280 upstream. The old docs had the vsyscall range wrong and were missing the fixmap. Fix both. There used to be 8 MB reserved for future vsyscalls, but that's long gone. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/x86_64/mm.txt | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 3448e675b4623c..83ca5a3b90acec 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -19,8 +19,9 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ... unused hole ... ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 -ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space (variable) -ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls +ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space (variable) +[fixmap start] - ffffffffff5fffff kernel-internal fixmap range +ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole Virtual memory map with 5 level page tables: @@ -41,8 +42,9 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ... unused hole ... ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 -ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space -ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls +ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space +[fixmap start] - ffffffffff5fffff kernel-internal fixmap range +ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole Architecture defines a 64-bit virtual address. Implementations can support From d8f29ac7363778f38dab7bcb79ca076b8534904c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 Dec 2017 13:34:54 +0100 Subject: [PATCH 0997/3326] x86/doc: Remove obvious weirdnesses from the x86 MM layout documentation commit e8ffe96e5933d417195268478479933d56213a3f upstream. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/x86_64/mm.txt | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 83ca5a3b90acec..63a41671d25b2b 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -1,6 +1,4 @@ - - Virtual memory map with 4 level page tables: 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm @@ -49,8 +47,9 @@ ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole Architecture defines a 64-bit virtual address. Implementations can support less. Currently supported are 48- and 57-bit virtual addresses. Bits 63 -through to the most-significant implemented bit are set to either all ones -or all zero. This causes hole between user space and kernel addresses. +through to the most-significant implemented bit are sign extended. +This causes hole between user space and kernel addresses if you interpret them +as unsigned. The direct mapping covers all memory in the system up to the highest memory address (this means in some cases it can also include PCI memory @@ -60,9 +59,6 @@ vmalloc space is lazily synchronized into the different PML4/PML5 pages of the processes using the page fault handler, with init_top_pgt as reference. -Current X86-64 implementations support up to 46 bits of address space (64 TB), -which is our current limit. This expands into MBZ space in the page tables. - We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual memory window (this size is arbitrary, it can be raised later if needed). The mappings are not part of any other kernel PGD and are only available @@ -74,5 +70,3 @@ following fixmap section. Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all physical memory, vmalloc/ioremap space and virtual memory map are randomized. Their order is preserved but their base will be offset early at boot time. - --Andi Kleen, Jul 2004 From 06f9acfe0abcc9386bbeee8438b87f90ff2e04d1 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 17:25:07 -0800 Subject: [PATCH 0998/3326] x86/entry: Rename SYSENTER_stack to CPU_ENTRY_AREA_entry_stack commit 4fe2d8b11a370af286287a2661de9d4e6c9a145a upstream. If the kernel oopses while on the trampoline stack, it will print "" even if SYSENTER is not involved. That is rather confusing. The "SYSENTER" stack is used for a lot more than SYSENTER now. Give it a better string to display in stack dumps, and rename the kernel code to match. Also move the 32-bit code over to the new naming even though it still uses the entry stack only for SYSENTER. Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_32.S | 12 ++++++------ arch/x86/entry/entry_64.S | 4 ++-- arch/x86/include/asm/fixmap.h | 8 ++++---- arch/x86/include/asm/processor.h | 6 +++--- arch/x86/include/asm/stacktrace.h | 4 ++-- arch/x86/kernel/asm-offsets.c | 4 ++-- arch/x86/kernel/asm-offsets_32.c | 2 +- arch/x86/kernel/cpu/common.c | 14 +++++++------- arch/x86/kernel/dumpstack.c | 10 +++++----- arch/x86/kernel/dumpstack_32.c | 6 +++--- arch/x86/kernel/dumpstack_64.c | 12 +++++++++--- 11 files changed, 44 insertions(+), 38 deletions(-) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index bd8b57a5c874bc..ace8f321a5a1f2 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -942,9 +942,9 @@ ENTRY(debug) /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx - addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx - subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ - cmpl $SIZEOF_SYSENTER_stack, %ecx + addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx + subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ + cmpl $SIZEOF_entry_stack, %ecx jb .Ldebug_from_sysenter_stack TRACE_IRQS_OFF @@ -986,9 +986,9 @@ ENTRY(nmi) /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx - addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx - subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ - cmpl $SIZEOF_SYSENTER_stack, %ecx + addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx + subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ + cmpl $SIZEOF_entry_stack, %ecx jb .Lnmi_from_sysenter_stack /* Not on SYSENTER stack. */ diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 6abe3fcaece997..22c891c3b78d59 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -154,8 +154,8 @@ END(native_usergs_sysret64) _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip) /* The top word of the SYSENTER stack is hot and is usable as scratch space. */ -#define RSP_SCRATCH CPU_ENTRY_AREA_SYSENTER_stack + \ - SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA +#define RSP_SCRATCH CPU_ENTRY_AREA_entry_stack + \ + SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA ENTRY(entry_SYSCALL_64_trampoline) UNWIND_HINT_EMPTY diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 94fc4fa141275b..8153b8d86a3c39 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -56,10 +56,10 @@ struct cpu_entry_area { char gdt[PAGE_SIZE]; /* - * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as + * The GDT is just below entry_stack and thus serves (on x86_64) as * a a read-only guard page. */ - struct SYSENTER_stack_page SYSENTER_stack_page; + struct entry_stack_page entry_stack_page; /* * On x86_64, the TSS is mapped RO. 
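The entry_32.S hunks just above, and the in_entry_stack() helper further down, both answer the same question: is the current stack pointer inside the per-CPU entry stack? The assembly does it with one unsigned subtract and compare, so anything above the stack's end wraps around and fails. A standalone model of that arithmetic, with illustrative sizes rather than the kernel's real layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_STACK_BYTES (64 * sizeof(unsigned long))	/* struct entry_stack: 64 longs */

/* ecx = stack_end - sp; "on the entry stack" iff that unsigned distance
 * is below the stack size, exactly what the cmpl/jb pair checks. */
static bool on_entry_stack(uintptr_t sp, uintptr_t stack_end)
{
	return (stack_end - sp) < ENTRY_STACK_BYTES;
}

int main(void)
{
	unsigned long stack[64];
	uintptr_t end = (uintptr_t)&stack[64];

	printf("%d\n", on_entry_stack((uintptr_t)&stack[10], end));     /* 1: inside       */
	printf("%d\n", on_entry_stack((uintptr_t)&stack[0] - 64, end)); /* 0: below range  */
	printf("%d\n", on_entry_stack(end + 8, end));                   /* 0: wraps, fails */
	return 0;
}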
On x86_32, it's mapped RW because @@ -250,9 +250,9 @@ static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0)); } -static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu) +static inline struct entry_stack *cpu_entry_stack(int cpu) { - return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack; + return &get_cpu_entry_area(cpu)->entry_stack_page.stack; } #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index da943411d3d88b..9e482d8b0b9786 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -336,12 +336,12 @@ struct x86_hw_tss { #define IO_BITMAP_OFFSET (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss)) #define INVALID_IO_BITMAP_OFFSET 0x8000 -struct SYSENTER_stack { +struct entry_stack { unsigned long words[64]; }; -struct SYSENTER_stack_page { - struct SYSENTER_stack stack; +struct entry_stack_page { + struct entry_stack stack; } __aligned(PAGE_SIZE); struct tss_struct { diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index f8062bfd43a072..f737068787729f 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -16,7 +16,7 @@ enum stack_type { STACK_TYPE_TASK, STACK_TYPE_IRQ, STACK_TYPE_SOFTIRQ, - STACK_TYPE_SYSENTER, + STACK_TYPE_ENTRY, STACK_TYPE_EXCEPTION, STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1, }; @@ -29,7 +29,7 @@ struct stack_info { bool in_task_stack(unsigned long *stack, struct task_struct *task, struct stack_info *info); -bool in_sysenter_stack(unsigned long *stack, struct stack_info *info); +bool in_entry_stack(unsigned long *stack, struct stack_info *info); int get_stack_info(unsigned long *stack, struct task_struct *task, struct stack_info *info, unsigned long *visit_mask); diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index cd360a5e0dca30..676b7cf4b62bf8 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -97,6 +97,6 @@ void common(void) { /* Layout info for cpu_entry_area */ OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss); OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline); - OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page); - DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack)); + OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page); + DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack)); } diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 7d20d9c0b3d69c..fa1261eefa16e7 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -48,7 +48,7 @@ void foo(void) /* Offset from the sysenter stack to tss.sp0 */ DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) - - offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack)); + offsetofend(struct cpu_entry_area, entry_stack_page.stack)); #ifdef CONFIG_CC_STACKPROTECTOR BLANK(); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 034900623adfe8..ed4acbce37a8b4 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -487,8 +487,8 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); #endif -static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page, - 
SYSENTER_stack_storage); +static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, + entry_stack_storage); static void __init set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) @@ -523,8 +523,8 @@ static void __init setup_cpu_entry_area(int cpu) #endif __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page), - per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1, + set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page), + per_cpu_ptr(&entry_stack_storage, cpu), 1, PAGE_KERNEL); /* @@ -1323,7 +1323,7 @@ void enable_sep_cpu(void) tss->x86_tss.ss1 = __KERNEL_CS; wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); - wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1), 0); + wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); put_cpu(); @@ -1440,7 +1440,7 @@ void syscall_init(void) * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). */ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); - wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1)); + wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1)); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); @@ -1655,7 +1655,7 @@ void cpu_init(void) */ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); - load_sp0((unsigned long)(cpu_SYSENTER_stack(cpu) + 1)); + load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); load_mm_ldt(&init_mm); diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index bbd6d986e2d0fc..1dd3f533d78c86 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -43,9 +43,9 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task, return true; } -bool in_sysenter_stack(unsigned long *stack, struct stack_info *info) +bool in_entry_stack(unsigned long *stack, struct stack_info *info) { - struct SYSENTER_stack *ss = cpu_SYSENTER_stack(smp_processor_id()); + struct entry_stack *ss = cpu_entry_stack(smp_processor_id()); void *begin = ss; void *end = ss + 1; @@ -53,7 +53,7 @@ bool in_sysenter_stack(unsigned long *stack, struct stack_info *info) if ((void *)stack < begin || (void *)stack >= end) return false; - info->type = STACK_TYPE_SYSENTER; + info->type = STACK_TYPE_ENTRY; info->begin = begin; info->end = end; info->next_sp = NULL; @@ -111,13 +111,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, * - task stack * - interrupt stack * - HW exception stacks (double fault, nmi, debug, mce) - * - SYSENTER stack + * - entry stack * * x86-32 can have up to four stacks: * - task stack * - softirq stack * - hardirq stack - * - SYSENTER stack + * - entry stack */ for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { const char *stack_name; diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index 5ff13a6b368069..04170f63e3a1d5 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -26,8 +26,8 @@ const char *stack_type_name(enum stack_type type) if (type == STACK_TYPE_SOFTIRQ) return "SOFTIRQ"; - if (type == STACK_TYPE_SYSENTER) - return "SYSENTER"; + if (type == STACK_TYPE_ENTRY) + return "ENTRY_TRAMPOLINE"; return NULL; } @@ -96,7 +96,7 @@ int get_stack_info(unsigned long *stack, struct 
task_struct *task, if (task != current) goto unknown; - if (in_sysenter_stack(stack, info)) + if (in_entry_stack(stack, info)) goto recursion_check; if (in_hardirq_stack(stack, info)) diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index abc828f8c29785..563e28d14f2ca1 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -37,8 +37,14 @@ const char *stack_type_name(enum stack_type type) if (type == STACK_TYPE_IRQ) return "IRQ"; - if (type == STACK_TYPE_SYSENTER) - return "SYSENTER"; + if (type == STACK_TYPE_ENTRY) { + /* + * On 64-bit, we have a generic entry stack that we + * use for all the kernel entry points, including + * SYSENTER. + */ + return "ENTRY_TRAMPOLINE"; + } if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST) return exception_stack_names[type - STACK_TYPE_EXCEPTION]; @@ -118,7 +124,7 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, if (in_irq_stack(stack, info)) goto recursion_check; - if (in_sysenter_stack(stack, info)) + if (in_entry_stack(stack, info)) goto recursion_check; goto unknown; From 032fd2e383cb112789bcbb9a728dd4d96a1c280d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 Dec 2017 13:34:50 +0100 Subject: [PATCH 0999/3326] x86/uv: Use the right TLB-flush API commit 3e46e0f5ee3643a1239be9046c7ba6c66ca2b329 upstream. Since uv_flush_tlb_others() implements flush_tlb_others() which is about flushing user mappings, we should use __flush_tlb_single(), which too is about flushing user mappings. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Acked-by: Andrew Banman Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Mike Travis Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/platform/uv/tlb_uv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index f44c0bc95aa2f4..8538a6723171a5 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, local_flush_tlb(); stat->d_alltlb++; } else { - __flush_tlb_one(msg->address); + __flush_tlb_single(msg->address); stat->d_onetlb++; } stat->d_requestee++; From de4c8bbd6e1b631fa5f38e75453e331244ad7028 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 Dec 2017 13:34:51 +0100 Subject: [PATCH 1000/3326] x86/microcode: Dont abuse the TLB-flush interface commit 23cb7d46f371844c004784ad9552a57446f73e5a upstream. Commit: ec400ddeff20 ("x86/microcode_intel_early.c: Early update ucode on Intel's CPU") ... grubbed into tlbflush internals without coherent explanation. Since it says its a precaution and the SDM doesn't mention anything like this, take it out back. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: fenghua.yu@intel.com Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/tlbflush.h | 19 ++++++------------- arch/x86/kernel/cpu/microcode/intel.c | 13 ------------- 2 files changed, 6 insertions(+), 26 deletions(-) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 509046cfa5ce89..c2e45da4e540c2 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -246,20 +246,9 @@ static inline void __native_flush_tlb(void) preempt_enable(); } -static inline void __native_flush_tlb_global_irq_disabled(void) -{ - unsigned long cr4; - - cr4 = this_cpu_read(cpu_tlbstate.cr4); - /* clear PGE */ - native_write_cr4(cr4 & ~X86_CR4_PGE); - /* write old PGE again and flush TLBs */ - native_write_cr4(cr4); -} - static inline void __native_flush_tlb_global(void) { - unsigned long flags; + unsigned long cr4, flags; if (static_cpu_has(X86_FEATURE_INVPCID)) { /* @@ -277,7 +266,11 @@ static inline void __native_flush_tlb_global(void) */ raw_local_irq_save(flags); - __native_flush_tlb_global_irq_disabled(); + cr4 = this_cpu_read(cpu_tlbstate.cr4); + /* toggle PGE */ + native_write_cr4(cr4 ^ X86_CR4_PGE); + /* write old PGE again and flush TLBs */ + native_write_cr4(cr4); raw_local_irq_restore(flags); } diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 7dbcb7adf7975f..8ccdca6d3f9e9b 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -565,15 +565,6 @@ static void print_ucode(struct ucode_cpu_info *uci) } #else -/* - * Flush global tlb. We only do this in x86_64 where paging has been enabled - * already and PGE should be enabled as well. - */ -static inline void flush_tlb_early(void) -{ - __native_flush_tlb_global_irq_disabled(); -} - static inline void print_ucode(struct ucode_cpu_info *uci) { struct microcode_intel *mc; @@ -602,10 +593,6 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) if (rev != mc->hdr.rev) return -1; -#ifdef CONFIG_X86_64 - /* Flush global tlb. This is precaution. */ - flush_tlb_early(); -#endif uci->cpu_sig.rev = rev; if (early) From a0edc4947db9a3cbc6219143ff72e163ad7dbf80 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 Dec 2017 13:34:49 +0100 Subject: [PATCH 1001/3326] x86/mm: Use __flush_tlb_one() for kernel memory commit a501686b2923ce6f2ff2b1d0d50682c6411baf72 upstream. __flush_tlb_single() is for user mappings, __flush_tlb_one() for kernel mappings. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/tlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 3118392cdf756b..0569987f6da642 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -551,7 +551,7 @@ static void do_kernel_range_flush(void *info) /* flush range by one by one 'invlpg' */ for (addr = f->start; addr < f->end; addr += PAGE_SIZE) - __flush_tlb_single(addr); + __flush_tlb_one(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) From 6472c50292d4e4b342f4ea4e3e46e7786ca5c99c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 Dec 2017 13:34:46 +0100 Subject: [PATCH 1002/3326] x86/mm: Remove superfluous barriers commit b5fc6d943808b570bdfbec80f40c6b3855f1c48b upstream. atomic64_inc_return() already implies smp_mb() before and after. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/tlbflush.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index c2e45da4e540c2..3e2227386abe59 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -60,19 +60,13 @@ static inline void invpcid_flush_all_nonglobals(void) static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) { - u64 new_tlb_gen; - /* * Bump the generation count. This also serves as a full barrier * that synchronizes with switch_mm(): callers are required to order * their read of mm_cpumask after their writes to the paging * structures. */ - smp_mb__before_atomic(); - new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen); - smp_mb__after_atomic(); - - return new_tlb_gen; + return atomic64_inc_return(&mm->context.tlb_gen); } #ifdef CONFIG_PARAVIRT From 29606f10f399dd159ab6718d05060294b29a4481 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 Dec 2017 13:34:52 +0100 Subject: [PATCH 1003/3326] x86/mm: Add comments to clarify which TLB-flush functions are supposed to flush what commit 3f67af51e56f291d7417d77c4f67cd774633c5e1 upstream. Per popular request.. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/tlbflush.h | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 3e2227386abe59..552d581c8f9fd0 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -228,6 +228,9 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask) extern void initialize_tlbstate_and_flush(void); +/* + * flush the entire current user mapping + */ static inline void __native_flush_tlb(void) { /* @@ -240,6 +243,9 @@ static inline void __native_flush_tlb(void) preempt_enable(); } +/* + * flush everything + */ static inline void __native_flush_tlb_global(void) { unsigned long cr4, flags; @@ -269,17 +275,27 @@ static inline void __native_flush_tlb_global(void) raw_local_irq_restore(flags); } +/* + * flush one page in the user mapping + */ static inline void __native_flush_tlb_single(unsigned long addr) { asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); } +/* + * flush everything + */ static inline void __flush_tlb_all(void) { - if (boot_cpu_has(X86_FEATURE_PGE)) + if (boot_cpu_has(X86_FEATURE_PGE)) { __flush_tlb_global(); - else + } else { + /* + * !PGE -> !PCID (setup_pcid()), thus every flush is total. + */ __flush_tlb(); + } /* * Note: if we somehow had PCID but not PGE, then this wouldn't work -- @@ -290,6 +306,9 @@ static inline void __flush_tlb_all(void) */ } +/* + * flush one page in the kernel mapping + */ static inline void __flush_tlb_one(unsigned long addr) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); From b72e0abe99abaa4e64db83776c11ff6847e829cf Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:54 +0100 Subject: [PATCH 1004/3326] x86/mm: Move the CR3 construction functions to tlbflush.h commit 50fb83a62cf472dc53ba23bd3f7bd6c1b2b3b53e upstream. For flushing the TLB, the ASID which has been programmed into the hardware must be known. That differs from what is in 'cpu_tlbstate'. Add functions to transform the 'cpu_tlbstate' values into to the one programmed into the hardware (CR3). It's not easy to include mmu_context.h into tlbflush.h, so just move the CR3 building over to tlbflush.h. Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mmu_context.h | 29 +---------------------------- arch/x86/include/asm/tlbflush.h | 26 ++++++++++++++++++++++++++ arch/x86/mm/tlb.c | 8 ++++---- 3 files changed, 31 insertions(+), 32 deletions(-) diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 5e25423bf9bb6d..5ede7cae1d673e 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -290,33 +290,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, return __pkru_allows_pkey(vma_pkey(vma), write); } -/* - * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID - * bits. This serves two purposes. It prevents a nasty situation in - * which PCID-unaware code saves CR3, loads some other value (with PCID - * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if - * the saved ASID was nonzero. It also means that any bugs involving - * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger - * deterministically. - */ - -static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid) -{ - if (static_cpu_has(X86_FEATURE_PCID)) { - VM_WARN_ON_ONCE(asid > 4094); - return __sme_pa(mm->pgd) | (asid + 1); - } else { - VM_WARN_ON_ONCE(asid != 0); - return __sme_pa(mm->pgd); - } -} - -static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) -{ - VM_WARN_ON_ONCE(asid > 4094); - return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH; -} - /* * This can be used from process context to figure out what the value of * CR3 is without needing to do a (slow) __read_cr3(). @@ -326,7 +299,7 @@ static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) */ static inline unsigned long __get_current_cr3_fast(void) { - unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm), + unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd, this_cpu_read(cpu_tlbstate.loaded_mm_asid)); /* For now, be very restrictive about when this can be called. */ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 552d581c8f9fd0..ee7925adfb5720 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -69,6 +69,32 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) return atomic64_inc_return(&mm->context.tlb_gen); } +/* + * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID bits. + * This serves two purposes. It prevents a nasty situation in which + * PCID-unaware code saves CR3, loads some other value (with PCID == 0), + * and then restores CR3, thus corrupting the TLB for ASID 0 if the saved + * ASID was nonzero. It also means that any bugs involving loading a + * PCID-enabled CR3 with CR4.PCIDE off will trigger deterministically. 
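The helpers being moved here pack CR3 from two pieces: the physical address of the page-table root, and the hardware PCID, which is the software ASID plus one so that PCID 0 stays reserved for PCID-unaware code, with bit 63 (CR3_NOFLUSH) asking for a switch without a TLB flush. A standalone model of that packing follows; ignoring the SME mask applied by __sme_pa() is an assumption made purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define CR3_NOFLUSH (1ULL << 63)

/* Model of build_cr3()/build_cr3_noflush(): pgd physical address in the
 * upper bits, hardware PCID (= software ASID + 1) in the low 12 bits. */
static uint64_t toy_build_cr3(uint64_t pgd_pa, uint16_t asid, int noflush)
{
	uint64_t cr3 = pgd_pa | (uint64_t)(asid + 1);

	return noflush ? (cr3 | CR3_NOFLUSH) : cr3;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)toy_build_cr3(0x1234000, 0, 0)); /* 0x1234001 */
	printf("%#llx\n", (unsigned long long)toy_build_cr3(0x1234000, 2, 1)); /* 0x8000000001234003 */
	return 0;
}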
+ */ +struct pgd_t; +static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) +{ + if (static_cpu_has(X86_FEATURE_PCID)) { + VM_WARN_ON_ONCE(asid > 4094); + return __sme_pa(pgd) | (asid + 1); + } else { + VM_WARN_ON_ONCE(asid != 0); + return __sme_pa(pgd); + } +} + +static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) +{ + VM_WARN_ON_ONCE(asid > 4094); + return __sme_pa(pgd) | (asid + 1) | CR3_NOFLUSH; +} + #ifdef CONFIG_PARAVIRT #include #else diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 0569987f6da642..0a1be3adc97eee 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -128,7 +128,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * isn't free. */ #ifdef CONFIG_DEBUG_VM - if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) { + if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) { /* * If we were to BUG here, we'd be very likely to kill * the system so hard that we don't see the call trace. @@ -195,7 +195,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); - write_cr3(build_cr3(next, new_asid)); + write_cr3(build_cr3(next->pgd, new_asid)); /* * NB: This gets called via leave_mm() in the idle path @@ -208,7 +208,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } else { /* The new ASID is already up to date. */ - write_cr3(build_cr3_noflush(next, new_asid)); + write_cr3(build_cr3_noflush(next->pgd, new_asid)); /* See above wrt _rcuidle. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); @@ -288,7 +288,7 @@ void initialize_tlbstate_and_flush(void) !(cr4_read_shadow() & X86_CR4_PCIDE)); /* Force ASID 0 and force a TLB flush. */ - write_cr3(build_cr3(mm, 0)); + write_cr3(build_cr3(mm->pgd, 0)); /* Reinitialize tlbstate. */ this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); From 1765d0a565ee2585cf7b400e81be9a4da96fd7da Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:55 +0100 Subject: [PATCH 1005/3326] x86/mm: Remove hard-coded ASID limit checks commit cb0a9144a744e55207e24dcef812f05cd15a499a upstream. First, it's nice to remove the magic numbers. Second, PAGE_TABLE_ISOLATION is going to consume half of the available ASID space. The space is currently unused, but add a comment to spell out this new restriction. Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/tlbflush.h | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index ee7925adfb5720..f88ccd3ae466f0 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -69,6 +69,22 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) return atomic64_inc_return(&mm->context.tlb_gen); } +/* There are 12 bits of space for ASIDS in CR3 */ +#define CR3_HW_ASID_BITS 12 +/* + * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for + * user/kernel switches + */ +#define PTI_CONSUMED_ASID_BITS 0 + +#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS) +/* + * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account + * for them being zero-based. Another -1 is because ASID 0 is reserved for + * use by non-PCID-aware users. + */ +#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2) + /* * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID bits. * This serves two purposes. It prevents a nasty situation in which @@ -81,7 +97,7 @@ struct pgd_t; static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) { if (static_cpu_has(X86_FEATURE_PCID)) { - VM_WARN_ON_ONCE(asid > 4094); + VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); return __sme_pa(pgd) | (asid + 1); } else { VM_WARN_ON_ONCE(asid != 0); @@ -91,7 +107,7 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) { - VM_WARN_ON_ONCE(asid > 4094); + VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); return __sme_pa(pgd) | (asid + 1) | CR3_NOFLUSH; } From acefb4516bcecc7d084205e00fe2de589d8259bc Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:56 +0100 Subject: [PATCH 1006/3326] x86/mm: Put MMU to hardware ASID translation in one place commit dd95f1a4b5ca904c78e6a097091eb21436478abb upstream. There are effectively two ASID types: 1. The one stored in the mmu_context that goes from 0..5 2. The one programmed into the hardware that goes from 1..6 This consolidates the locations where converting between the two (by doing a +1) to a single place which gives us a nice place to comment. PAGE_TABLE_ISOLATION will also need to, given an ASID, know which hardware ASID to flush for the userspace mapping. Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/tlbflush.h | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index f88ccd3ae466f0..8b27daff7a7f34 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -85,20 +85,26 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) */ #define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2) -/* - * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID bits. - * This serves two purposes. It prevents a nasty situation in which - * PCID-unaware code saves CR3, loads some other value (with PCID == 0), - * and then restores CR3, thus corrupting the TLB for ASID 0 if the saved - * ASID was nonzero. It also means that any bugs involving loading a - * PCID-enabled CR3 with CR4.PCIDE off will trigger deterministically. - */ +static inline u16 kern_pcid(u16 asid) +{ + VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); + /* + * If PCID is on, ASID-aware code paths put the ASID+1 into the + * PCID bits. This serves two purposes. It prevents a nasty + * situation in which PCID-unaware code saves CR3, loads some other + * value (with PCID == 0), and then restores CR3, thus corrupting + * the TLB for ASID 0 if the saved ASID was nonzero. It also means + * that any bugs involving loading a PCID-enabled CR3 with + * CR4.PCIDE off will trigger deterministically. + */ + return asid + 1; +} + struct pgd_t; static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) { if (static_cpu_has(X86_FEATURE_PCID)) { - VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); - return __sme_pa(pgd) | (asid + 1); + return __sme_pa(pgd) | kern_pcid(asid); } else { VM_WARN_ON_ONCE(asid != 0); return __sme_pa(pgd); @@ -108,7 +114,8 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) { VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); - return __sme_pa(pgd) | (asid + 1) | CR3_NOFLUSH; + VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID)); + return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH; } #ifdef CONFIG_PARAVIRT From b6167aeb9faf5966daca3f9f973e1b428a43510c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 Dec 2017 13:34:47 +0100 Subject: [PATCH 1007/3326] x86/mm: Create asm/invpcid.h commit 1a3b0caeb77edeac5ce5fa05e6a61c474c9a9745 upstream. Unclutter tlbflush.h a little. Signed-off-by: Peter Zijlstra (Intel) Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/invpcid.h | 53 +++++++++++++++++++++++++++++++++ arch/x86/include/asm/tlbflush.h | 49 +----------------------------- 2 files changed, 54 insertions(+), 48 deletions(-) create mode 100644 arch/x86/include/asm/invpcid.h diff --git a/arch/x86/include/asm/invpcid.h b/arch/x86/include/asm/invpcid.h new file mode 100644 index 00000000000000..989cfa86de8518 --- /dev/null +++ b/arch/x86/include/asm/invpcid.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_INVPCID +#define _ASM_X86_INVPCID + +static inline void __invpcid(unsigned long pcid, unsigned long addr, + unsigned long type) +{ + struct { u64 d[2]; } desc = { { pcid, addr } }; + + /* + * The memory clobber is because the whole point is to invalidate + * stale TLB entries and, especially if we're flushing global + * mappings, we don't want the compiler to reorder any subsequent + * memory accesses before the TLB flush. + * + * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and + * invpcid (%rcx), %rax in long mode. + */ + asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01" + : : "m" (desc), "a" (type), "c" (&desc) : "memory"); +} + +#define INVPCID_TYPE_INDIV_ADDR 0 +#define INVPCID_TYPE_SINGLE_CTXT 1 +#define INVPCID_TYPE_ALL_INCL_GLOBAL 2 +#define INVPCID_TYPE_ALL_NON_GLOBAL 3 + +/* Flush all mappings for a given pcid and addr, not including globals. */ +static inline void invpcid_flush_one(unsigned long pcid, + unsigned long addr) +{ + __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR); +} + +/* Flush all mappings for a given PCID, not including globals. */ +static inline void invpcid_flush_single_context(unsigned long pcid) +{ + __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT); +} + +/* Flush all mappings, including globals, for all PCIDs. */ +static inline void invpcid_flush_all(void) +{ + __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL); +} + +/* Flush all mappings for all PCIDs except globals. */ +static inline void invpcid_flush_all_nonglobals(void) +{ + __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL); +} + +#endif /* _ASM_X86_INVPCID */ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 8b27daff7a7f34..171b429f43a266 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -9,54 +9,7 @@ #include #include #include - -static inline void __invpcid(unsigned long pcid, unsigned long addr, - unsigned long type) -{ - struct { u64 d[2]; } desc = { { pcid, addr } }; - - /* - * The memory clobber is because the whole point is to invalidate - * stale TLB entries and, especially if we're flushing global - * mappings, we don't want the compiler to reorder any subsequent - * memory accesses before the TLB flush. - * - * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and - * invpcid (%rcx), %rax in long mode. - */ - asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01" - : : "m" (desc), "a" (type), "c" (&desc) : "memory"); -} - -#define INVPCID_TYPE_INDIV_ADDR 0 -#define INVPCID_TYPE_SINGLE_CTXT 1 -#define INVPCID_TYPE_ALL_INCL_GLOBAL 2 -#define INVPCID_TYPE_ALL_NON_GLOBAL 3 - -/* Flush all mappings for a given pcid and addr, not including globals. 
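INVPCID is a privileged instruction, so the invpcid_flush_*() helpers above can only ever run in the kernel; the most a userspace program can do is ask whether the CPU advertises the feature at all. A hedged sketch of that check (CPUID leaf 7, sub-leaf 0, EBX bit 10), using the <cpuid.h> helper shipped by reasonably recent GCC/Clang:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 7 not available");
		return 1;
	}
	/* CPUID.(EAX=7,ECX=0):EBX bit 10 is the INVPCID feature flag */
	puts((ebx & (1u << 10)) ? "INVPCID supported" : "INVPCID not supported");
	return 0;
}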
*/ -static inline void invpcid_flush_one(unsigned long pcid, - unsigned long addr) -{ - __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR); -} - -/* Flush all mappings for a given PCID, not including globals. */ -static inline void invpcid_flush_single_context(unsigned long pcid) -{ - __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT); -} - -/* Flush all mappings, including globals, for all PCIDs. */ -static inline void invpcid_flush_all(void) -{ - __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL); -} - -/* Flush all mappings for all PCIDs except globals. */ -static inline void invpcid_flush_all_nonglobals(void) -{ - __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL); -} +#include static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) { From 1b0eddf0a1d1beabc2f3b23bf807758667c30fc7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 20 Dec 2017 18:28:54 +0100 Subject: [PATCH 1008/3326] x86/cpu_entry_area: Move it to a separate unit commit ed1bbc40a0d10e0c5c74fe7bdc6298295cf40255 upstream. Separate the cpu_entry_area code out of cpu/common.c and the fixmap. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpu_entry_area.h | 52 +++++++++++++ arch/x86/include/asm/fixmap.h | 41 +--------- arch/x86/kernel/cpu/common.c | 94 ----------------------- arch/x86/kernel/traps.c | 1 + arch/x86/mm/Makefile | 2 +- arch/x86/mm/cpu_entry_area.c | 104 ++++++++++++++++++++++++++ 6 files changed, 159 insertions(+), 135 deletions(-) create mode 100644 arch/x86/include/asm/cpu_entry_area.h create mode 100644 arch/x86/mm/cpu_entry_area.c diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h new file mode 100644 index 00000000000000..5471826803afc9 --- /dev/null +++ b/arch/x86/include/asm/cpu_entry_area.h @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef _ASM_X86_CPU_ENTRY_AREA_H +#define _ASM_X86_CPU_ENTRY_AREA_H + +#include +#include + +/* + * cpu_entry_area is a percpu region that contains things needed by the CPU + * and early entry/exit code. Real types aren't used for all fields here + * to avoid circular header dependencies. + * + * Every field is a virtual alias of some other allocated backing store. + * There is no direct allocation of a struct cpu_entry_area. + */ +struct cpu_entry_area { + char gdt[PAGE_SIZE]; + + /* + * The GDT is just below entry_stack and thus serves (on x86_64) as + * a a read-only guard page. + */ + struct entry_stack_page entry_stack_page; + + /* + * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because + * we need task switches to work, and task switches write to the TSS. + */ + struct tss_struct tss; + + char entry_trampoline[PAGE_SIZE]; + +#ifdef CONFIG_X86_64 + /* + * Exception stacks used for IST entries. + * + * In the future, this should have a separate slot for each stack + * with guard pages between them. 
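Every member of struct cpu_entry_area above is a whole number of pages, which is what lets each alias get its own mapping and its own protection bits later on. A toy layout with the same property, just to make the page-granularity constraint concrete; the member sizes here are placeholders, not the real kernel sizes.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Toy cpu_entry_area-style layout: members start on page boundaries and
 * the whole struct is a multiple of PAGE_SIZE. */
struct toy_entry_area {
	char gdt[PAGE_SIZE];
	char entry_stack_page[PAGE_SIZE];
	char tss[3 * PAGE_SIZE];		/* placeholder size */
	char entry_trampoline[PAGE_SIZE];
	char exception_stacks[5 * PAGE_SIZE];	/* placeholder size */
};

static_assert(offsetof(struct toy_entry_area, tss) % PAGE_SIZE == 0,
	      "members must start on page boundaries");
static_assert(sizeof(struct toy_entry_area) % PAGE_SIZE == 0,
	      "the area must be a whole number of pages");

int main(void)
{
	printf("%lu pages per CPU in this toy layout\n",
	       sizeof(struct toy_entry_area) / PAGE_SIZE);
	return 0;
}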
+ */ + char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]; +#endif +}; + +#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) +#define CPU_ENTRY_AREA_PAGES (CPU_ENTRY_AREA_SIZE / PAGE_SIZE) + +DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); + +extern void setup_cpu_entry_areas(void); + +#endif diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 8153b8d86a3c39..fb801662a23055 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -25,6 +25,7 @@ #else #include #endif +#include /* * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall @@ -44,46 +45,6 @@ extern unsigned long __FIXADDR_TOP; PAGE_SIZE) #endif -/* - * cpu_entry_area is a percpu region in the fixmap that contains things - * needed by the CPU and early entry/exit code. Real types aren't used - * for all fields here to avoid circular header dependencies. - * - * Every field is a virtual alias of some other allocated backing store. - * There is no direct allocation of a struct cpu_entry_area. - */ -struct cpu_entry_area { - char gdt[PAGE_SIZE]; - - /* - * The GDT is just below entry_stack and thus serves (on x86_64) as - * a a read-only guard page. - */ - struct entry_stack_page entry_stack_page; - - /* - * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because - * we need task switches to work, and task switches write to the TSS. - */ - struct tss_struct tss; - - char entry_trampoline[PAGE_SIZE]; - -#ifdef CONFIG_X86_64 - /* - * Exception stacks used for IST entries. - * - * In the future, this should have a separate slot for each stack - * with guard pages between them. - */ - char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]; -#endif -}; - -#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE) - -extern void setup_cpu_entry_areas(void); - /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index ed4acbce37a8b4..8ddcfa4d4165bb 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -482,102 +482,8 @@ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, [DEBUG_STACK - 1] = DEBUG_STKSZ }; - -static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks - [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); -#endif - -static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, - entry_stack_storage); - -static void __init -set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) -{ - for ( ; pages; pages--, idx--, ptr += PAGE_SIZE) - __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot); -} - -/* Setup the fixmap mappings only once per-processor */ -static void __init setup_cpu_entry_area(int cpu) -{ -#ifdef CONFIG_X86_64 - extern char _entry_trampoline[]; - - /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */ - pgprot_t gdt_prot = PAGE_KERNEL_RO; - pgprot_t tss_prot = PAGE_KERNEL_RO; -#else - /* - * On native 32-bit systems, the GDT cannot be read-only because - * our double fault handler uses a task gate, and entering through - * a task gate needs to change an available TSS to busy. If the - * GDT is read-only, that will triple fault. The TSS cannot be - * read-only because the CPU writes to it on task switches. - * - * On Xen PV, the GDT must be read-only because the hypervisor - * requires it. 
- */ - pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ? - PAGE_KERNEL_RO : PAGE_KERNEL; - pgprot_t tss_prot = PAGE_KERNEL; -#endif - - __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page), - per_cpu_ptr(&entry_stack_storage, cpu), 1, - PAGE_KERNEL); - - /* - * The Intel SDM says (Volume 3, 7.2.1): - * - * Avoid placing a page boundary in the part of the TSS that the - * processor reads during a task switch (the first 104 bytes). The - * processor may not correctly perform address translations if a - * boundary occurs in this area. During a task switch, the processor - * reads and writes into the first 104 bytes of each TSS (using - * contiguous physical addresses beginning with the physical address - * of the first byte of the TSS). So, after TSS access begins, if - * part of the 104 bytes is not physically contiguous, the processor - * will access incorrect information without generating a page-fault - * exception. - * - * There are also a lot of errata involving the TSS spanning a page - * boundary. Assert that we're not doing that. - */ - BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ - offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); - BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss), - &per_cpu(cpu_tss_rw, cpu), - sizeof(struct tss_struct) / PAGE_SIZE, - tss_prot); - -#ifdef CONFIG_X86_32 - per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); #endif -#ifdef CONFIG_X86_64 - BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); - BUILD_BUG_ON(sizeof(exception_stacks) != - sizeof(((struct cpu_entry_area *)0)->exception_stacks)); - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks), - &per_cpu(exception_stacks, cpu), - sizeof(exception_stacks) / PAGE_SIZE, - PAGE_KERNEL); - - __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline), - __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); -#endif -} - -void __init setup_cpu_entry_areas(void) -{ - unsigned int cpu; - - for_each_possible_cpu(cpu) - setup_cpu_entry_area(cpu); -} - /* Load the original GDT from the per-cpu structure */ void load_direct_gdt(int cpu) { diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 74136fd16f491f..464daed6894fe3 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 7ba7f3d7f47758..2e0017af8f9b06 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o = -pg endif obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ - pat.o pgtable.o physaddr.o setup_nx.o tlb.o + pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o # Make sure __phys_addr has no stackprotector nostackp := $(call cc-option, -fno-stack-protector) diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c new file mode 100644 index 00000000000000..235ff9cfaaf4e1 --- /dev/null +++ b/arch/x86/mm/cpu_entry_area.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include +#include +#include + +static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage); + +#ifdef CONFIG_X86_64 +static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks + [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); +#endif + 
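setup_cpu_entry_area(), reproduced in the new file just below, boils down to mapping each page-sized member of the area with its own protection: the GDT read-only where the configuration allows it, the entry stack and TSS writable where the hardware has to write. A rough userspace analogue of that idea with mmap()/mprotect(), purely as an illustration of per-page protections inside one region (the "GDT"/"entry stack" labels are only the analogy, not kernel behaviour):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	/* one region, two pages: page 0 plays the "GDT" (becomes read-only),
	 * page 1 plays the "entry stack" (stays read-write) */
	char *area = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	strcpy(area, "pretend GDT contents");
	if (mprotect(area, page, PROT_READ)) {	/* lock the first page down */
		perror("mprotect");
		return 1;
	}
	area[page] = 'x';			/* second page is still writable */
	printf("first page: %s, second page: %c\n", area, area[page]);
	return 0;
}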
+static void __init +set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) +{ + for ( ; pages; pages--, idx--, ptr += PAGE_SIZE) + __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot); +} + +/* Setup the fixmap mappings only once per-processor */ +static void __init setup_cpu_entry_area(int cpu) +{ +#ifdef CONFIG_X86_64 + extern char _entry_trampoline[]; + + /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */ + pgprot_t gdt_prot = PAGE_KERNEL_RO; + pgprot_t tss_prot = PAGE_KERNEL_RO; +#else + /* + * On native 32-bit systems, the GDT cannot be read-only because + * our double fault handler uses a task gate, and entering through + * a task gate needs to change an available TSS to busy. If the + * GDT is read-only, that will triple fault. The TSS cannot be + * read-only because the CPU writes to it on task switches. + * + * On Xen PV, the GDT must be read-only because the hypervisor + * requires it. + */ + pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ? + PAGE_KERNEL_RO : PAGE_KERNEL; + pgprot_t tss_prot = PAGE_KERNEL; +#endif + + __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); + set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page), + per_cpu_ptr(&entry_stack_storage, cpu), 1, + PAGE_KERNEL); + + /* + * The Intel SDM says (Volume 3, 7.2.1): + * + * Avoid placing a page boundary in the part of the TSS that the + * processor reads during a task switch (the first 104 bytes). The + * processor may not correctly perform address translations if a + * boundary occurs in this area. During a task switch, the processor + * reads and writes into the first 104 bytes of each TSS (using + * contiguous physical addresses beginning with the physical address + * of the first byte of the TSS). So, after TSS access begins, if + * part of the 104 bytes is not physically contiguous, the processor + * will access incorrect information without generating a page-fault + * exception. + * + * There are also a lot of errata involving the TSS spanning a page + * boundary. Assert that we're not doing that. + */ + BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ + offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); + BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); + set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss), + &per_cpu(cpu_tss_rw, cpu), + sizeof(struct tss_struct) / PAGE_SIZE, + tss_prot); + +#ifdef CONFIG_X86_32 + per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); +#endif + +#ifdef CONFIG_X86_64 + BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); + BUILD_BUG_ON(sizeof(exception_stacks) != + sizeof(((struct cpu_entry_area *)0)->exception_stacks)); + set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks), + &per_cpu(exception_stacks, cpu), + sizeof(exception_stacks) / PAGE_SIZE, + PAGE_KERNEL); + + __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline), + __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); +#endif +} + +void __init setup_cpu_entry_areas(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + setup_cpu_entry_area(cpu); +} From 3440093266b786d8b8b2506ea788f4462437cd7a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 20 Dec 2017 18:51:31 +0100 Subject: [PATCH 1009/3326] x86/cpu_entry_area: Move it out of the fixmap commit 92a0f81d89571e3e8759366e050ee05cc545ef99 upstream. Put the cpu_entry_area into a separate P4D entry. The fixmap gets too big and 0-day already hit a case where the fixmap PTEs were cleared by cleanup_highmap(). 
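"The fixmap gets too big" is easy to put a number on: the fixmap would need one slot per page of cpu_entry_area for every possible CPU, and NR_CPUS on distro kernels runs into the thousands. A back-of-the-envelope calculation; both figures below are assumed round numbers for illustration, not the exact kernel values.

#include <stdio.h>

int main(void)
{
	unsigned long pages_per_cpu = 10;	/* assumed rough size of one cpu_entry_area */
	unsigned long nr_cpus = 8192;		/* assumed NR_CPUS of a large distro config */

	printf("%lu fixmap slots, i.e. %lu MiB of virtual space\n",
	       pages_per_cpu * nr_cpus,
	       pages_per_cpu * nr_cpus * 4096 / (1024 * 1024));
	return 0;
}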
Aside of that the fixmap API is a pain as it's all backwards. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/x86_64/mm.txt | 2 + arch/x86/include/asm/cpu_entry_area.h | 18 ++++++- arch/x86/include/asm/desc.h | 1 + arch/x86/include/asm/fixmap.h | 32 +----------- arch/x86/include/asm/pgtable_32_types.h | 15 ++++-- arch/x86/include/asm/pgtable_64_types.h | 47 +++++++++++------- arch/x86/kernel/dumpstack.c | 1 + arch/x86/kernel/traps.c | 5 +- arch/x86/mm/cpu_entry_area.c | 66 +++++++++++++++++++------ arch/x86/mm/dump_pagetables.c | 6 ++- arch/x86/mm/init_32.c | 6 +++ arch/x86/mm/kasan_init_64.c | 29 ++++++----- arch/x86/mm/pgtable_32.c | 1 + arch/x86/xen/mmu_pv.c | 2 - 14 files changed, 143 insertions(+), 88 deletions(-) diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 63a41671d25b2b..51101708a03ae1 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) ... unused hole ... ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) ... unused hole ... +fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space @@ -35,6 +36,7 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) ... unused hole ... ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) ... unused hole ... +fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... 
ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h index 5471826803afc9..2fbc69a0916edb 100644 --- a/arch/x86/include/asm/cpu_entry_area.h +++ b/arch/x86/include/asm/cpu_entry_area.h @@ -43,10 +43,26 @@ struct cpu_entry_area { }; #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) -#define CPU_ENTRY_AREA_PAGES (CPU_ENTRY_AREA_SIZE / PAGE_SIZE) +#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); extern void setup_cpu_entry_areas(void); +extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); + +#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE +#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) + +#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) + +#define CPU_ENTRY_AREA_MAP_SIZE \ + (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE) + +extern struct cpu_entry_area *get_cpu_entry_area(int cpu); + +static inline struct entry_stack *cpu_entry_stack(int cpu) +{ + return &get_cpu_entry_area(cpu)->entry_stack_page.stack; +} #endif diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 2ace1f90d13830..bc359dd2f7f646 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index fb801662a23055..64c4a30e0d3962 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -25,7 +25,6 @@ #else #include #endif -#include /* * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall @@ -84,7 +83,6 @@ enum fixed_addresses { FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, #endif - FIX_RO_IDT, /* Virtual mapping for read-only IDT */ #ifdef CONFIG_X86_32 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, @@ -100,9 +98,6 @@ enum fixed_addresses { #ifdef CONFIG_X86_INTEL_MID FIX_LNW_VRTC, #endif - /* Fixmap entries to remap the GDTs, one per processor. 
*/ - FIX_CPU_ENTRY_AREA_TOP, - FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1, #ifdef CONFIG_ACPI_APEI_GHES /* Used for GHES mapping from assorted contexts */ @@ -143,7 +138,7 @@ enum fixed_addresses { extern void reserve_top_address(unsigned long reserve); #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) extern int fixmaps_set; @@ -191,30 +186,5 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr, void __early_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags); -static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page) -{ - BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); - - return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page; -} - -#define __get_cpu_entry_area_offset_index(cpu, offset) ({ \ - BUILD_BUG_ON(offset % PAGE_SIZE != 0); \ - __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE); \ - }) - -#define get_cpu_entry_area_index(cpu, field) \ - __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field)) - -static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) -{ - return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0)); -} - -static inline struct entry_stack *cpu_entry_stack(int cpu) -{ - return &get_cpu_entry_area(cpu)->entry_stack_page.stack; -} - #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FIXMAP_H */ diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index f2ca9b28fd6830..ce245b0cdfcaa4 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -38,13 +38,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ #define LAST_PKMAP 1024 #endif -#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ - & PMD_MASK) +/* + * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c + * to avoid include recursion hell + */ +#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) + +#define CPU_ENTRY_AREA_BASE \ + ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK) + +#define PKMAP_BASE \ + ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) #else -# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) +# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE) #endif #define MODULES_VADDR VMALLOC_START diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 6d5f45dcd4a13c..3d27831bc58dfa 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -76,32 +76,41 @@ typedef struct { pteval_t pte; } pte_t; #define PGDIR_MASK (~(PGDIR_SIZE - 1)) /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. 
*/ -#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) +#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) + #ifdef CONFIG_X86_5LEVEL -#define VMALLOC_SIZE_TB _AC(16384, UL) -#define __VMALLOC_BASE _AC(0xff92000000000000, UL) -#define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) +# define VMALLOC_SIZE_TB _AC(16384, UL) +# define __VMALLOC_BASE _AC(0xff92000000000000, UL) +# define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) #else -#define VMALLOC_SIZE_TB _AC(32, UL) -#define __VMALLOC_BASE _AC(0xffffc90000000000, UL) -#define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) +# define VMALLOC_SIZE_TB _AC(32, UL) +# define __VMALLOC_BASE _AC(0xffffc90000000000, UL) +# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) #endif + #ifdef CONFIG_RANDOMIZE_MEMORY -#define VMALLOC_START vmalloc_base -#define VMEMMAP_START vmemmap_base +# define VMALLOC_START vmalloc_base +# define VMEMMAP_START vmemmap_base #else -#define VMALLOC_START __VMALLOC_BASE -#define VMEMMAP_START __VMEMMAP_BASE +# define VMALLOC_START __VMALLOC_BASE +# define VMEMMAP_START __VMEMMAP_BASE #endif /* CONFIG_RANDOMIZE_MEMORY */ -#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) -#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) + +#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) + +#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) /* The module sections ends with the start of the fixmap */ -#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) -#define MODULES_LEN (MODULES_END - MODULES_VADDR) -#define ESPFIX_PGD_ENTRY _AC(-2, UL) -#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) -#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) -#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) +#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) +#define MODULES_LEN (MODULES_END - MODULES_VADDR) + +#define ESPFIX_PGD_ENTRY _AC(-2, UL) +#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) + +#define CPU_ENTRY_AREA_PGD _AC(-3, UL) +#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT) + +#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) +#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) #define EARLY_DYNAMIC_PAGE_TABLES 64 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 1dd3f533d78c86..36b17e0febe862 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -18,6 +18,7 @@ #include #include +#include #include #include diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 464daed6894fe3..7c16fe0b60c247 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -951,8 +951,9 @@ void __init trap_init(void) * "sidt" instruction will not leak the location of the kernel, and * to defend the IDT against arbitrary memory write vulnerabilities. 
* It will be reloaded in cpu_init() */ - __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); - idt_descr.address = fix_to_virt(FIX_RO_IDT); + cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table), + PAGE_KERNEL_RO); + idt_descr.address = CPU_ENTRY_AREA_RO_IDT; /* * Should be a barrier for any external CPU state: diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index 235ff9cfaaf4e1..21e8b595cbb12d 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c @@ -15,11 +15,27 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); #endif +struct cpu_entry_area *get_cpu_entry_area(int cpu) +{ + unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE; + BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); + + return (struct cpu_entry_area *) va; +} +EXPORT_SYMBOL(get_cpu_entry_area); + +void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) +{ + unsigned long va = (unsigned long) cea_vaddr; + + set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags)); +} + static void __init -set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) +cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot) { - for ( ; pages; pages--, idx--, ptr += PAGE_SIZE) - __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot); + for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE) + cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot); } /* Setup the fixmap mappings only once per-processor */ @@ -47,10 +63,12 @@ static void __init setup_cpu_entry_area(int cpu) pgprot_t tss_prot = PAGE_KERNEL; #endif - __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page), - per_cpu_ptr(&entry_stack_storage, cpu), 1, - PAGE_KERNEL); + cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu), + gdt_prot); + + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page, + per_cpu_ptr(&entry_stack_storage, cpu), 1, + PAGE_KERNEL); /* * The Intel SDM says (Volume 3, 7.2.1): @@ -72,10 +90,9 @@ static void __init setup_cpu_entry_area(int cpu) BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss), - &per_cpu(cpu_tss_rw, cpu), - sizeof(struct tss_struct) / PAGE_SIZE, - tss_prot); + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss, + &per_cpu(cpu_tss_rw, cpu), + sizeof(struct tss_struct) / PAGE_SIZE, tss_prot); #ifdef CONFIG_X86_32 per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); @@ -85,20 +102,37 @@ static void __init setup_cpu_entry_area(int cpu) BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); BUILD_BUG_ON(sizeof(exception_stacks) != sizeof(((struct cpu_entry_area *)0)->exception_stacks)); - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks), - &per_cpu(exception_stacks, cpu), - sizeof(exception_stacks) / PAGE_SIZE, - PAGE_KERNEL); + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks, + &per_cpu(exception_stacks, cpu), + sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL); - __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline), + cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline, __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); #endif } +static __init void setup_cpu_entry_area_ptes(void) +{ +#ifdef 
CONFIG_X86_32 + unsigned long start, end; + + BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE); + BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK); + + start = CPU_ENTRY_AREA_BASE; + end = start + CPU_ENTRY_AREA_MAP_SIZE; + + for (; start < end; start += PMD_SIZE) + populate_extra_pte(start); +#endif +} + void __init setup_cpu_entry_areas(void) { unsigned int cpu; + setup_cpu_entry_area_ptes(); + for_each_possible_cpu(cpu) setup_cpu_entry_area(cpu); } diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index fdf09d8f98da21..43dedbfb7257a3 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -58,6 +58,7 @@ enum address_markers_idx { KASAN_SHADOW_START_NR, KASAN_SHADOW_END_NR, #endif + CPU_ENTRY_AREA_NR, #ifdef CONFIG_X86_ESPFIX64 ESPFIX_START_NR, #endif @@ -81,6 +82,7 @@ static struct addr_marker address_markers[] = { [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, #endif + [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" }, #ifdef CONFIG_X86_ESPFIX64 [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, #endif @@ -104,6 +106,7 @@ enum address_markers_idx { #ifdef CONFIG_HIGHMEM PKMAP_BASE_NR, #endif + CPU_ENTRY_AREA_NR, FIXADDR_START_NR, END_OF_SPACE_NR, }; @@ -116,6 +119,7 @@ static struct addr_marker address_markers[] = { #ifdef CONFIG_HIGHMEM [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, #endif + [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" }, [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, [END_OF_SPACE_NR] = { -1, NULL } }; @@ -541,8 +545,8 @@ static int __init pt_dump_init(void) address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; # endif address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; + address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE; #endif - return 0; } __initcall(pt_dump_init); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8a64a6f2848d9b..135c9a7898c7da 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include "mm_internal.h" @@ -766,6 +767,7 @@ void __init mem_init(void) mem_init_print_info(NULL); printk(KERN_INFO "virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" + " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif @@ -777,6 +779,10 @@ void __init mem_init(void) FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, + CPU_ENTRY_AREA_BASE, + CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE, + CPU_ENTRY_AREA_MAP_SIZE >> 10, + #ifdef CONFIG_HIGHMEM PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 9ec70d780f1f41..47388f0c0e5964 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -15,6 +15,7 @@ #include #include #include +#include extern struct range pfn_mapped[E820_MAX_ENTRIES]; @@ -322,31 +323,33 @@ void __init kasan_init(void) map_range(&pfn_mapped[i]); } - kasan_populate_zero_shadow( - kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), - kasan_mem_to_shadow((void *)__START_KERNEL_map)); - - kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), - (unsigned long)kasan_mem_to_shadow(_end), - early_pfn_to_nid(__pa(_stext))); - - shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM); + shadow_cpu_entry_begin = (void 
*)CPU_ENTRY_AREA_BASE; shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, PAGE_SIZE); - shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE); + shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + + CPU_ENTRY_AREA_MAP_SIZE); shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, PAGE_SIZE); - kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), - shadow_cpu_entry_begin); + kasan_populate_zero_shadow( + kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), + shadow_cpu_entry_begin); kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, (unsigned long)shadow_cpu_entry_end, 0); - kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END); + kasan_populate_zero_shadow(shadow_cpu_entry_end, + kasan_mem_to_shadow((void *)__START_KERNEL_map)); + + kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), + (unsigned long)kasan_mem_to_shadow(_end), + early_pfn_to_nid(__pa(_stext))); + + kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), + (void *)KASAN_SHADOW_END); load_cr3(init_top_pgt); __flush_tlb_all(); diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 6b9bf023a70055..c3c5274410a908 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index c2454237fa672d..a0e2b8c6e5c73c 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -2261,7 +2261,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) switch (idx) { case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: - case FIX_RO_IDT: #ifdef CONFIG_X86_32 case FIX_WP_TEST: # ifdef CONFIG_HIGHMEM @@ -2272,7 +2271,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #endif case FIX_TEXT_POKE0: case FIX_TEXT_POKE1: - case FIX_CPU_ENTRY_AREA_TOP ... FIX_CPU_ENTRY_AREA_BOTTOM: /* All local page mappings */ pte = pfn_pte(phys, prot); break; From 763f7eaf606281ccfaa2f95445219f797697ed70 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Dec 2017 10:56:29 +0100 Subject: [PATCH 1010/3326] init: Invoke init_espfix_bsp() from mm_init() commit 613e396bc0d4c7604fba23256644e78454c68cf6 upstream. init_espfix_bsp() needs to be invoked before the page table isolation initialization. Move it into mm_init() which is the place where pti_init() will be added. While at it get rid of the #ifdeffery and provide proper stub functions. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/espfix.h | 7 ++++--- arch/x86/kernel/smpboot.c | 6 +----- include/asm-generic/pgtable.h | 5 +++++ init/main.c | 6 ++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h index 0211029076ea8b..6777480d8a427e 100644 --- a/arch/x86/include/asm/espfix.h +++ b/arch/x86/include/asm/espfix.h @@ -2,7 +2,7 @@ #ifndef _ASM_X86_ESPFIX_H #define _ASM_X86_ESPFIX_H -#ifdef CONFIG_X86_64 +#ifdef CONFIG_X86_ESPFIX64 #include @@ -11,7 +11,8 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); extern void init_espfix_bsp(void); extern void init_espfix_ap(int cpu); - -#endif /* CONFIG_X86_64 */ +#else +static inline void init_espfix_ap(int cpu) { } +#endif #endif /* _ASM_X86_ESPFIX_H */ diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 142126ab5aae8b..12bf07d44dfed2 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -990,12 +990,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle, initial_code = (unsigned long)start_secondary; initial_stack = idle->thread.sp; - /* - * Enable the espfix hack for this CPU - */ -#ifdef CONFIG_X86_ESPFIX64 + /* Enable the espfix hack for this CPU */ init_espfix_ap(cpu); -#endif /* So we see what's up */ announce_cpu(cpu, apicid); diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 1ac457511f4eb5..045a7f52ab3a55 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -1025,6 +1025,11 @@ static inline int pmd_clear_huge(pmd_t *pmd) struct file; int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot); + +#ifndef CONFIG_X86_ESPFIX64 +static inline void init_espfix_bsp(void) { } +#endif + #endif /* !__ASSEMBLY__ */ #ifndef io_remap_pfn_range diff --git a/init/main.c b/init/main.c index 0ee9c6866ada1d..8a390f60ec81e5 100644 --- a/init/main.c +++ b/init/main.c @@ -504,6 +504,8 @@ static void __init mm_init(void) pgtable_init(); vmalloc_init(); ioremap_huge_init(); + /* Should be run before the first non-init thread is created */ + init_espfix_bsp(); } asmlinkage __visible void __init start_kernel(void) @@ -673,10 +675,6 @@ asmlinkage __visible void __init start_kernel(void) #ifdef CONFIG_X86 if (efi_enabled(EFI_RUNTIME_SERVICES)) efi_enter_virtual_mode(); -#endif -#ifdef CONFIG_X86_ESPFIX64 - /* Should be run before the first non-init thread is created */ - init_espfix_bsp(); #endif thread_stack_cache_init(); cred_init(); From 752d01704ad18371fa6d15ef16f5dea7972be821 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 23 Dec 2017 19:45:11 +0100 Subject: [PATCH 1011/3326] x86/cpu_entry_area: Prevent wraparound in setup_cpu_entry_area_ptes() on 32bit commit f6c4fd506cb626e4346aa81688f255e593a7c5a0 upstream. The loop which populates the CPU entry area PMDs can wrap around on 32bit machines when the number of CPUs is small. It worked wonderful for NR_CPUS=64 for whatever reason and the moron who wrote that code did not bother to test it with !SMP. Check for the wraparound to fix it. 
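For readers unfamiliar with the failure mode: on 32-bit, CPU_ENTRY_AREA_BASE sits just below the fixmap near the top of the address space, so with a small map size the very first "start += PMD_SIZE" step overflows the 32-bit value; start becomes small again, "start < end" stays true, and the loop keeps populating PTEs at addresses far outside the area and, depending on the values, never terminates. The following standalone userspace sketch (not kernel code; the base and size constants are invented for illustration) reproduces the arithmetic and shows how the added "start >= base" test stops the loop right after the wrap:

/*
 * Standalone userspace sketch of the wraparound (not kernel code; the
 * base/size constants below are invented for illustration).  uint32_t is
 * used to force 32-bit arithmetic even on a 64-bit host.
 */
#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE  (4u * 1024 * 1024)   /* 4 MiB PMD, as on 32-bit non-PAE x86 */
#define AREA_BASE 0xffc00000u          /* hypothetical PMD-aligned base near the top */
#define AREA_SIZE (64u * 1024)         /* small map, as with a small NR_CPUS */

int main(void)
{
    uint32_t start = AREA_BASE;
    uint32_t end = AREA_BASE + AREA_SIZE;
    int populated = 0;

    /*
     * Without the "start >= AREA_BASE" term the loop would wrap to 0 on
     * the second step and spin forever, "populating" bogus addresses.
     * With it, the loop stops as soon as start wraps below the base.
     */
    for (; start < end && start >= AREA_BASE; start += PMD_SIZE)
        populated++;

    printf("populated %d PMD slot(s), stopped at 0x%08x after the wrap\n",
           populated, (unsigned int)start);
    return 0;
}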
Fixes: 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap") Reported-by: kernel test robot Signed-off-by: Thomas "Feels stupid" Gleixner Tested-by: Borislav Petkov Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/cpu_entry_area.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index 21e8b595cbb12d..fe814fd5e0140f 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c @@ -122,7 +122,8 @@ static __init void setup_cpu_entry_area_ptes(void) start = CPU_ENTRY_AREA_BASE; end = start + CPU_ENTRY_AREA_MAP_SIZE; - for (; start < end; start += PMD_SIZE) + /* Careful here: start + PMD_SIZE might wrap around */ + for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE) populate_extra_pte(start); #endif } From 7a5d5789819417d67ccb6c81f9339ee75f97370a Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 14 Dec 2017 13:31:16 +0100 Subject: [PATCH 1012/3326] ACPI: APEI / ERST: Fix missing error handling in erst_reader() commit bb82e0b4a7e96494f0c1004ce50cec3d7b5fb3d1 upstream. The commit f6f828513290 ("pstore: pass allocated memory region back to caller") changed the check of the return value from erst_read() in erst_reader() in the following way: if (len == -ENOENT) goto skip; - else if (len < 0) { - rc = -1; + else if (len < sizeof(*rcd)) { + rc = -EIO; goto out; This introduced another bug: since the comparison with sizeof() is cast to unsigned, a negative len value doesn't hit any longer. As a result, when an error is returned from erst_read(), the code falls through, and it may eventually lead to some weird thing like memory corruption. This patch adds the negative error value check more explicitly for addressing the issue. Fixes: f6f828513290 (pstore: pass allocated memory region back to caller) Tested-by: Jerry Tang Signed-off-by: Takashi Iwai Acked-by: Kees Cook Reviewed-by: Borislav Petkov Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/apei/erst.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 2c462beee5513e..a943cf17faa729 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -1007,7 +1007,7 @@ static ssize_t erst_reader(struct pstore_record *record) /* The record may be cleared by others, try read next record */ if (len == -ENOENT) goto skip; - else if (len < sizeof(*rcd)) { + else if (len < 0 || len < sizeof(*rcd)) { rc = -EIO; goto out; } From 94e0c5ab52e2bdc1ddbaec32ca0e477a2737eb04 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 30 Nov 2017 19:42:52 -0800 Subject: [PATCH 1013/3326] acpi, nfit: fix health event notification commit adf6895754e2503d994a765535fd1813f8834674 upstream. Integration testing with a BIOS that generates injected health event notifications fails to communicate those events to userspace. The nfit driver neglects to link the ACPI DIMM device with the necessary driver data so acpi_nvdimm_notify() fails this lookup: nfit_mem = dev_get_drvdata(dev); if (nfit_mem && nfit_mem->flags_attr) sysfs_notify_dirent(nfit_mem->flags_attr); Add the necessary linkage when installing the notification handler and clean it up when the nfit driver instance is torn down. 
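The essence of the nfit fix above is a very common pattern: a notification callback only receives a generic device pointer, so the driver must attach its private state to that device when it installs the handler and detach it again on teardown. A minimal userspace sketch of that pattern follows (plain C, not the nfit driver; the structures, helper names and the sysfs path are invented stand-ins):

#include <stdio.h>
#include <stddef.h>

struct device {                 /* stand-in for the ACPI device object */
    void *drvdata;
};

struct dimm_state {             /* stand-in for struct nfit_mem */
    const char *flags_attr;
};

static void dev_set_drvdata(struct device *dev, void *data) { dev->drvdata = data; }
static void *dev_get_drvdata(struct device *dev) { return dev->drvdata; }

/* Mirrors the notify handler's lookup: without the drvdata link it finds
 * NULL and the event is silently dropped. */
static void notify(struct device *dev)
{
    struct dimm_state *st = dev_get_drvdata(dev);

    if (st && st->flags_attr)
        printf("event delivered: poke sysfs attr '%s'\n", st->flags_attr);
    else
        printf("event lost: no driver data attached\n");
}

int main(void)
{
    struct device dimm_dev = { 0 };
    struct dimm_state st = { .flags_attr = "nmem0/nfit/flags" };

    notify(&dimm_dev);                 /* before linking: event is lost    */
    dev_set_drvdata(&dimm_dev, &st);   /* what the patch adds at register  */
    notify(&dimm_dev);                 /* now the handler finds its state  */
    dev_set_drvdata(&dimm_dev, NULL);  /* and clears it again on teardown  */
    return 0;
}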
Cc: Toshi Kani Cc: Vishal Verma Fixes: ba9c8dd3c222 ("acpi, nfit: add dimm device notification support") Reported-by: Daniel Osawa Tested-by: Daniel Osawa Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/nfit/core.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 9c2c49b6a240d5..dea0fb3d6f64a5 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1457,6 +1457,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, dev_name(&adev_dimm->dev)); return -ENXIO; } + /* + * Record nfit_mem for the notification path to track back to + * the nfit sysfs attributes for this dimm device object. + */ + dev_set_drvdata(&adev_dimm->dev, nfit_mem); /* * Until standardization materializes we need to consider 4 @@ -1516,9 +1521,11 @@ static void shutdown_dimm_notify(void *data) sysfs_put(nfit_mem->flags_attr); nfit_mem->flags_attr = NULL; } - if (adev_dimm) + if (adev_dimm) { acpi_remove_notify_handler(adev_dimm->handle, ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); + dev_set_drvdata(&adev_dimm->dev, NULL); + } } mutex_unlock(&acpi_desc->init_mutex); } From 29082870f58a9b0c793bea745de52a07c74f09aa Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 29 Nov 2017 01:18:57 -0800 Subject: [PATCH 1014/3326] crypto: skcipher - set walk.iv for zero-length inputs commit 2b4f27c36bcd46e820ddb9a8e6fe6a63fa4250b8 upstream. All the ChaCha20 algorithms as well as the ARM bit-sliced AES-XTS algorithms call skcipher_walk_virt(), then access the IV (walk.iv) before checking whether any bytes need to be processed (walk.nbytes). But if the input is empty, then skcipher_walk_virt() doesn't set the IV, and the algorithms crash trying to use the uninitialized IV pointer. Fix it by setting the IV earlier in skcipher_walk_virt(). Also fix it for the AEAD walk functions. This isn't a perfect solution because we can't actually align the IV to ->cra_alignmask unless there are bytes to process, for one because the temporary buffer for the aligned IV is freed by skcipher_walk_done(), which is only called when there are bytes to process. Thus, algorithms that require aligned IVs will still need to avoid accessing the IV when walk.nbytes == 0. Still, many algorithms/architectures are fine with IVs having any alignment, and even for those that aren't, a misaligned pointer bug is much less severe than an uninitialized pointer bug. This change also matches the behavior of the older blkcipher_walk API. Fixes: 0cabf2af6f5a ("crypto: skcipher - Fix crash on zero-length input") Reported-by: syzbot Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/skcipher.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 778e0ff42bfa80..11af5fd6a44357 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -449,6 +449,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, walk->total = req->cryptlen; walk->nbytes = 0; + walk->iv = req->iv; + walk->oiv = req->iv; if (unlikely(!walk->total)) return 0; @@ -456,9 +458,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); - walk->iv = req->iv; - walk->oiv = req->iv; - walk->flags &= ~SKCIPHER_WALK_SLEEP; walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
SKCIPHER_WALK_SLEEP : 0; @@ -510,6 +509,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, int err; walk->nbytes = 0; + walk->iv = req->iv; + walk->oiv = req->iv; if (unlikely(!walk->total)) return 0; @@ -525,9 +526,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, scatterwalk_done(&walk->in, 0, walk->total); scatterwalk_done(&walk->out, 0, walk->total); - walk->iv = req->iv; - walk->oiv = req->iv; - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) walk->flags |= SKCIPHER_WALK_SLEEP; else From 88990591f0b0e7d4aedf0255fc26a09a2f899212 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 30 Nov 2017 13:39:27 +0100 Subject: [PATCH 1015/3326] crypto: mcryptd - protect the per-CPU queue with a lock commit 9abffc6f2efe46c3564c04312e52e07622d40e51 upstream. mcryptd_enqueue_request() grabs the per-CPU queue struct and protects access to it with disabled preemption. Then it schedules a worker on the same CPU. The worker in mcryptd_queue_worker() guards access to the same per-CPU variable with disabled preemption. If we take CPU-hotplug into account then it is possible that between queue_work_on() and the actual invocation of the worker the CPU goes down and the worker will be scheduled on _another_ CPU. And here the preempt_disable() protection does not work anymore. The easiest thing is to add a spin_lock() to guard access to the list. Another detail: mcryptd_queue_worker() is not processing more than MCRYPTD_BATCH invocation in a row. If there are still items left, then it will invoke queue_work() to proceed with more later. *I* would suggest to simply drop that check because it does not use a system workqueue and the workqueue is already marked as "CPU_INTENSIVE". And if preemption is required then the scheduler should do it. However if queue_work() is used then the work item is marked as CPU unbound. That means it will try to run on the local CPU but it may run on another CPU as well. Especially with CONFIG_DEBUG_WQ_FORCE_RR_CPU=y. Again, the preempt_disable() won't work here but lock which was introduced will help. In order to keep work-item on the local CPU (and avoid RR) I changed it to queue_work_on(). 
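The underlying rule in the mcryptd change is worth spelling out: preempt_disable() only pins the current task to its CPU; it does not serialize against code that ends up running on another CPU (a worker rescheduled after hotplug, or an unbound work item), so a queue that was meant to be per-CPU but can be touched cross-CPU needs a real lock. A small pthreads sketch of the resulting pattern (userspace C with invented names, not the mcryptd code; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

#define RING_SLOTS 1024
#define NUM_ITEMS  1000

struct cpu_queue {
    pthread_spinlock_t q_lock;      /* counterpart of the q_lock added above */
    int ring[RING_SLOTS];
    unsigned int head, tail;
};

static struct cpu_queue q;

static void enqueue(int v)
{
    pthread_spin_lock(&q.q_lock);
    q.ring[q.tail++ % RING_SLOTS] = v;
    pthread_spin_unlock(&q.q_lock);
}

static int dequeue(void)
{
    int v = -1;

    pthread_spin_lock(&q.q_lock);
    if (q.head != q.tail)
        v = q.ring[q.head++ % RING_SLOTS];
    pthread_spin_unlock(&q.q_lock);
    return v;
}

static void *producer(void *unused)
{
    for (int i = 0; i < NUM_ITEMS; i++)
        enqueue(i);
    return NULL;
}

static void *consumer(void *unused)
{
    int seen = 0;

    /* May run on any CPU relative to the producer; only the lock, not
     * "the producer cannot be preempted", keeps this access safe. */
    while (seen < NUM_ITEMS)
        if (dequeue() >= 0)
            seen++;
    return NULL;
}

int main(void)
{
    pthread_t p, c;

    pthread_spin_init(&q.q_lock, PTHREAD_PROCESS_PRIVATE);
    pthread_create(&p, NULL, producer, NULL);
    pthread_create(&c, NULL, consumer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    printf("%d items moved from producer to consumer intact\n", NUM_ITEMS);
    return 0;
}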
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/mcryptd.c | 23 ++++++++++------------- include/crypto/mcryptd.h | 1 + 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index 4e64726588524f..eca04d3729b37c 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue, pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); + spin_lock_init(&cpu_queue->q_lock); } return 0; } @@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue, int cpu, err; struct mcryptd_cpu_queue *cpu_queue; - cpu = get_cpu(); - cpu_queue = this_cpu_ptr(queue->cpu_queue); - rctx->tag.cpu = cpu; + cpu_queue = raw_cpu_ptr(queue->cpu_queue); + spin_lock(&cpu_queue->q_lock); + cpu = smp_processor_id(); + rctx->tag.cpu = smp_processor_id(); err = crypto_enqueue_request(&cpu_queue->queue, request); pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", cpu, cpu_queue, request); + spin_unlock(&cpu_queue->q_lock); queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); - put_cpu(); return err; } @@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work) cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); i = 0; while (i < MCRYPTD_BATCH || single_task_running()) { - /* - * preempt_disable/enable is used to prevent - * being preempted by mcryptd_enqueue_request() - */ - local_bh_disable(); - preempt_disable(); + + spin_lock_bh(&cpu_queue->q_lock); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); - preempt_enable(); - local_bh_enable(); + spin_unlock_bh(&cpu_queue->q_lock); if (!req) { mcryptd_opportunistic_flush(); @@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work) ++i; } if (cpu_queue->queue.qlen) - queue_work(kcrypto_wq, &cpu_queue->work); + queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work); } void mcryptd_flusher(struct work_struct *__work) diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h index cceafa01f90732..b67404fc4b34ba 100644 --- a/include/crypto/mcryptd.h +++ b/include/crypto/mcryptd.h @@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast( struct mcryptd_cpu_queue { struct crypto_queue queue; + spinlock_t q_lock; struct work_struct work; }; From c692698ebebed838627aa3879e30959c4a75f033 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Wed, 29 Nov 2017 12:02:23 +0100 Subject: [PATCH 1016/3326] crypto: af_alg - wait for data at beginning of recvmsg commit 11edb555966ed2c66c533d17c604f9d7e580a829 upstream. The wait for data is a non-atomic operation that can sleep and therefore potentially release the socket lock. The release of the socket lock allows another thread to modify the context data structure. The waiting operation for new data therefore must be called at the beginning of recvmsg. This prevents a race condition where checks of the members of the context data structure are performed by recvmsg while there is a potential for modification of these values. 
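The ordering rule behind this af_alg fix generalizes: any wait that can drop the lock protecting shared state must come before the checks of that state, because whatever was observed before the wait may no longer hold once the lock has been reacquired. A condition-variable sketch of the same discipline (plain pthreads, userspace C, invented names, not the af_alg code; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  more_data = PTHREAD_COND_INITIALIZER;
static int used;                       /* stand-in for ctx->used */

static void *producer(void *unused)
{
    pthread_mutex_lock(&lock);
    used += 128;                       /* another thread modifies the context */
    pthread_cond_signal(&more_data);
    pthread_mutex_unlock(&lock);
    return NULL;
}

static int recvmsg_like(void)
{
    int ret;

    pthread_mutex_lock(&lock);

    /*
     * Wait for data first, as the patch does.  pthread_cond_wait()
     * drops the lock while sleeping (the analogue of the wait helper
     * releasing the socket lock), so nothing inspected before this
     * point could be trusted afterwards anyway.
     */
    while (!used)
        pthread_cond_wait(&more_data, &lock);

    /* Only now look at the shared state; the lock has been held
     * continuously since the wait returned. */
    ret = used;
    used = 0;

    pthread_mutex_unlock(&lock);
    return ret;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, producer, NULL);
    printf("consumed %d queued bytes\n", recvmsg_like());
    pthread_join(t, NULL);
    return 0;
}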
Fixes: e870456d8e7c ("crypto: algif_skcipher - overhaul memory management") Fixes: d887c52d6ae4 ("crypto: algif_aead - overhaul memory management") Reported-by: syzbot Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/af_alg.c | 6 ------ crypto/algif_aead.c | 6 ++++++ crypto/algif_skcipher.c | 6 ++++++ 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index e181073ef64d24..6ec360213107fa 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1165,12 +1165,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, if (!af_alg_readable(sk)) break; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); - if (err) - return err; - } - seglen = min_t(size_t, (maxsize - len), msg_data_left(msg)); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 3d793bc2aa82c0..ba07da8422c6d7 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -111,6 +111,12 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ + if (!ctx->used) { + err = af_alg_wait_for_data(sk, flags); + if (err) + return err; + } + /* * Data length provided by caller via sendmsg/sendpage that has not * yet been processed. diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 30ee2a8e8f4292..3f4458f08293c8 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; + if (!ctx->used) { + err = af_alg_wait_for_data(sk, flags); + if (err) + return err; + } + /* Allocate cipher request for current operation. */ areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + crypto_skcipher_reqsize(tfm)); From f09fca41e29c68944e75ab5f65a2efcfcc88aceb Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Fri, 8 Dec 2017 11:50:37 +0100 Subject: [PATCH 1017/3326] crypto: af_alg - fix race accessing cipher request commit d53c5135792319e095bb126bc43b2ee98586f7fe upstream. When invoking an asynchronous cipher operation, the invocation of the callback may be performed before the subsequent operations in the initial code path are invoked. The callback deletes the cipher request data structure which implies that after the invocation of the asynchronous cipher operation, this data structure must not be accessed any more. The setting of the return code size with the request data structure must therefore be moved before the invocation of the asynchronous cipher operation. Fixes: e870456d8e7c ("crypto: algif_skcipher - overhaul memory management") Fixes: d887c52d6ae4 ("crypto: algif_aead - overhaul memory management") Reported-by: syzbot Signed-off-by: Stephan Mueller Acked-by: Jonathan Cameron Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/algif_aead.c | 10 +++++----- crypto/algif_skcipher.c | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index ba07da8422c6d7..782cb8fec32369 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -291,6 +291,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, /* AIO operation */ sock_hold(sk); areq->iocb = msg->msg_iocb; + + /* Remember output size that will be generated. 
*/ + areq->outlen = outlen; + aead_request_set_callback(&areq->cra_u.aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_async_cb, areq); @@ -298,12 +302,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, crypto_aead_decrypt(&areq->cra_u.aead_req); /* AIO operation in progress */ - if (err == -EINPROGRESS || err == -EBUSY) { - /* Remember output size that will be generated. */ - areq->outlen = outlen; - + if (err == -EINPROGRESS || err == -EBUSY) return -EIOCBQUEUED; - } sock_put(sk); } else { diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 3f4458f08293c8..7a3e663d54d5c7 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -125,6 +125,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, /* AIO operation */ sock_hold(sk); areq->iocb = msg->msg_iocb; + + /* Remember output size that will be generated. */ + areq->outlen = len; + skcipher_request_set_callback(&areq->cra_u.skcipher_req, CRYPTO_TFM_REQ_MAY_SLEEP, af_alg_async_cb, areq); @@ -133,12 +137,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); /* AIO operation in progress */ - if (err == -EINPROGRESS || err == -EBUSY) { - /* Remember output size that will be generated. */ - areq->outlen = len; - + if (err == -EINPROGRESS || err == -EBUSY) return -EIOCBQUEUED; - } sock_put(sk); } else { From de3b66c01edc8ac2bea82c0337e75f6b5498bf37 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Tue, 14 Nov 2017 14:43:27 +0000 Subject: [PATCH 1018/3326] mfd: cros ec: spi: Don't send first message too soon commit 15d8374874ded0bec37ef27f8301a6d54032c0e5 upstream. On the Tegra124 Nyan-Big chromebook the very first SPI message sent to the EC is failing. The Tegra SPI driver configures the SPI chip-selects to be active-high by default (and always has for many years). The EC SPI requires an active-low chip-select and so the Tegra chip-select is reconfigured to be active-low when the EC SPI driver calls spi_setup(). The problem is that if the first SPI message to the EC is sent too soon after reconfiguring the SPI chip-select, it fails. The EC SPI driver prevents back-to-back SPI messages being sent too soon by keeping track of the time the last transfer was sent via the variable 'last_transfer_ns'. To prevent the very first transfer being sent too soon, initialise the 'last_transfer_ns' variable after calling spi_setup() and before sending the first SPI message. Signed-off-by: Jon Hunter Reviewed-by: Brian Norris Reviewed-by: Douglas Anderson Acked-by: Benson Leung Signed-off-by: Lee Jones Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/cros_ec_spi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c index c9714072e22465..a14196e95e9b3a 100644 --- a/drivers/mfd/cros_ec_spi.c +++ b/drivers/mfd/cros_ec_spi.c @@ -667,6 +667,7 @@ static int cros_ec_spi_probe(struct spi_device *spi) sizeof(struct ec_response_get_protocol_info); ec_dev->dout_size = sizeof(struct ec_host_request); + ec_spi->last_transfer_ns = ktime_get_ns(); err = cros_ec_register(ec_dev); if (err) { From 6300daa071b21eec1e7411609a09f050966b392e Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sat, 11 Nov 2017 16:38:43 +0100 Subject: [PATCH 1019/3326] mfd: twl4030-audio: Fix sibling-node lookup commit 0a423772de2f3d7b00899987884f62f63ae00dcb upstream. 
A helper purported to look up a child node based on its name was using the wrong of-helper and ended up prematurely freeing the parent of-node while leaking any matching node. To make things worse, any matching node would not even necessarily be a child node as the whole device tree was searched depth-first starting at the parent. Fixes: 019a7e6b7b31 ("mfd: twl4030-audio: Add DT support") Signed-off-by: Johan Hovold Acked-by: Peter Ujfalusi Signed-off-by: Lee Jones Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/twl4030-audio.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c index da16bf45fab43e..dc94ffc6321a84 100644 --- a/drivers/mfd/twl4030-audio.c +++ b/drivers/mfd/twl4030-audio.c @@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void) EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk); static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata, - struct device_node *node) + struct device_node *parent) { + struct device_node *node; + if (pdata && pdata->codec) return true; - if (of_find_node_by_name(node, "codec")) + node = of_get_child_by_name(parent, "codec"); + if (node) { + of_node_put(node); return true; + } return false; } From 637de99c7a1523b31aedb83537b0fcba8775663e Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sat, 11 Nov 2017 16:38:44 +0100 Subject: [PATCH 1020/3326] mfd: twl6040: Fix child-node lookup commit 85e9b13cbb130a3209f21bd7933933399c389ffe upstream. Fix child-node lookup during probe, which ended up searching the whole device tree depth-first starting at the parent rather than just matching on its children. To make things worse, the parent node was prematurely freed, while the child node was leaked. Note that the CONFIG_OF compile guard can be removed as of_get_child_by_name() provides a !CONFIG_OF implementation which always fails. Fixes: 37e13cecaa14 ("mfd: Add support for Device Tree to twl6040") Fixes: ca2cad6ae38e ("mfd: Fix twl6040 build failure") Signed-off-by: Johan Hovold Acked-by: Peter Ujfalusi Signed-off-by: Lee Jones Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/twl6040.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index d66502d36ba0b3..dd19f17a1b6375 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = { }; -static bool twl6040_has_vibra(struct device_node *node) +static bool twl6040_has_vibra(struct device_node *parent) { -#ifdef CONFIG_OF - if (of_find_node_by_name(node, "vibra")) + struct device_node *node; + + node = of_get_child_by_name(parent, "vibra"); + if (node) { + of_node_put(node); return true; -#endif + } + return false; } From 065a28657370ce3a7ebbc13030ba6231db4a6138 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 14 Dec 2017 16:44:12 +0100 Subject: [PATCH 1021/3326] ALSA: rawmidi: Avoid racy info ioctl via ctl device commit c1cfd9025cc394fd137a01159d74335c5ac978ce upstream. The rawmidi also allows to obtaining the information via ioctl of ctl API. It means that user can issue an ioctl to the rawmidi device even when it's being removed as long as the control device is present. Although the code has some protection via the global register_mutex, its range is limited to the search of the corresponding rawmidi object, and the mutex is already unlocked at accessing the rawmidi object. This may lead to a use-after-free. 
For avoiding it, this patch widens the application of register_mutex to the whole snd_rawmidi_info_select() function. We have another mutex per rawmidi object, but this operation isn't very hot path, so it shouldn't matter from the performance POV. Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/rawmidi.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index b3b353d7252724..f055ca10bbc1d3 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream, return 0; } -int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info) +static int __snd_rawmidi_info_select(struct snd_card *card, + struct snd_rawmidi_info *info) { struct snd_rawmidi *rmidi; struct snd_rawmidi_str *pstr; struct snd_rawmidi_substream *substream; - mutex_lock(®ister_mutex); rmidi = snd_rawmidi_search(card, info->device); - mutex_unlock(®ister_mutex); if (!rmidi) return -ENXIO; if (info->stream < 0 || info->stream > 1) @@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info } return -ENXIO; } + +int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info) +{ + int ret; + + mutex_lock(®ister_mutex); + ret = __snd_rawmidi_info_select(card, info); + mutex_unlock(®ister_mutex); + return ret; +} EXPORT_SYMBOL(snd_rawmidi_info_select); static int snd_rawmidi_info_select_user(struct snd_card *card, From 7a6a8463978679e3453785270543abadecb07ab7 Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Thu, 14 Dec 2017 15:28:58 +0800 Subject: [PATCH 1022/3326] ALSA: hda/realtek - Fix Dell AIO LineOut issue commit 9226665159f0367ad08bc7d5dd194aeadb90316f upstream. Dell AIO had LineOut jack. Add LineOut verb into this patch. [ Additional notes: the ALC274 codec seems requiring the fixed pin / DAC connections for HP / line-out pins for enabling EQ for speakers; i.e. the HP / LO pins expect to be connected with NID 0x03 while keeping the speaker with NID 0x02. However, by adding a new line-out pin, the auto-parser assigns the NID 0x02 for HP/LO pins as primary outputs. As an easy workaround, we provide the preferred_pairs[] to map forcibly for these pins. 
-- tiwai ] Fixes: 75ee94b20b46 ("ALSA: hda - fix headset mic problem for Dell machines with alc274") Signed-off-by: Kailang Yang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b076386c8952d9..9ac4b9076ee241 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5162,6 +5162,22 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec, } } +/* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */ +static void alc274_fixup_bind_dacs(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + static hda_nid_t preferred_pairs[] = { + 0x21, 0x03, 0x1b, 0x03, 0x16, 0x02, + 0 + }; + + if (action != HDA_FIXUP_ACT_PRE_PROBE) + return; + + spec->gen.preferred_dacs = preferred_pairs; +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" @@ -5279,6 +5295,8 @@ enum { ALC233_FIXUP_LENOVO_MULTI_CODECS, ALC294_FIXUP_LENOVO_MIC_LOCATION, ALC700_FIXUP_INTEL_REFERENCE, + ALC274_FIXUP_DELL_BIND_DACS, + ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, }; static const struct hda_fixup alc269_fixups[] = { @@ -6089,6 +6107,21 @@ static const struct hda_fixup alc269_fixups[] = { {} } }, + [ALC274_FIXUP_DELL_BIND_DACS] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc274_fixup_bind_dacs, + .chained = true, + .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE + }, + [ALC274_FIXUP_DELL_AIO_LINEOUT_VERB] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1b, 0x0401102f }, + { } + }, + .chained = true, + .chain_id = ALC274_FIXUP_DELL_BIND_DACS + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -6550,7 +6583,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x14, 0x90170110}, {0x1b, 0x90a70130}, {0x21, 0x03211020}), - SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, + SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, {0x12, 0xb7a60130}, {0x13, 0xb8a61140}, {0x16, 0x90170110}, From 70709c277cdcee4e1700e9155a316700ba1446b9 Mon Sep 17 00:00:00 2001 From: Guneshwor Singh Date: Thu, 7 Dec 2017 18:06:20 +0530 Subject: [PATCH 1023/3326] ALSA: hda - Add vendor id for Cannonlake HDMI codec commit 2b4584d00a6bc02b63ab3c7213060d41a74bdff1 upstream. Cannonlake HDMI codec has the same nid as Geminilake. This adds the codec entry for it. 
Signed-off-by: Guneshwor Singh Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_hdmi.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index c19c81d230bd74..b4f1b6e8830549 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -55,10 +55,11 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info"); #define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b) #define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \ ((codec)->core.vendor_id == 0x80862800)) +#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c) #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \ || is_skylake(codec) || is_broxton(codec) \ - || is_kabylake(codec)) || is_geminilake(codec) - + || is_kabylake(codec)) || is_geminilake(codec) \ + || is_cannonlake(codec) #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882) #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883) #define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec)) @@ -3841,6 +3842,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_i915_hsw_hdmi), HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_i915_hsw_hdmi), HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_i915_hsw_hdmi), HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi), +HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), From 39384674586f371503728661d0ded892c348717a Mon Sep 17 00:00:00 2001 From: Jussi Laako Date: Thu, 7 Dec 2017 12:58:33 +0200 Subject: [PATCH 1024/3326] ALSA: usb-audio: Add native DSD support for Esoteric D-05X commit 866f7ed7d67936dcdbcddc111c8af878c918fe7c upstream. Adds VID:PID of Esoteric D-05X to the TEAC device id's. Renames the is_teac_50X_dac() function to is_teac_dsd_dac() to cover broader device family from the same corporation sharing the same USB audio implementation. Signed-off-by: Jussi Laako Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/quirks.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 20624320b753f4..8d7db7cd4f8803 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1172,10 +1172,11 @@ static bool is_marantz_denon_dac(unsigned int id) /* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch * between PCM/DOP and native DSD mode */ -static bool is_teac_50X_dac(unsigned int id) +static bool is_teac_dsd_dac(unsigned int id) { switch (id) { case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */ + case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */ return true; } return false; @@ -1208,7 +1209,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs, break; } mdelay(20); - } else if (is_teac_50X_dac(subs->stream->chip->usb_id)) { + } else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) { /* Vendor mode switch cmd is required. 
*/ switch (fmt->altsetting) { case 3: /* DSD mode (DSD_U32) requested */ @@ -1398,7 +1399,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, } /* TEAC devices with USB DAC functionality */ - if (is_teac_50X_dac(chip->usb_id)) { + if (is_teac_dsd_dac(chip->usb_id)) { if (fp->altsetting == 3) return SNDRV_PCM_FMTBIT_DSD_U32_BE; } From 2c7b98ffac666d4f54d1b57b82c3341f473f7dc5 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 18 Dec 2017 23:36:57 +0100 Subject: [PATCH 1025/3326] ALSA: usb-audio: Fix the missing ctl name suffix at parsing SU commit 5a15f289ee87eaf33f13f08a4909ec99d837ec5f upstream. The commit 89b89d121ffc ("ALSA: usb-audio: Add check return value for usb_string()") added the check of the return value from snd_usb_copy_string_desc(), which is correct per se, but it introduced a regression. In the original code, either the "Clock Source", "Playback Source" or "Capture Source" suffix is added after the terminal string, while the commit changed it to add the suffix only when get_term_name() is failing. It ended up with an incorrect ctl name like "PCM" instead of "PCM Capture Source". Also, even the original code has a similar bug: when the ctl name is generated from snd_usb_copy_string_desc() for the given iSelector, it also doesn't put the suffix. This patch addresses these issues: the suffix is added always when no static mapping is found. Also the patch tries to put more comments and cleans up the if/else block for better readability in order to avoid the same pitfall again. Fixes: 89b89d121ffc ("ALSA: usb-audio: Add check return value for usb_string()") Reported-and-tested-by: Mauro Santos Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/mixer.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 4fde4f8d4444a5..75bce127d768c6 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2173,20 +2173,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, kctl->private_value = (unsigned long)namelist; kctl->private_free = usb_mixer_selector_elem_free; - nameid = uac_selector_unit_iSelector(desc); + /* check the static mapping table at first */ len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); - if (len) - ; - else if (nameid) - len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, - sizeof(kctl->id.name)); - else - len = get_term_name(state, &state->oterm, - kctl->id.name, sizeof(kctl->id.name), 0); - if (!len) { - strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name)); + /* no mapping ? */ + /* if iSelector is given, use it */ + nameid = uac_selector_unit_iSelector(desc); + if (nameid) + len = snd_usb_copy_string_desc(state, nameid, + kctl->id.name, + sizeof(kctl->id.name)); + /* ... or pick up the terminal name at next */ + if (!len) + len = get_term_name(state, &state->oterm, + kctl->id.name, sizeof(kctl->id.name), 0); + /* ... or use the fixed string "USB" as the last resort */ + if (!len) + strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name)); + /* and add the proper suffix */ if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) append_ctl_name(kctl, " Clock Source"); else if ((state->oterm.type & 0xff00) == 0x0100) From d8f477a5cd2021552b135764b77df7c39b7dc504 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 15 Dec 2017 03:07:18 +0100 Subject: [PATCH 1026/3326] PCI / PM: Force devices to D0 in pci_pm_thaw_noirq() commit 5839ee7389e893a31e4e3c9cf17b50d14103c902 upstream. 
It is incorrect to call pci_restore_state() for devices in low-power states (D1-D3), as that involves the restoration of MSI setup which requires MMIO to be operational and that is only the case in D0. However, pci_pm_thaw_noirq() may do that if the driver's "freeze" callbacks put the device into a low-power state, so fix it by making it force devices into D0 via pci_set_power_state() instead of trying to "update" their power state which is pointless. Fixes: e60514bd4485 (PCI/PM: Restore the status of PCI devices across hibernation) Reported-by: Thomas Gleixner Reported-by: Maarten Lankhorst Tested-by: Thomas Gleixner Tested-by: Maarten Lankhorst Signed-off-by: Rafael J. Wysocki Acked-by: Bjorn Helgaas Signed-off-by: Greg Kroah-Hartman --- drivers/pci/pci-driver.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 11bd267fc1371a..bb0927de79dd7a 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -968,7 +968,12 @@ static int pci_pm_thaw_noirq(struct device *dev) if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); - pci_update_current_state(pci_dev, PCI_D0); + /* + * pci_restore_state() requires the device to be in D0 (because of MSI + * restoration among other things), so force it into D0 in case the + * driver's "freeze" callbacks put it into a low-power state directly. + */ + pci_set_power_state(pci_dev, PCI_D0); pci_restore_state(pci_dev); if (drv && drv->pm && drv->pm->thaw_noirq) From f349652293fb79fbd5768dfc18605200177b74d7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 20 Dec 2017 13:13:58 -0700 Subject: [PATCH 1027/3326] block: unalign call_single_data in struct request commit 4ccafe032005e9b96acbef2e389a4de5b1254add upstream. A previous change blindly added massive alignment to the call_single_data structure in struct request. This ballooned it in size from 296 to 320 bytes on my setup, for no valid reason at all. Use the unaligned struct __call_single_data variant instead. Fixes: 966a967116e69 ("smp: Avoid using two cache lines for struct call_single_data") Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- include/linux/blkdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8da66379f7ea7a..fd47bd96b5d3d8 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t; struct request { struct list_head queuelist; union { - call_single_data_t csd; + struct __call_single_data csd; u64 fifo_time; }; From 3ef1c33f98c82876555813e301aca7a765f28f92 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 20 Dec 2017 11:10:17 -0700 Subject: [PATCH 1028/3326] block-throttle: avoid double charge commit 111be883981748acc9a56e855c8336404a8e787c upstream. If a bio is throttled and split after throttling, the bio could be resubmited and enters the throttling again. This will cause part of the bio to be charged multiple times. If the cgroup has an IO limit, the double charge will significantly harm the performance. The bio split becomes quite common after arbitrary bio size change. To fix this, we always set the BIO_THROTTLED flag if a bio is throttled. If the bio is cloned/split, we copy the flag to new bio too to avoid a double charge. However, cloned bio could be directed to a new disk, keeping the flag be a problem. The observation is we always set new disk for the bio in this case, so we can clear the flag in bio_set_dev(). 
This issue exists for a long time, arbitrary bio size change just makes it worse, so this should go into stable at least since v4.2. V1-> V2: Not add extra field in bio based on discussion with Tejun Cc: Vivek Goyal Acked-by: Tejun Heo Signed-off-by: Shaohua Li Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- block/bio.c | 2 ++ block/blk-throttle.c | 8 +------- include/linux/bio.h | 2 ++ include/linux/blk_types.h | 9 ++++----- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/block/bio.c b/block/bio.c index 33fa6b4af31220..7f978eac9a7ae9 100644 --- a/block/bio.c +++ b/block/bio.c @@ -599,6 +599,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) bio->bi_disk = bio_src->bi_disk; bio->bi_partno = bio_src->bi_partno; bio_set_flag(bio, BIO_CLONED); + if (bio_flagged(bio_src, BIO_THROTTLED)) + bio_set_flag(bio, BIO_THROTTLED); bio->bi_opf = bio_src->bi_opf; bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter = bio_src->bi_iter; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8631763866c6d9..a8cd7b3d964716 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2223,13 +2223,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, out_unlock: spin_unlock_irq(q->queue_lock); out: - /* - * As multiple blk-throtls may stack in the same issue path, we - * don't want bios to leave with the flag set. Clear the flag if - * being issued. - */ - if (!throttled) - bio_clear_flag(bio, BIO_THROTTLED); + bio_set_flag(bio, BIO_THROTTLED); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW if (throttled || !td->track_bio_latency) diff --git a/include/linux/bio.h b/include/linux/bio.h index 275c91c9951633..45f00dd6323c90 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -504,6 +504,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx); #define bio_set_dev(bio, bdev) \ do { \ + if ((bio)->bi_disk != (bdev)->bd_disk) \ + bio_clear_flag(bio, BIO_THROTTLED);\ (bio)->bi_disk = (bdev)->bd_disk; \ (bio)->bi_partno = (bdev)->bd_partno; \ } while (0) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 96ac3815542c14..1c8a8a2aedf713 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -50,8 +50,6 @@ struct blk_issue_stat { struct bio { struct bio *bi_next; /* request queue link */ struct gendisk *bi_disk; - u8 bi_partno; - blk_status_t bi_status; unsigned int bi_opf; /* bottom bits req flags, * top bits REQ_OP. Use * accessors. @@ -59,8 +57,8 @@ struct bio { unsigned short bi_flags; /* status, etc and bvec pool number */ unsigned short bi_ioprio; unsigned short bi_write_hint; - - struct bvec_iter bi_iter; + blk_status_t bi_status; + u8 bi_partno; /* Number of segments in this BIO after * physical address coalescing is performed. @@ -74,8 +72,9 @@ struct bio { unsigned int bi_seg_front_size; unsigned int bi_seg_back_size; - atomic_t __bi_remaining; + struct bvec_iter bi_iter; + atomic_t __bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; From 482b6942a8c1e5110a44787f6e72e0d0775bf4c0 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Tue, 12 Dec 2017 21:25:41 +0100 Subject: [PATCH 1029/3326] parisc: Align os_hpmc_size on word boundary commit 0ed9d3de5f8f97e6efd5ca0e3377cab5f0451ead upstream. The os_hpmc_size variable sometimes wasn't aligned at word boundary and thus triggered the unaligned fault handler at startup. Fix it by aligning it properly. 
Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman --- arch/parisc/kernel/hpmc.S | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S index e3a8e5e4d5de75..8d072c44f300c1 100644 --- a/arch/parisc/kernel/hpmc.S +++ b/arch/parisc/kernel/hpmc.S @@ -305,6 +305,7 @@ ENDPROC_CFI(os_hpmc) __INITRODATA + .align 4 .export os_hpmc_size os_hpmc_size: .word .os_hpmc_end-.os_hpmc From 117b8b85e5777b76128a808533c4f711be564360 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Tue, 12 Dec 2017 21:32:16 +0100 Subject: [PATCH 1030/3326] parisc: Fix indenting in puts() commit 203c110b39a89b48156c7450504e454fedb7f7f6 upstream. Static analysis tools complain that we intended to have curly braces around this indent block. In this case this assumption is wrong, so fix the indenting. Fixes: 2f3c7b8137ef ("parisc: Add core code for self-extracting kernel") Reported-by: Dan Carpenter Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman --- arch/parisc/boot/compressed/misc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index 9345b44b86f036..f57118e1f6b426 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c @@ -123,8 +123,8 @@ int puts(const char *s) while ((nuline = strchr(s, '\n')) != NULL) { if (nuline != s) pdc_iodc_print(s, nuline - s); - pdc_iodc_print("\r\n", 2); - s = nuline + 1; + pdc_iodc_print("\r\n", 2); + s = nuline + 1; } if (*s != '\0') pdc_iodc_print(s, strlen(s)); From 13a41fbd867ae482650aab1cee068e1f7f5ad94a Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Tue, 12 Dec 2017 21:52:26 +0100 Subject: [PATCH 1031/3326] parisc: Hide Diva-built-in serial aux and graphics card commit bcf3f1752a622f1372d3252d0fea8855d89812e7 upstream. Diva GSP card has built-in serial AUX port and ATI graphic card which simply don't work and which both don't have external connectors. User Guides even mention that those devices shouldn't be used. So, prevent that Linux drivers try to enable those devices. Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman --- drivers/parisc/lba_pci.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index a25fed52f7e94d..41b740aed3a346 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask) iounmap(base_addr); } + +/* + * The design of the Diva management card in rp34x0 machines (rp3410, rp3440) + * seems rushed, so that many built-in components simply don't work. + * The following quirks disable the serial AUX port and the built-in ATI RV100 + * Radeon 7000 graphics card which both don't have any external connectors and + * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as + * such makes those machines the only PARISC machines on which we can't use + * ttyS0 as boot console. 
+ */ +static void quirk_diva_ati_card(struct pci_dev *dev) +{ + if (dev->subsystem_vendor != PCI_VENDOR_ID_HP || + dev->subsystem_device != 0x1292) + return; + + dev_info(&dev->dev, "Hiding Diva built-in ATI card"); + dev->device = 0; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY, + quirk_diva_ati_card); + +static void quirk_diva_aux_disable(struct pci_dev *dev) +{ + if (dev->subsystem_vendor != PCI_VENDOR_ID_HP || + dev->subsystem_device != 0x1291) + return; + + dev_info(&dev->dev, "Hiding Diva built-in AUX serial device"); + dev->device = 0; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX, + quirk_diva_aux_disable); From 1b1f78c02c79d8e5398d3a0cbf45c4a3dee2e9ee Mon Sep 17 00:00:00 2001 From: John David Anglin Date: Mon, 13 Nov 2017 19:35:33 -0500 Subject: [PATCH 1032/3326] Revert "parisc: Re-enable interrupts early" commit 9352aeada4d8d8753fc0e414fbfe8fdfcb68a12c upstream. This reverts commit 5c38602d83e584047906b41b162ababd4db4106d. Interrupts can't be enabled early because the register saves are done on the thread stack prior to switching to the IRQ stack. This caused stack overflows and the thread stack needed increasing to 32k. Even then, stack overflows still occasionally occurred. Background: Even with a 32 kB thread stack, I have seen instances where the thread stack overflowed on the mx3210 buildd. Detection of stack overflow only occurs when we have an external interrupt. When an external interrupt occurs, we switch to the thread stack if we are not already on a kernel stack. Then, registers and specials are saved to the kernel stack. The bug occurs in intr_return where interrupts are reenabled prior to returning from the interrupt. This was done incase we need to schedule or deliver signals. However, it introduces the possibility that multiple external interrupts may occur on the thread stack and cause a stack overflow. These might not be detected and cause the kernel to misbehave in random ways. This patch changes the code back to only reenable interrupts when we are going to schedule or deliver signals. As a result, we generally return from an interrupt before reenabling interrupts. This minimizes the growth of the thread stack. Fixes: 5c38602d83e5 ("parisc: Re-enable interrupts early") Signed-off-by: John David Anglin Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman --- arch/parisc/kernel/entry.S | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index a4fd296c958e8e..f3cecf5117cf8a 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -878,9 +878,6 @@ ENTRY_CFI(syscall_exit_rfi) STREG %r19,PT_SR7(%r16) intr_return: - /* NOTE: Need to enable interrupts incase we schedule. */ - ssm PSW_SM_I, %r0 - /* check for reschedule */ mfctl %cr30,%r1 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ @@ -907,6 +904,11 @@ intr_check_sig: LDREG PT_IASQ1(%r16), %r20 cmpib,COND(=),n 0,%r20,intr_restore /* backward */ + /* NOTE: We need to enable interrupts if we have to deliver + * signals. We used to do this earlier but it caused kernel + * stack overflows. */ + ssm PSW_SM_I, %r0 + copy %r0, %r25 /* long in_syscall = 0 */ #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ @@ -958,6 +960,10 @@ intr_do_resched: cmpib,COND(=) 0, %r20, intr_do_preempt nop + /* NOTE: We need to enable interrupts if we schedule. We used + * to do this earlier but it caused kernel stack overflows. 
*/ + ssm PSW_SM_I, %r0 + #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif From e8f28db0f737690e3d7ecb26cfaf60c7af74f7e5 Mon Sep 17 00:00:00 2001 From: Ricardo Ribalda Delgado Date: Tue, 21 Nov 2017 10:09:02 +0100 Subject: [PATCH 1033/3326] spi: xilinx: Detect stall with Unknown commands commit 5a1314fa697fc65cefaba64cd4699bfc3e6882a6 upstream. When the core is configured in C_SPI_MODE > 0, it integrates a lookup table that automatically configures the core in dual or quad mode based on the command (first byte on the tx fifo). Unfortunately, that list mode_?_memoy_*.mif does not contain all the commands supported by the flash. Since 4.14 spi-nor automatically tries to probe the flash using SFDP (command 0x5a), and that command is not part of the list_mode table. With the right combination of C_SPI_MODE and C_SPI_MEMORY this leads to a stall that can only be recovered with a soft reset. This patch detects this kind of stall and returns -EIO to the caller on those commands. spi-nor can handle this error properly: m25p80 spi0.0: Detected stall. Check C_SPI_MODE and C_SPI_MEMORY. 0x21 0x2404 m25p80 spi0.0: SPI transfer failed: -5 spi_master spi0: failed to transfer one message from queue m25p80 spi0.0: s25sl064p (8192 Kbytes) Signed-off-by: Ricardo Ribalda Delgado Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi-xilinx.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index bc7100b93dfcf0..e0b9fe1d0e37d9 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c @@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) while (remaining_words) { int n_words, tx_words, rx_words; u32 sr; + int stalled; n_words = min(remaining_words, xspi->buffer_size); @@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) /* Read out all the data from the Rx FIFO */ rx_words = n_words; + stalled = 10; while (rx_words) { + if (rx_words == n_words && !(stalled--) && + !(sr & XSPI_SR_TX_EMPTY_MASK) && + (sr & XSPI_SR_RX_EMPTY_MASK)) { + dev_err(&spi->dev, + "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n"); + xspi_init_hw(xspi); + return -EIO; + } + if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) { xilinx_spi_rx(xspi); rx_words--; From ed1918e287547d92a795729993ce8e6ca7940eda Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Mon, 27 Nov 2017 15:16:32 +0100 Subject: [PATCH 1034/3326] spi: a3700: Fix clk prescaling for coefficient over 15 commit 251c201bf4f8b5bf4f1ccb4f8920eed2e1f57580 upstream. The Armada 3700 SPI controller has 2 ranges of prescaler coefficients. One ranging from 0 to 15 by steps of 1, and one ranging from 0 to 30 by steps of 2. This commit fixes the prescaler coefficients that are over 15 so that they use the correct range of values. The prescaling coefficient is rounded to the upper value if it is odd. This was tested on Espressobin with spidev and a logical analyser.
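As a worked illustration of the encoding described above (a standalone sketch that mirrors the hunk below; it is not part of the patch itself), a requested coefficient above 15 is rounded up to the next even value and selected through the 0x10 offset of the second range:

#include <stdio.h>

#define A3700_SPI_CLK_EVEN_OFFS 0x10
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Encode a requested prescaler coefficient the way the fix below does:
 * values up to 15 are used as-is, larger values are rounded up to the
 * next even coefficient and placed in the "steps of 2" range. */
static unsigned int a3700_encode_prescale(unsigned int prescale)
{
        if (prescale > 15)
                prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
        return prescale;
}

int main(void)
{
        /* e.g. a requested coefficient of 17 becomes 0x10 + 9 = 0x19,
         * i.e. an effective (even) division by 18 */
        printf("17 -> 0x%x\n", a3700_encode_prescale(17));
        return 0;
}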
Signed-off-by: Maxime Chevallier Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi-armada-3700.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index 568e1c65aa82cb..fe3fa1e8517a1b 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -79,6 +79,7 @@ #define A3700_SPI_BYTE_LEN BIT(5) #define A3700_SPI_CLK_PRESCALE BIT(0) #define A3700_SPI_CLK_PRESCALE_MASK (0x1f) +#define A3700_SPI_CLK_EVEN_OFFS (0x10) #define A3700_SPI_WFIFO_THRS_BIT 28 #define A3700_SPI_RFIFO_THRS_BIT 24 @@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi, prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz); + /* For prescaler values over 15, we can only set it by steps of 2. + * Starting from A3700_SPI_CLK_EVEN_OFFS, we set values from 0 up to + * 30. We only use this range from 16 to 30. + */ + if (prescale > 15) + prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2); + val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); val = val & ~A3700_SPI_CLK_PRESCALE_MASK; From efc9b7ae524dfbd4cb9122d2f1d4e9c33d0f11dd Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Mon, 4 Dec 2017 12:11:02 +0300 Subject: [PATCH 1035/3326] pinctrl: cherryview: Mask all interrupts on Intel_Strago based systems commit d2b3c353595a855794f8b9df5b5bdbe8deb0c413 upstream. Guenter Roeck reported an interrupt storm on a prototype system which is based on Cyan Chromebook. The root cause turned out to be a incorrectly configured pin that triggers spurious interrupts. This will be fixed in coreboot but currently we need to prevent the interrupt storm from happening by masking all interrupts (but not GPEs) on those systems. Link: https://bugzilla.kernel.org/show_bug.cgi?id=197953 Fixes: bcb48cca23ec ("pinctrl: cherryview: Do not mask all interrupts in probe") Reported-and-tested-by: Guenter Roeck Reported-by: Dmitry Torokhov Signed-off-by: Mika Westerberg Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/intel/pinctrl-cherryview.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index fadbca907c7c7e..0907531a02caa2 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1620,6 +1620,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) clear_bit(i, chip->irq_valid_mask); } + /* + * The same set of machines in chv_no_valid_mask[] have incorrectly + * configured GPIOs that generate spurious interrupts so we use + * this same list to apply another quirk for them. + * + * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953. + */ + if (!need_valid_mask) { + /* + * Mask all interrupts the community is able to generate + * but leave the ones that can only generate GPEs unmasked. + */ + chv_writel(GENMASK(31, pctrl->community->nirqs), + pctrl->regs + CHV_INTMASK); + } + /* Clear all interrupts */ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); From 8a74c8e87e46fa85fbd0c73273c831559c25d576 Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Wed, 6 Dec 2017 17:09:49 +0000 Subject: [PATCH 1036/3326] arm64: kvm: Prevent restoring stale PMSCR_EL1 for vcpu commit bfe766cf65fb65e68c4764f76158718560bdcee5 upstream. When VHE is not present, KVM needs to save and restores PMSCR_EL1 when possible. If SPE is used by the host, value of PMSCR_EL1 cannot be saved for the guest. 
If the host starts using SPE between two save+restore on the same vcpu, restore will write the value of PMSCR_EL1 read during the first save. Make sure __debug_save_spe_nvhe clears the value of the saved PMSCR_EL1 when the guest cannot use SPE. Signed-off-by: Julien Thierry Cc: Christoffer Dall Cc: Marc Zyngier Cc: Catalin Marinas Reviewed-by: Will Deacon Reviewed-by: Christoffer Dall Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kvm/hyp/debug-sr.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index f5154ed3da6c94..2add22699764b8 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -84,6 +84,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1) { u64 reg; + /* Clear pmscr in case of early return */ + *pmscr_el1 = 0; + /* SPE present on this CPU? */ if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1), ID_AA64DFR0_PMSVER_SHIFT)) From 4e9cca9267feb13658a3bb6785a5649b76e684e0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 7 Dec 2017 11:45:45 +0000 Subject: [PATCH 1037/3326] KVM: arm/arm64: Fix HYP unmapping going off limits commit 7839c672e58bf62da8f2f0197fefb442c02ba1dd upstream. When we unmap the HYP memory, we try to be clever and unmap one PGD at a time. If we start with a non-PGD aligned address and try to unmap a whole PGD, things go horribly wrong in unmap_hyp_range (addr and end can never match, and it all goes really badly as we keep incrementing pgd and parse random memory as page tables...). The obvious fix is to let unmap_hyp_range do what it does best, which is to iterate over a range. The size of the linear mapping, which begins at PAGE_OFFSET, can be easily calculated by subtracting PAGE_OFFSET form high_memory, because high_memory is defined as the linear map address of the last byte of DRAM, plus one. The size of the vmalloc region is given trivially by VMALLOC_END - VMALLOC_START. 
Reported-by: Andre Przywara Tested-by: Andre Przywara Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- virt/kvm/arm/mmu.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index b36945d49986dd..b4b69c2d101202 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -509,8 +509,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size) */ void free_hyp_pgds(void) { - unsigned long addr; - mutex_lock(&kvm_hyp_pgd_mutex); if (boot_hyp_pgd) { @@ -521,10 +519,10 @@ void free_hyp_pgds(void) if (hyp_pgd) { unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE); - for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) - unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE); - for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) - unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE); + unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET), + (uintptr_t)high_memory - PAGE_OFFSET); + unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START), + VMALLOC_END - VMALLOC_START); free_pages((unsigned long)hyp_pgd, hyp_pgd_order); hyp_pgd = NULL; From 8708f682836827e8c593e370430a3156766b2fe2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Tue, 12 Dec 2017 12:02:04 +0000 Subject: [PATCH 1038/3326] KVM: PPC: Book3S: fix XIVE migration of pending interrupts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit dc1c4165d189350cb51bdd3057deb6ecd164beda upstream. When restoring a pending interrupt, we are setting the Q bit to force a retrigger in xive_finish_unmask(). But we also need to force an EOI in this case to reach the same initial state : P=1, Q=0. This can be done by not setting 'old_p' for pending interrupts which will inform xive_finish_unmask() that an EOI needs to be sent. Fixes: 5af50993850a ("KVM: PPC: Book3S HV: Native usage of the XIVE interrupt controller") Suggested-by: Benjamin Herrenschmidt Signed-off-by: Cédric Le Goater Reviewed-by: Laurent Vivier Tested-by: Laurent Vivier Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_xive.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index bf457843e03217..b5e6d227a03426 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1558,7 +1558,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) /* * Restore P and Q. If the interrupt was pending, we - * force both P and Q, which will trigger a resend. + * force Q and !P, which will trigger a resend. * * That means that a guest that had both an interrupt * pending (queued) and Q set will restore with only @@ -1566,7 +1566,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) * is perfectly fine as coalescing interrupts that haven't * been presented yet is always allowed. 
*/ - if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING) + if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING)) state->old_p = true; if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING) state->old_q = true; From 5aa30b450a8b8b6c8a0921e00e900c0ab9b8c4ae Mon Sep 17 00:00:00 2001 From: Laurent Vivier Date: Tue, 12 Dec 2017 18:23:56 +0100 Subject: [PATCH 1039/3326] KVM: PPC: Book3S HV: Fix pending_pri value in kvmppc_xive_get_icp() commit 7333b5aca412d6ad02667b5a513485838a91b136 upstream. When we migrate a VM from a POWER8 host (XICS) to a POWER9 host (XICS-on-XIVE), we have an error: qemu-kvm: Unable to restore KVM interrupt controller state \ (0xff000000) for CPU 0: Invalid argument This is because kvmppc_xics_set_icp() checks the new state is internaly consistent, and especially: ... 1129 if (xisr == 0) { 1130 if (pending_pri != 0xff) 1131 return -EINVAL; ... On the other side, kvmppc_xive_get_icp() doesn't set neither the pending_pri value, nor the xisr value (set to 0) (and kvmppc_xive_set_icp() ignores the pending_pri value) As xisr is 0, pending_pri must be set to 0xff. Fixes: 5af50993850a ("KVM: PPC: Book3S HV: Native usage of the XIVE interrupt controller") Signed-off-by: Laurent Vivier Acked-by: Benjamin Herrenschmidt Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_xive.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index b5e6d227a03426..0d750d274c4e21 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -725,7 +725,8 @@ u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) /* Return the per-cpu state for state saving/migration */ return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | - (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT; + (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | + (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; } int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) From 41e1386388dc53bc0b9d7744fd1a99567d737fed Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 4 Dec 2017 22:21:30 -0800 Subject: [PATCH 1040/3326] KVM: MMU: Fix infinite loop when there is no available mmu page MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit ed52870f4676489124d8697fd00e6ae6c504e586 upstream. The below test case can cause infinite loop in kvm when ept=0. #include #include #include #include #include #include #include long r[5]; int main() { r[2] = open("/dev/kvm", O_RDONLY); r[3] = ioctl(r[2], KVM_CREATE_VM, 0); r[4] = ioctl(r[3], KVM_CREATE_VCPU, 7); ioctl(r[4], KVM_RUN, 0); } It doesn't setup the memory regions, mmu_alloc_shadow/direct_roots() in kvm return 1 when kvm fails to allocate root page table which can result in beblow infinite loop: vcpu_run() { for (;;) { r = vcpu_enter_guest()::kvm_mmu_reload() returns 1 if (r <= 0) break; if (need_resched()) cond_resched(); } } This patch fixes it by returning -ENOSPC when there is no available kvm mmu page for root page table. 
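To make the loop-exit condition concrete, here is a minimal standalone sketch (ordinary userspace C, not kernel code): vcpu_run() only leaves its loop when the inner call returns a value <= 0, so returning 1 on allocation failure spins forever, while a negative errno such as -ENOSPC propagates out to userspace.

#include <errno.h>
#include <stdio.h>

/* Stand-in for mmu_alloc_*_roots(): before the fix it returned 1 on
 * failure, after the fix it returns -ENOSPC. */
static int mmu_alloc_roots(int pages_available)
{
        if (pages_available <= 0)
                return -ENOSPC;
        return 0;
}

/* Mirrors the loop quoted above: only r <= 0 breaks out. */
static int vcpu_run(void)
{
        for (;;) {
                int r = mmu_alloc_roots(0); /* no memory regions set up */
                if (r <= 0)
                        return r;
        }
}

int main(void)
{
        printf("vcpu_run() = %d\n", vcpu_run());
        return 0;
}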
Cc: Paolo Bonzini Cc: Radim Krčmář Fixes: 26eeb53cf0f (KVM: MMU: Bail out immediately if there is no available mmu page) Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/mmu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 13ebeedcec07b1..0fce8d73403c3b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3382,7 +3382,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if(make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, 0, 0, vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL); @@ -3397,7 +3397,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL); @@ -3437,7 +3437,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, root_gfn, 0, vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL); @@ -3474,7 +3474,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, 0, ACC_ALL); From 6cc3f6f10240df375f6d7457187b467c1817bdad Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 7 Dec 2017 00:30:08 -0800 Subject: [PATCH 1041/3326] KVM: X86: Fix load RFLAGS w/o the fixed bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d73235d17ba63b53dc0e1051dbc10a1f1be91b71 upstream. *** Guest State *** CR0: actual=0x0000000000000030, shadow=0x0000000060000010, gh_mask=fffffffffffffff7 CR4: actual=0x0000000000002050, shadow=0x0000000000000000, gh_mask=ffffffffffffe871 CR3 = 0x00000000fffbc000 RSP = 0x0000000000000000 RIP = 0x0000000000000000 RFLAGS=0x00000000 DR7 = 0x0000000000000400 ^^^^^^^^^^ The failed vmentry is triggered by the following testcase when ept=Y: #include #include #include #include #include #include #include long r[5]; int main() { r[2] = open("/dev/kvm", O_RDONLY); r[3] = ioctl(r[2], KVM_CREATE_VM, 0); r[4] = ioctl(r[3], KVM_CREATE_VCPU, 7); struct kvm_regs regs = { .rflags = 0, }; ioctl(r[4], KVM_SET_REGS, ®s); ioctl(r[4], KVM_RUN, 0); } X86 RFLAGS bit 1 is fixed set, userspace can simply clearing bit 1 of RFLAGS with KVM_SET_REGS ioctl which results in vmentry fails. This patch fixes it by oring X86_EFLAGS_FIXED during ioctl. 
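A minimal standalone sketch of the invariant the fix enforces (illustrative only; the constant matches the architectural always-set bit 1 of RFLAGS): whatever userspace passes through KVM_SET_REGS, bit 1 is ORed back in before the value is loaded into the guest.

#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_FIXED 0x00000002UL /* bit 1, architecturally always set */

/* What the fixed ioctl path effectively does with the user-supplied value. */
static uint64_t sanitize_rflags(uint64_t user_rflags)
{
        return user_rflags | X86_EFLAGS_FIXED;
}

int main(void)
{
        /* The failing testcase passes .rflags = 0; after the fix the
         * value programmed for the guest is 0x2, a valid RFLAGS. */
        printf("0 -> 0x%llx\n", (unsigned long long)sanitize_rflags(0));
        return 0;
}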
Suggested-by: Jim Mattson Reviewed-by: David Hildenbrand Reviewed-by: Quan Xu Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Jim Mattson Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index df62cdc7a2582e..075619a92ce71d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7359,7 +7359,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) #endif kvm_rip_write(vcpu, regs->rip); - kvm_set_rflags(vcpu, regs->rflags); + kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); vcpu->arch.exception.pending = false; From 6461005967ed1a92ad67f8d864fdb3f0794a8682 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 21 Dec 2017 00:49:14 +0100 Subject: [PATCH 1042/3326] kvm: x86: fix RSM when PCID is non-zero commit fae1a3e775cca8c3a9e0eb34443b310871a15a92 upstream. rsm_load_state_64() and rsm_enter_protected_mode() load CR3, then CR4 & ~PCIDE, then CR0, then CR4. However, setting CR4.PCIDE fails if CR3[11:0] != 0. It's probably easier in the long run to replace rsm_enter_protected_mode() with an emulator callback that sets all the special registers (like KVM_SET_SREGS would do). For now, set the PCID field of CR3 only after CR4.PCIDE is 1. Reported-by: Laszlo Ersek Tested-by: Laszlo Ersek Fixes: 660a5d517aaab9187f93854425c4c63f4a09195c Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/emulate.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index d90cdc77e07735..7bbb5da2b49da0 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2404,9 +2404,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) } static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, - u64 cr0, u64 cr4) + u64 cr0, u64 cr3, u64 cr4) { int bad; + u64 pcid; + + /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */ + pcid = 0; + if (cr4 & X86_CR4_PCIDE) { + pcid = cr3 & 0xfff; + cr3 &= ~0xfff; + } + + bad = ctxt->ops->set_cr(ctxt, 3, cr3); + if (bad) + return X86EMUL_UNHANDLEABLE; /* * First enable PAE, long mode needs it before CR0.PG = 1 is set. 
@@ -2425,6 +2437,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, bad = ctxt->ops->set_cr(ctxt, 4, cr4); if (bad) return X86EMUL_UNHANDLEABLE; + if (pcid) { + bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid); + if (bad) + return X86EMUL_UNHANDLEABLE; + } + } return X86EMUL_CONTINUE; @@ -2435,11 +2453,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) struct desc_struct desc; struct desc_ptr dt; u16 selector; - u32 val, cr0, cr4; + u32 val, cr0, cr3, cr4; int i; cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); - ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8)); + cr3 = GET_SMSTATE(u32, smbase, 0x7ff8); ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED; ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0); @@ -2481,14 +2499,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8)); - return rsm_enter_protected_mode(ctxt, cr0, cr4); + return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); } static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) { struct desc_struct desc; struct desc_ptr dt; - u64 val, cr0, cr4; + u64 val, cr0, cr3, cr4; u32 base3; u16 selector; int i, r; @@ -2505,7 +2523,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); cr0 = GET_SMSTATE(u64, smbase, 0x7f58); - ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50)); + cr3 = GET_SMSTATE(u64, smbase, 0x7f50); cr4 = GET_SMSTATE(u64, smbase, 0x7f48); ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00)); val = GET_SMSTATE(u64, smbase, 0x7ed0); @@ -2533,7 +2551,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) dt.address = GET_SMSTATE(u64, smbase, 0x7e68); ctxt->ops->set_gdt(ctxt, &dt); - r = rsm_enter_protected_mode(ctxt, cr0, cr4); + r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); if (r != X86EMUL_CONTINUE) return r; From e2d769198ff7cb3eecb60a427706d498d87aa0a0 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Mon, 18 Dec 2017 11:57:51 +0800 Subject: [PATCH 1043/3326] clk: sunxi: sun9i-mmc: Implement reset callback for reset controls commit 61d2f2a05765a5f57149efbd93e3e81a83cbc2c1 upstream. Our MMC host driver now issues a reset, instead of just deasserting the reset control, since commit c34eda69ad4c ("mmc: sunxi: Reset the device at probe time"). The sun9i-mmc clock driver does not support this, and will fail, which results in MMC not probing. This patch implements the reset callback by asserting the reset control, then deasserting it after a small delay. 
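Seen from the consumer side, the dependency looks roughly like the sketch below (the probe function and its name are hypothetical, not the actual sunxi-mmc code): reset_control_reset() can only succeed when the reset provider, here the sun9i-mmc clock driver, implements the .reset op that this patch adds.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Hypothetical consumer: issue a full reset pulse at probe time, as the
 * MMC host driver now does, instead of only deasserting the line. */
static int example_mmc_probe(struct device *dev)
{
        struct reset_control *rst = devm_reset_control_get(dev, NULL);

        if (IS_ERR(rst))
                return PTR_ERR(rst);

        /* Fails unless the reset provider implements .reset
         * (assert + short delay + deassert, as added below). */
        return reset_control_reset(rst);
}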
Fixes: 7a6fca879f59 ("clk: sunxi: Add driver for A80 MMC config clocks/resets") Signed-off-by: Chen-Yu Tsai Acked-by: Philipp Zabel Acked-by: Maxime Ripard Signed-off-by: Michael Turquette Link: lkml.kernel.org/r/20171218035751.20661-1-wens@csie.org Signed-off-by: Greg Kroah-Hartman --- drivers/clk/sunxi/clk-sun9i-mmc.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c index 6041bdba2e971e..f69f9e8c6f3806 100644 --- a/drivers/clk/sunxi/clk-sun9i-mmc.c +++ b/drivers/clk/sunxi/clk-sun9i-mmc.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev, return 0; } +static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + sun9i_mmc_reset_assert(rcdev, id); + udelay(10); + sun9i_mmc_reset_deassert(rcdev, id); + + return 0; +} + static const struct reset_control_ops sun9i_mmc_reset_ops = { .assert = sun9i_mmc_reset_assert, .deassert = sun9i_mmc_reset_deassert, + .reset = sun9i_mmc_reset_reset, }; static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev) From a971d10f67df27a16cce5084092e151dba3e037d Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Tue, 12 Dec 2017 17:59:15 +0530 Subject: [PATCH 1044/3326] powerpc/perf: Dereference BHRB entries safely commit f41d84dddc66b164ac16acf3f584c276146f1c48 upstream. It's theoretically possible that branch instructions recorded in BHRB (Branch History Rolling Buffer) entries have already been unmapped before they are processed by the kernel. Hence, trying to dereference such memory location will result in a crash. eg: Unable to handle kernel paging request for data at address 0xd000000019c41764 Faulting instruction address: 0xc000000000084a14 NIP [c000000000084a14] branch_target+0x4/0x70 LR [c0000000000eb828] record_and_restart+0x568/0x5c0 Call Trace: [c0000000000eb3b4] record_and_restart+0xf4/0x5c0 (unreliable) [c0000000000ec378] perf_event_interrupt+0x298/0x460 [c000000000027964] performance_monitor_exception+0x54/0x70 [c000000000009ba4] performance_monitor_common+0x114/0x120 Fix it by deferefencing the addresses safely. Fixes: 691231846ceb ("powerpc/perf: Fix setting of "to" addresses for BHRB") Suggested-by: Naveen N. Rao Signed-off-by: Ravi Bangoria Reviewed-by: Naveen N. Rao [mpe: Use probe_kernel_read() which is clearer, tweak change log] Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/perf/core-book3s.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 9e3da168d54cdc..b4209a68b85da2 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -410,8 +410,12 @@ static __u64 power_pmu_bhrb_to(u64 addr) int ret; __u64 target; - if (is_kernel_addr(addr)) - return branch_target((unsigned int *)addr); + if (is_kernel_addr(addr)) { + if (probe_kernel_read(&instr, (void *)addr, sizeof(instr))) + return 0; + + return branch_target(&instr); + } /* Userspace: need copy instruction here then translate it */ pagefault_disable(); From 6209cb514d977e323a7dad1b0ee7e83fc906215d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 4 Dec 2017 13:25:13 +0000 Subject: [PATCH 1045/3326] drm/i915: Flush pending GTT writes before unbinding commit 2797c4a11f373b2545c2398ccb02e362ee66a142 upstream. 
From the shrinker paths, we want to relinquish the GPU and GGTT access to the object, releasing the backing storage back to the system for swapout. As a part of that process we would unpin the pages, marking them for access by the CPU (for the swapout/swapin). However, if that process was interrupted after unbind the vma, we missed a flush of the inflight GGTT writes before we made that GTT space available again for reuse, with the prospect that we would redirect them to another page. The bug dates back to the introduction of multiple GGTT vma, but the code itself dates to commit 02bef8f98d26 ("drm/i915: Unbind closed vma for i915_gem_object_unbind()"). Fixes: 02bef8f98d26 ("drm/i915: Unbind closed vma for i915_gem_object_unbind()") Fixes: c5ad54cf7dd8 ("drm/i915: Use partial view in mmap fault handler") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20171204132513.7303-1-chris@chris-wilson.co.uk (cherry picked from commit 5888fc9eac3c2ff96e76aeeb865fdb46ab2d711e) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/i915_gem.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dc1faa49687d14..3b2c0538e48d7b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -325,17 +325,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) * must wait for all rendering to complete to the object (as unbinding * must anyway), and retire the requests. */ - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - NULL); + ret = i915_gem_object_set_to_cpu_domain(obj, false); if (ret) return ret; - i915_gem_retire_requests(to_i915(obj->base.dev)); - while ((vma = list_first_entry_or_null(&obj->vma_list, struct i915_vma, obj_link))) { From e7681f90a45a8f0eb6da8c0dc68a8334e3ed2251 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 7 Dec 2017 16:58:50 +0100 Subject: [PATCH 1046/3326] drm/sun4i: Fix error path handling commit 92411f6d7f1afcc95e54295d40e96a75385212ec upstream. The commit 4c7f16d14a33 ("drm/sun4i: Fix TCON clock and regmap initialization sequence") moved a bunch of logic around, but forgot to update the gotos after the introduction of the err_free_dotclock label. It means that if we fail later that the one introduced in that commit, we'll just to the old label which isn't free the clock we created. This will result in a breakage as soon as someone tries to do something with that clock, since its resources will have been long reclaimed. 
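The label-ordering mistake generalizes; the following self-contained sketch (illustrative names only, not the actual sun4i_tcon functions) shows why a failure after the dot clock has been created must unwind through the label that releases it before falling through to the older cleanup label.

#include <stdio.h>

static int create_clocks(void)   { return 0; }
static int create_dotclock(void) { return 0; }
static int create_crtc(void)     { return -1; } /* simulate the late failure */
static void free_dotclock(void)  { puts("dot clock released"); }
static void free_clocks(void)    { puts("clocks released"); }

static int example_bind(void)
{
        int ret;

        ret = create_clocks();
        if (ret)
                return ret;

        ret = create_dotclock();
        if (ret)
                goto err_free_clocks;

        ret = create_crtc();
        if (ret)
                goto err_free_dotclock; /* was err_free_clocks before the fix */

        return 0;

err_free_dotclock:
        free_dotclock();
err_free_clocks:
        free_clocks();
        return ret;
}

int main(void)
{
        return example_bind() ? 1 : 0;
}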
Fixes: 4c7f16d14a33 ("drm/sun4i: Fix TCON clock and regmap initialization sequence") Reviewed-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/f83c1cebc731f0b4251f5ddd7b38c718cd79bb0b.1512662253.git-series.maxime.ripard@free-electrons.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/sun4i/sun4i_tcon.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index d9791292553ef3..7b909d814d38c4 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -567,12 +567,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, if (IS_ERR(tcon->crtc)) { dev_err(dev, "Couldn't create our CRTC\n"); ret = PTR_ERR(tcon->crtc); - goto err_free_clocks; + goto err_free_dotclock; } ret = sun4i_rgb_init(drm, tcon); if (ret < 0) - goto err_free_clocks; + goto err_free_dotclock; list_add_tail(&tcon->list, &drv->tcon_list); From 6d80b15a226e59ebbc3ca8c14b65885bc8ffb2b9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 4 Dec 2017 14:07:43 -0800 Subject: [PATCH 1047/3326] libnvdimm, dax: fix 1GB-aligned namespaces vs physical misalignment commit 41fce90f26333c4fa82e8e43b9ace86c4e8a0120 upstream. The following namespace configuration attempt: # ndctl create-namespace -e namespace0.0 -m devdax -a 1G -f libndctl: ndctl_dax_enable: dax0.1: failed to enable Error: namespace0.0: failed to enable failed to reconfigure namespace: No such device or address ...fails when the backing memory range is not physically aligned to 1G: # cat /proc/iomem | grep Persistent 210000000-30fffffff : Persistent Memory (legacy) In the above example the 4G persistent memory range starts and ends on a 256MB boundary. We handle this case correctly when needing to handle cases that violate section alignment (128MB) collisions against "System RAM", and we simply need to extend that padding/truncation for the 1GB alignment use case. Fixes: 315c562536c4 ("libnvdimm, pfn: add 'align' attribute...") Reported-and-tested-by: Jane Chu Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/pfn_devs.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 65cc171c721de8..5ba9ab7af73645 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -582,6 +582,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn, return altmap; } +static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys) +{ + return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys), + ALIGN_DOWN(phys, nd_pfn->align)); +} + static int nd_pfn_init(struct nd_pfn *nd_pfn) { u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? 
SZ_128K : 0; @@ -637,13 +643,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) start = nsio->res.start; size = PHYS_SECTION_ALIGN_UP(start + size) - start; if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, - IORES_DESC_NONE) == REGION_MIXED) { + IORES_DESC_NONE) == REGION_MIXED + || !IS_ALIGNED(start + resource_size(&nsio->res), + nd_pfn->align)) { size = resource_size(&nsio->res); - end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size); + end_trunc = start + size - phys_pmem_align_down(nd_pfn, + start + size); } if (start_pad + end_trunc) - dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n", + dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n", dev_name(&ndns->dev), start_pad + end_trunc); /* From 166f39bc340d0eb739d31ddd347df593d7b6ff97 Mon Sep 17 00:00:00 2001 From: Vishal Verma Date: Mon, 18 Dec 2017 09:28:39 -0700 Subject: [PATCH 1048/3326] libnvdimm, btt: Fix an incompatibility in the log layout commit 24e3a7fb60a9187e5df90e5fa655ffc94b9c4f77 upstream. Due to a spec misinterpretation, the Linux implementation of the BTT log area had different padding scheme from other implementations, such as UEFI and NVML. This fixes the padding scheme, and defaults to it for new BTT layouts. We attempt to detect the padding scheme in use when probing for an existing BTT. If we detect the older/incompatible scheme, we continue using it. Reported-by: Juston Li Cc: Dan Williams Fixes: 5212e11fde4d ("nd_btt: atomic sector updates") Signed-off-by: Vishal Verma Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/btt.c | 201 +++++++++++++++++++++++++++++++++++-------- drivers/nvdimm/btt.h | 45 +++++++++- 2 files changed, 211 insertions(+), 35 deletions(-) diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index d5612bd1cc81cc..09428ebd315b22 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -210,12 +210,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, return ret; } -static int btt_log_read_pair(struct arena_info *arena, u32 lane, - struct log_entry *ent) +static int btt_log_group_read(struct arena_info *arena, u32 lane, + struct log_group *log) { return arena_read_bytes(arena, - arena->logoff + (2 * lane * LOG_ENT_SIZE), ent, - 2 * LOG_ENT_SIZE, 0); + arena->logoff + (lane * LOG_GRP_SIZE), log, + LOG_GRP_SIZE, 0); } static struct dentry *debugfs_root; @@ -255,6 +255,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent, debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff); debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off); debugfs_create_x32("flags", S_IRUGO, d, &a->flags); + debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]); + debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]); } static void btt_debugfs_init(struct btt *btt) @@ -273,6 +275,11 @@ static void btt_debugfs_init(struct btt *btt) } } +static u32 log_seq(struct log_group *log, int log_idx) +{ + return le32_to_cpu(log->ent[log_idx].seq); +} + /* * This function accepts two log entries, and uses the * sequence number to find the 'older' entry. @@ -282,8 +289,10 @@ static void btt_debugfs_init(struct btt *btt) * * TODO The logic feels a bit kludge-y. make it better.. 
*/ -static int btt_log_get_old(struct log_entry *ent) +static int btt_log_get_old(struct arena_info *a, struct log_group *log) { + int idx0 = a->log_index[0]; + int idx1 = a->log_index[1]; int old; /* @@ -291,23 +300,23 @@ static int btt_log_get_old(struct log_entry *ent) * the next time, the following logic works out to put this * (next) entry into [1] */ - if (ent[0].seq == 0) { - ent[0].seq = cpu_to_le32(1); + if (log_seq(log, idx0) == 0) { + log->ent[idx0].seq = cpu_to_le32(1); return 0; } - if (ent[0].seq == ent[1].seq) + if (log_seq(log, idx0) == log_seq(log, idx1)) return -EINVAL; - if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5) + if (log_seq(log, idx0) + log_seq(log, idx1) > 5) return -EINVAL; - if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) { - if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1) + if (log_seq(log, idx0) < log_seq(log, idx1)) { + if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1) old = 0; else old = 1; } else { - if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1) + if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1) old = 1; else old = 0; @@ -327,17 +336,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane, { int ret; int old_ent, ret_ent; - struct log_entry log[2]; + struct log_group log; - ret = btt_log_read_pair(arena, lane, log); + ret = btt_log_group_read(arena, lane, &log); if (ret) return -EIO; - old_ent = btt_log_get_old(log); + old_ent = btt_log_get_old(arena, &log); if (old_ent < 0 || old_ent > 1) { dev_err(to_dev(arena), "log corruption (%d): lane %d seq [%d, %d]\n", - old_ent, lane, log[0].seq, log[1].seq); + old_ent, lane, log.ent[arena->log_index[0]].seq, + log.ent[arena->log_index[1]].seq); /* TODO set error state? */ return -EIO; } @@ -345,7 +355,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane, ret_ent = (old_flag ? old_ent : (1 - old_ent)); if (ent != NULL) - memcpy(ent, &log[ret_ent], LOG_ENT_SIZE); + memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE); return ret_ent; } @@ -359,17 +369,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane, u32 sub, struct log_entry *ent, unsigned long flags) { int ret; - /* - * Ignore the padding in log_entry for calculating log_half. - * The entry is 'committed' when we write the sequence number, - * and we want to ensure that that is the last thing written. 
- * We don't bother writing the padding as that would be extra - * media wear and write amplification - */ - unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2; - u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE); + u32 group_slot = arena->log_index[sub]; + unsigned int log_half = LOG_ENT_SIZE / 2; void *src = ent; + u64 ns_off; + ns_off = arena->logoff + (lane * LOG_GRP_SIZE) + + (group_slot * LOG_ENT_SIZE); /* split the 16B write into atomic, durable halves */ ret = arena_write_bytes(arena, ns_off, src, log_half, flags); if (ret) @@ -452,7 +458,7 @@ static int btt_log_init(struct arena_info *arena) { size_t logsize = arena->info2off - arena->logoff; size_t chunk_size = SZ_4K, offset = 0; - struct log_entry log; + struct log_entry ent; void *zerobuf; int ret; u32 i; @@ -484,11 +490,11 @@ static int btt_log_init(struct arena_info *arena) } for (i = 0; i < arena->nfree; i++) { - log.lba = cpu_to_le32(i); - log.old_map = cpu_to_le32(arena->external_nlba + i); - log.new_map = cpu_to_le32(arena->external_nlba + i); - log.seq = cpu_to_le32(LOG_SEQ_INIT); - ret = __btt_log_write(arena, i, 0, &log, 0); + ent.lba = cpu_to_le32(i); + ent.old_map = cpu_to_le32(arena->external_nlba + i); + ent.new_map = cpu_to_le32(arena->external_nlba + i); + ent.seq = cpu_to_le32(LOG_SEQ_INIT); + ret = __btt_log_write(arena, i, 0, &ent, 0); if (ret) goto free; } @@ -593,6 +599,123 @@ static int btt_freelist_init(struct arena_info *arena) return 0; } +static bool ent_is_padding(struct log_entry *ent) +{ + return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0) + && (ent->seq == 0); +} + +/* + * Detecting valid log indices: We read a log group (see the comments in btt.h + * for a description of a 'log_group' and its 'slots'), and iterate over its + * four slots. We expect that a padding slot will be all-zeroes, and use this + * to detect a padding slot vs. an actual entry. + * + * If a log_group is in the initial state, i.e. hasn't been used since the + * creation of this BTT layout, it will have three of the four slots with + * zeroes. We skip over these log_groups for the detection of log_index. If + * all log_groups are in the initial state (i.e. the BTT has never been + * written to), it is safe to assume the 'new format' of log entries in slots + * (0, 1). + */ +static int log_set_indices(struct arena_info *arena) +{ + bool idx_set = false, initial_state = true; + int ret, log_index[2] = {-1, -1}; + u32 i, j, next_idx = 0; + struct log_group log; + u32 pad_count = 0; + + for (i = 0; i < arena->nfree; i++) { + ret = btt_log_group_read(arena, i, &log); + if (ret < 0) + return ret; + + for (j = 0; j < 4; j++) { + if (!idx_set) { + if (ent_is_padding(&log.ent[j])) { + pad_count++; + continue; + } else { + /* Skip if index has been recorded */ + if ((next_idx == 1) && + (j == log_index[0])) + continue; + /* valid entry, record index */ + log_index[next_idx] = j; + next_idx++; + } + if (next_idx == 2) { + /* two valid entries found */ + idx_set = true; + } else if (next_idx > 2) { + /* too many valid indices */ + return -ENXIO; + } + } else { + /* + * once the indices have been set, just verify + * that all subsequent log groups are either in + * their initial state or follow the same + * indices. 
+ */ + if (j == log_index[0]) { + /* entry must be 'valid' */ + if (ent_is_padding(&log.ent[j])) + return -ENXIO; + } else if (j == log_index[1]) { + ; + /* + * log_index[1] can be padding if the + * lane never got used and it is still + * in the initial state (three 'padding' + * entries) + */ + } else { + /* entry must be invalid (padding) */ + if (!ent_is_padding(&log.ent[j])) + return -ENXIO; + } + } + } + /* + * If any of the log_groups have more than one valid, + * non-padding entry, then the we are no longer in the + * initial_state + */ + if (pad_count < 3) + initial_state = false; + pad_count = 0; + } + + if (!initial_state && !idx_set) + return -ENXIO; + + /* + * If all the entries in the log were in the initial state, + * assume new padding scheme + */ + if (initial_state) + log_index[1] = 1; + + /* + * Only allow the known permutations of log/padding indices, + * i.e. (0, 1), and (0, 2) + */ + if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2))) + ; /* known index possibilities */ + else { + dev_err(to_dev(arena), "Found an unknown padding scheme\n"); + return -ENXIO; + } + + arena->log_index[0] = log_index[0]; + arena->log_index[1] = log_index[1]; + dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]); + dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]); + return 0; +} + static int btt_rtt_init(struct arena_info *arena) { arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL); @@ -649,8 +772,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, available -= 2 * BTT_PG_SIZE; /* The log takes a fixed amount of space based on nfree */ - logsize = roundup(2 * arena->nfree * sizeof(struct log_entry), - BTT_PG_SIZE); + logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE); available -= logsize; /* Calculate optimal split between map and data area */ @@ -667,6 +789,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, arena->mapoff = arena->dataoff + datasize; arena->logoff = arena->mapoff + mapsize; arena->info2off = arena->logoff + logsize; + + /* Default log indices are (0,1) */ + arena->log_index[0] = 0; + arena->log_index[1] = 1; return arena; } @@ -757,6 +883,13 @@ static int discover_arenas(struct btt *btt) arena->external_lba_start = cur_nlba; parse_arena_meta(arena, super, cur_off); + ret = log_set_indices(arena); + if (ret) { + dev_err(to_dev(arena), + "Unable to deduce log/padding indices\n"); + goto out; + } + mutex_init(&arena->err_lock); ret = btt_freelist_init(arena); if (ret) diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h index 578c2057524d39..2609683c416796 100644 --- a/drivers/nvdimm/btt.h +++ b/drivers/nvdimm/btt.h @@ -27,6 +27,7 @@ #define MAP_ERR_MASK (1 << MAP_ERR_SHIFT) #define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT))) #define MAP_ENT_NORMAL 0xC0000000 +#define LOG_GRP_SIZE sizeof(struct log_group) #define LOG_ENT_SIZE sizeof(struct log_entry) #define ARENA_MIN_SIZE (1UL << 24) /* 16 MB */ #define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */ @@ -50,12 +51,52 @@ enum btt_init_state { INIT_READY }; +/* + * A log group represents one log 'lane', and consists of four log entries. + * Two of the four entries are valid entries, and the remaining two are + * padding. Due to an old bug in the padding location, we need to perform a + * test to determine the padding scheme being used, and use that scheme + * thereafter. 
+ * + * In kernels prior to 4.15, 'log group' would have actual log entries at + * indices (0, 2) and padding at indices (1, 3), where as the correct/updated + * format has log entries at indices (0, 1) and padding at indices (2, 3). + * + * Old (pre 4.15) format: + * +-----------------+-----------------+ + * | ent[0] | ent[1] | + * | 16B | 16B | + * | lba/old/new/seq | pad | + * +-----------------------------------+ + * | ent[2] | ent[3] | + * | 16B | 16B | + * | lba/old/new/seq | pad | + * +-----------------+-----------------+ + * + * New format: + * +-----------------+-----------------+ + * | ent[0] | ent[1] | + * | 16B | 16B | + * | lba/old/new/seq | lba/old/new/seq | + * +-----------------------------------+ + * | ent[2] | ent[3] | + * | 16B | 16B | + * | pad | pad | + * +-----------------+-----------------+ + * + * We detect during start-up which format is in use, and set + * arena->log_index[(0, 1)] with the detected format. + */ + struct log_entry { __le32 lba; __le32 old_map; __le32 new_map; __le32 seq; - __le64 padding[2]; +}; + +struct log_group { + struct log_entry ent[4]; }; struct btt_sb { @@ -125,6 +166,7 @@ struct aligned_lock { * @list: List head for list of arenas * @debugfs_dir: Debugfs dentry * @flags: Arena flags - may signify error states. + * @log_index: Indices of the valid log entries in a log_group * * arena_info is a per-arena handle. Once an arena is narrowed down for an * IO, this struct is passed around for the duration of the IO. @@ -157,6 +199,7 @@ struct arena_info { /* Arena flags */ u32 flags; struct mutex err_lock; + int log_index[2]; }; /** From 01b1a29e32c13c69c0fd8a70e6cd66ac91316862 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 19 Dec 2017 15:07:10 -0800 Subject: [PATCH 1049/3326] libnvdimm, pfn: fix start_pad handling for aligned namespaces commit 19deaa217bc04e83b59b5e8c8229eb0e53ad9efc upstream. The alignment checks at pfn driver startup fail to properly account for the 'start_pad' in the case where the namespace is misaligned relative to its internal alignment. This is typically triggered in 1G aligned namespace, but could theoretically trigger with small namespace alignments. 
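For clarity, the corrected condition can be sketched as follows. This is an illustrative fragment only, reusing the names that appear in the hunk further below (pfn_sb, nsio, align, start_pad); the exact error path in nd_pfn_validate() should be taken from the diff itself.

	/* Fields read from the pfn superblock, as in nd_pfn_validate(). */
	align     = le32_to_cpu(pfn_sb->align);
	offset    = le64_to_cpu(pfn_sb->dataoff);
	start_pad = le32_to_cpu(pfn_sb->start_pad);

	/*
	 * The namespace may itself start at an address that is not aligned
	 * to 'align', so the check has to be applied to the absolute start
	 * of the data area (resource start + offset + start_pad), not to
	 * the relative 'offset' alone.
	 */
	if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
		dev_err(&nd_pfn->dev,
			"bad offset: %#llx dax disabled align: %#lx\n",
			offset, align);
		return -ENXIO;	/* illustrative; see the real hunk below */
	}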
When this triggers the kernel reports messages of the form: dax2.1: bad offset: 0x3c000000 dax disabled align: 0x40000000 Fixes: 1ee6667cd8d1 ("libnvdimm, pfn, dax: fix initialization vs autodetect...") Reported-by: Jane Chu Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/pfn_devs.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 5ba9ab7af73645..2adada1a585517 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -364,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region) int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) { u64 checksum, offset; - unsigned long align; enum nd_pfn_mode mode; struct nd_namespace_io *nsio; + unsigned long align, start_pad; struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; struct nd_namespace_common *ndns = nd_pfn->ndns; const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev); @@ -410,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) align = le32_to_cpu(pfn_sb->align); offset = le64_to_cpu(pfn_sb->dataoff); + start_pad = le32_to_cpu(pfn_sb->start_pad); if (align == 0) align = 1UL << ilog2(offset); mode = le32_to_cpu(pfn_sb->mode); @@ -468,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) return -EBUSY; } - if ((align && !IS_ALIGNED(offset, align)) + if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align)) || !IS_ALIGNED(offset, PAGE_SIZE)) { dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled align: %#lx\n", From b7c0181c47c4369cfccd97bca7e9e211026aa94c Mon Sep 17 00:00:00 2001 From: Yelena Krivosheev Date: Tue, 19 Dec 2017 17:59:45 +0100 Subject: [PATCH 1050/3326] net: mvneta: clear interface link status on port disable commit 4423c18e466afdfb02a36ee8b9f901d144b3c607 upstream. When port connect to PHY in polling mode (with poll interval 1 sec), port and phy link status must be synchronize in order don't loss link change event. [gregory.clement@free-electrons.com: add fixes tag] Fixes: c5aff18204da ("net: mvneta: driver for Marvell Armada 370/XP network unit") Signed-off-by: Yelena Krivosheev Tested-by: Dmitri Epshtein Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/marvell/mvneta.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index bc93b69cfd1edc..16b2bfb2cf5172 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp) val &= ~MVNETA_GMAC0_PORT_ENABLE; mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); + pp->link = 0; + pp->duplex = -1; + pp->speed = 0; + udelay(200); } From c00802d53ddf3724602c1da0344c6fcaf9001358 Mon Sep 17 00:00:00 2001 From: Yelena Krivosheev Date: Tue, 19 Dec 2017 17:59:46 +0100 Subject: [PATCH 1051/3326] net: mvneta: use proper rxq_number in loop on rx queues commit ca5902a6547f662419689ca28b3c29a772446caa upstream. When adding the RX queue association with each CPU, a typo was made in the mvneta_cleanup_rxqs() function. This patch fixes it. [gregory.clement@free-electrons.com: add commit log and fixes tag] Fixes: 2dcf75e2793c ("net: mvneta: Associate RX queues with each CPU") Signed-off-by: Yelena Krivosheev Tested-by: Dmitri Epshtein Signed-off-by: Gregory CLEMENT Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/marvell/mvneta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 16b2bfb2cf5172..1e0835655c936a 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3015,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp) { int queue; - for (queue = 0; queue < txq_number; queue++) + for (queue = 0; queue < rxq_number; queue++) mvneta_rxq_deinit(pp, &pp->rxqs[queue]); } From 646809937c02396f06a38a38ac686eff1b5e85fb Mon Sep 17 00:00:00 2001 From: Yelena Krivosheev Date: Tue, 19 Dec 2017 17:59:47 +0100 Subject: [PATCH 1052/3326] net: mvneta: eliminate wrong call to handle rx descriptor error commit 2eecb2e04abb62ef8ea7b43e1a46bdb5b99d1bf8 upstream. There are few reasons in mvneta_rx_swbm() function when received packet is dropped. mvneta_rx_error() should be called only if error bit [16] is set in rx descriptor. [gregory.clement@free-electrons.com: add fixes tag] Fixes: dc35a10f68d3 ("net: mvneta: bm: add support for hardware buffer management") Signed-off-by: Yelena Krivosheev Tested-by: Dmitri Epshtein Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/marvell/mvneta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 1e0835655c936a..a539263cd79ce4 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1962,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo, if (!mvneta_rxq_desc_is_first_last(rx_status) || (rx_status & MVNETA_RXD_ERR_SUMMARY)) { + mvneta_rx_error(pp, rx_desc); err_drop_frame: dev->stats.rx_errors++; - mvneta_rx_error(pp, rx_desc); /* leave the descriptor untouched */ continue; } From af1eddcc176e36104f50abf32b14cb979385d9f6 Mon Sep 17 00:00:00 2001 From: John Einar Reitan Date: Sun, 24 Dec 2017 00:03:44 +0100 Subject: [PATCH 1053/3326] Revert "ipmi_si: fix memory leak on new_smi" This reverts commit c97e41076a298dbc4e910c33048e553658388eed, which incorrectly was taken from upstream c0a32fe13cd323ca9420500b16fd69589c9ba91e. The referenced memory leak doesn't exist on the 4.14 stable branch as the new logic of doing the kzalloc hasn't moved to this function. By adding this kfree we actually end up doing double kfree as all callers of smi_add does a kfree on error. Sample with SLAB_FREELIST_HARDENED=y: ipmi_si: Adding ACPI-specified kcs state machine IPMI System Interface driver. ipmi_si: probing via SPMI ipmi_si: SPMI: io 0xca2 regsize 1 spacing 1 irq 0 (NULL device *): SPMI-specified kcs state machine: duplicate ------------[ cut here ]------------ kernel BUG at mm/slub.c:295! 
invalid opcode: 0000 [#1] SMP Modules linked in: CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.14.8-gentoo-r1 #5 Hardware name: Supermicro X9SCL/X9SCM/X9SCL/X9SCM, BIOS 2.2 02/20/2015 task: ffff88080c208000 task.stack: ffffc90000020000 RIP: 0010:kfree+0xf5/0x157 RSP: 0000:ffffc90000023e58 EFLAGS: 00010246 RAX: ffff88080b2e6200 RBX: ffff88080b2e6200 RCX: ffff88080b2e6200 RDX: 000000000000008e RSI: ffff88082fc1cd60 RDI: ffff88080c003080 RBP: ffffc90000002808 R08: 000000000001cd60 R09: ffffffff814da10e R10: ffffea00202cb980 R11: 000000000000005c R12: ffffffff814da10e R13: 00000000ffffffed R14: ffffffff82317bd0 R15: 0000000000000003 FS: 0000000000000000(0000) GS:ffff88082fc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 0000000002e09001 CR4: 00000000001606f0 Call Trace: init_ipmi_si+0x493/0x5c7 ? cleanup_ipmi_si+0x84/0x84 ? set_debug_rodata+0xc/0xc ? kthread+0x4c/0x11c do_one_initcall+0x94/0x13d ? set_debug_rodata+0xc/0xc kernel_init_freeable+0x112/0x18e ? rest_init+0xa0/0xa0 kernel_init+0x5/0xe1 ret_from_fork+0x22/0x30 Code: 24 18 49 8b 7a 30 48 8b 37 65 48 8b 56 08 65 48 03 35 3a 29 e2 7e 4c 3b 56 10 75 39 48 8b 0e 48 63 47 20 48 01 d8 48 39 cb 75 02 <0f> 0b 49 89 c0 4c 33 87 40 01 00 00 4c 31 c1 48 89 08 48 8d 4a ---[ end trace 4ac2e2c100842676 ]--- Signed-off-by: John Einar Reitan Signed-off-by: Greg Kroah-Hartman --- drivers/char/ipmi/ipmi_si_intf.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index e1cbb78c680640..c04aa11f0e2145 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -3469,7 +3469,6 @@ static int add_smi(struct smi_info *new_smi) ipmi_addr_src_to_str(new_smi->addr_source), si_to_str[new_smi->si_type]); rv = -EBUSY; - kfree(new_smi); goto out_err; } } From b8ce8232fcc37fe7a97db79ea0a5f32098c25e72 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 29 Dec 2017 17:53:50 +0100 Subject: [PATCH 1054/3326] Linux 4.14.10 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ed2132c6d28606..9edfb78836a9e4 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 9 +SUBLEVEL = 10 EXTRAVERSION = NAME = Petit Gorille From 234bc12669a362773e84834ace52f1af7510196b Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 22 Dec 2017 20:38:57 -0500 Subject: [PATCH 1055/3326] tracing: Remove extra zeroing out of the ring buffer page commit 6b7e633fe9c24682df550e5311f47fb524701586 upstream. The ring_buffer_read_page() takes care of zeroing out any extra data in the page that it returns. There's no need to zero it out again from the consumer. It was removed from one consumer of this function, but read_buffers_splice_read() did not remove it, and worse, it contained a nasty bug because of it. 
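For reference, the redundant consumer-side zeroing that this patch removes is the fragment below, reproduced in simplified form from the hunk that follows. Since ring_buffer_read_page() already clears the unused tail of the page it returns, tracing_buffers_splice_read() must not touch the page again; as noted above, keeping this copy around also hid a bug.

	/* Removed: the ring buffer has already zeroed the unused space. */
	size = ring_buffer_page_len(ref->page);
	if (size < PAGE_SIZE)
		memset(ref->page + size, 0, PAGE_SIZE - size);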
Fixes: 2711ca237a084 ("ring-buffer: Move zeroing out excess in page to ring buffer code") Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 80de14973b42b8..75ae95356a71b7 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6769,7 +6769,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, .spd_release = buffer_spd_release, }; struct buffer_ref *ref; - int entries, size, i; + int entries, i; ssize_t ret = 0; #ifdef CONFIG_TRACER_MAX_TRACE @@ -6823,14 +6823,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, break; } - /* - * zero out any left over data, this is going to - * user land. - */ - size = ring_buffer_page_len(ref->page); - if (size < PAGE_SIZE) - memset(ref->page + size, 0, PAGE_SIZE - size); - page = virt_to_page(ref->page); spd.pages[i] = page; From 21a9c7346ef696161dacbbd9f47dabb0f062c4c8 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 26 Dec 2017 20:07:34 -0500 Subject: [PATCH 1056/3326] tracing: Fix possible double free on failure of allocating trace buffer commit 4397f04575c44e1440ec2e49b6302785c95fd2f8 upstream. Jing Xia and Chunyan Zhang reported that on failing to allocate part of the tracing buffer, memory is freed, but the pointers that point to them are not initialized back to NULL, and later paths may try to free the freed memory again. Jing and Chunyan fixed one of the locations that does this, but missed a spot. Link: http://lkml.kernel.org/r/20171226071253.8968-1-chunyan.zhang@spreadtrum.com Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code") Reported-by: Jing Xia Reported-by: Chunyan Zhang Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 75ae95356a71b7..68d87d2b382226 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -7580,6 +7580,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size buf->data = alloc_percpu(struct trace_array_cpu); if (!buf->data) { ring_buffer_free(buf->buffer); + buf->buffer = NULL; return -ENOMEM; } From 98669825616890a96c9fc67c5f983484578e4dd9 Mon Sep 17 00:00:00 2001 From: Jing Xia Date: Tue, 26 Dec 2017 15:12:53 +0800 Subject: [PATCH 1057/3326] tracing: Fix crash when it fails to alloc ring buffer commit 24f2aaf952ee0b59f31c3a18b8b36c9e3d3c2cf5 upstream. Double free of the ring buffer happens when it fails to alloc new ring buffer instance for max_buffer if TRACER_MAX_TRACE is configured. The root cause is that the pointer is not set to NULL after the buffer is freed in allocate_trace_buffers(), and the freeing of the ring buffer is invoked again later if the pointer is not equal to Null, as: instance_mkdir() |-allocate_trace_buffers() |-allocate_trace_buffer(tr, &tr->trace_buffer...) |-allocate_trace_buffer(tr, &tr->max_buffer...) 
// allocate fail(-ENOMEM),first free // and the buffer pointer is not set to null |-ring_buffer_free(tr->trace_buffer.buffer) // out_free_tr |-free_trace_buffers() |-free_trace_buffer(&tr->trace_buffer); //if trace_buffer is not null, free again |-ring_buffer_free(buf->buffer) |-rb_free_cpu_buffer(buffer->buffers[cpu]) // ring_buffer_per_cpu is null, and // crash in ring_buffer_per_cpu->pages Link: http://lkml.kernel.org/r/20171226071253.8968-1-chunyan.zhang@spreadtrum.com Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code") Signed-off-by: Jing Xia Signed-off-by: Chunyan Zhang Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 68d87d2b382226..76bcc80b893ebc 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -7604,7 +7604,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) allocate_snapshot ? size : 1); if (WARN_ON(ret)) { ring_buffer_free(tr->trace_buffer.buffer); + tr->trace_buffer.buffer = NULL; free_percpu(tr->trace_buffer.data); + tr->trace_buffer.data = NULL; return -ENOMEM; } tr->allocated_snapshot = allocate_snapshot; From 72a2beddcd3240047552de69ce45a28029c7e56c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:07:33 +0100 Subject: [PATCH 1058/3326] x86/cpufeatures: Add X86_BUG_CPU_INSECURE commit a89f040fa34ec9cd682aed98b8f04e3c47d998bd upstream. Many x86 CPUs leak information to user space due to missing isolation of user space and kernel space page tables. There are many well documented ways to exploit that. The upcoming software migitation of isolating the user and kernel space page tables needs a misfeature flag so code can be made runtime conditional. Add the BUG bits which indicates that the CPU is affected and add a feature bit which indicates that the software migitation is enabled. Assume for now that _ALL_ x86 CPUs are affected by this. Exceptions can be made later. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 3 ++- arch/x86/include/asm/disabled-features.h | 8 +++++++- arch/x86/kernel/cpu/common.c | 4 ++++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 800104c8a3edfe..d8ec834ea88400 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -201,7 +201,7 @@ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ - +#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ @@ -340,5 +340,6 @@ #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +#define X86_BUG_CPU_INSECURE X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index c10c9128f54e6b..e428e16dd82247 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -44,6 +44,12 @@ # define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31)) #endif +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define DISABLE_PTI 0 +#else +# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) +#endif + /* * Make sure to add features to the correct mask */ @@ -54,7 +60,7 @@ #define DISABLED_MASK4 (DISABLE_PCID) #define DISABLED_MASK5 0 #define DISABLED_MASK6 0 -#define DISABLED_MASK7 0 +#define DISABLED_MASK7 (DISABLE_PTI) #define DISABLED_MASK8 0 #define DISABLED_MASK9 (DISABLE_MPX) #define DISABLED_MASK10 0 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 8ddcfa4d4165bb..a9210f9b7cf842 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -898,6 +898,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) } setup_force_cpu_cap(X86_FEATURE_ALWAYS); + + /* Assume for now that ALL x86 CPUs are insecure */ + setup_force_cpu_bug(X86_BUG_CPU_INSECURE); + fpu__init_system(c); #ifdef CONFIG_X86_32 From acfee9b8e27e7b5d69276e8b804fed7ff5071c10 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:34 +0100 Subject: [PATCH 1059/3326] x86/mm/pti: Disable global pages if PAGE_TABLE_ISOLATION=y commit c313ec66317d421fb5768d78c56abed2dc862264 upstream. Global pages stay in the TLB across context switches. Since all contexts share the same kernel mapping, these mappings are marked as global pages so kernel entries in the TLB are not flushed out on a context switch. 
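Concretely, the approach taken in the arch/x86/mm/init.c hunk below can be sketched like this (simplified; the names and helper are the ones introduced by this patch): the global bit is filtered out of __supported_pte_mask up front and only restored when PTI is not active, so ordinary kernel mappings never carry _PAGE_GLOBAL on affected systems.

	static void enable_global_pages(void)
	{
		/* Re-allow global kernel TLB entries only when PTI is off. */
		if (!static_cpu_has(X86_FEATURE_PTI))
			__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/* In probe_page_size_mask(): start with the global bit cleared. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		enable_global_pages();
	}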
But, even having these entries in the TLB opens up something that an attacker can use, such as the double-page-fault attack: http://www.ieee-security.org/TC/SP2013/papers/4977a191.pdf That means that even when PAGE_TABLE_ISOLATION switches page tables on return to user space the global pages would stay in the TLB cache. Disable global pages so that kernel TLB entries can be flushed before returning to user space. This way, all accesses to kernel addresses from userspace result in a TLB miss independent of the existence of a kernel mapping. Suppress global pages via the __supported_pte_mask. The user space mappings set PAGE_GLOBAL for the minimal kernel mappings which are required for entry/exit. These mappings are set up manually so the filtering does not take place. [ The __supported_pte_mask simplification was written by Thomas Gleixner. ] Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/init.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index a22c2b95e51339..0202234203089d 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -161,6 +161,12 @@ struct map_range { static int page_size_mask; +static void enable_global_pages(void) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + __supported_pte_mask |= _PAGE_GLOBAL; +} + static void __init probe_page_size_mask(void) { /* @@ -179,11 +185,11 @@ static void __init probe_page_size_mask(void) cr4_set_bits_and_update_boot(X86_CR4_PSE); /* Enable PGE if available */ + __supported_pte_mask &= ~_PAGE_GLOBAL; if (boot_cpu_has(X86_FEATURE_PGE)) { cr4_set_bits_and_update_boot(X86_CR4_PGE); - __supported_pte_mask |= _PAGE_GLOBAL; - } else - __supported_pte_mask &= ~_PAGE_GLOBAL; + enable_global_pages(); + } /* Enable 1 GB linear kernel mappings if available: */ if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) { From f3d2b767e912b11d146b9c9922bf28efeda0cdc7 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:35 +0100 Subject: [PATCH 1060/3326] x86/mm/pti: Prepare the x86/entry assembly code for entry/exit CR3 switching commit 8a09317b895f073977346779df52f67c1056d81d upstream. PAGE_TABLE_ISOLATION needs to switch to a different CR3 value when it enters the kernel and switch back when it exits. This essentially needs to be done before leaving assembly code. This is extra challenging because the switching context is tricky: the registers that can be clobbered can vary. It is also hard to store things on the stack because there is an established ABI (ptregs) or the stack is entirely unsafe to use. Establish a set of macros that allow changing to the user and kernel CR3 values. Interactions with SWAPGS: Previous versions of the PAGE_TABLE_ISOLATION code relied on having per-CPU scratch space to save/restore a register that can be used for the CR3 MOV. The %GS register is used to index into our per-CPU space, so SWAPGS *had* to be done before the CR3 switch. That scratch space is gone now, but the semantic that SWAPGS must be done before the CR3 MOV is retained. 
This is good to keep because it is not that hard to do and it allows to do things like add per-CPU debugging information. What this does in the NMI code is worth pointing out. NMIs can interrupt *any* context and they can also be nested with NMIs interrupting other NMIs. The comments below ".Lnmi_from_kernel" explain the format of the stack during this situation. Changing the format of this stack is hard. Instead of storing the old CR3 value on the stack, this depends on the *regular* register save/restore mechanism and then uses %r14 to keep CR3 during the NMI. It is callee-saved and will not be clobbered by the C NMI handlers that get called. [ PeterZ: ESPFIX optimization ] Based-on-code-from: Andy Lutomirski Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Reviewed-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 66 ++++++++++++++++++++++++++++++++ arch/x86/entry/entry_64.S | 45 +++++++++++++++++++--- arch/x86/entry/entry_64_compat.S | 24 +++++++++++- 3 files changed, 128 insertions(+), 7 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 3fd8bc560faece..a9d17a7686ab36 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include #include +#include +#include /* @@ -187,6 +189,70 @@ For 32-bit we have the following conventions - kernel is built with #endif .endm +#ifdef CONFIG_PAGE_TABLE_ISOLATION + +/* PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two halves: */ +#define PTI_SWITCH_MASK (1< in kernel */ SWAPGS xorl %ebx, %ebx -1: ret + +1: + SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 + + ret END(paranoid_entry) /* @@ -1266,6 +1287,7 @@ ENTRY(paranoid_exit) testl %ebx, %ebx /* swapgs needed? */ jnz .Lparanoid_exit_no_swapgs TRACE_IRQS_IRETQ + RESTORE_CR3 save_reg=%r14 SWAPGS_UNSAFE_STACK jmp .Lparanoid_exit_restore .Lparanoid_exit_no_swapgs: @@ -1293,6 +1315,8 @@ ENTRY(error_entry) * from user mode due to an IRET fault. */ SWAPGS + /* We have user CR3. Change to kernel CR3. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax .Lerror_entry_from_usermode_after_swapgs: /* Put us onto the real thread stack. */ @@ -1339,6 +1363,7 @@ ENTRY(error_entry) * .Lgs_change's error handler with kernel gsbase. */ SWAPGS + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax jmp .Lerror_entry_done .Lbstep_iret: @@ -1348,10 +1373,11 @@ ENTRY(error_entry) .Lerror_bad_iret: /* - * We came from an IRET to user mode, so we have user gsbase. - * Switch to kernel gsbase: + * We came from an IRET to user mode, so we have user + * gsbase and CR3. Switch to kernel gsbase and CR3: */ SWAPGS + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax /* * Pretend that the exception came from user mode: set up pt_regs @@ -1383,6 +1409,10 @@ END(error_exit) /* * Runs on exception stack. Xen PV does not go through this path at all, * so we can use real assembly here. + * + * Registers: + * %r14: Used to save/restore the CR3 of the interrupted context + * when PAGE_TABLE_ISOLATION is in use. Do not clobber. 
*/ ENTRY(nmi) UNWIND_HINT_IRET_REGS @@ -1446,6 +1476,7 @@ ENTRY(nmi) swapgs cld + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx movq %rsp, %rdx movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp UNWIND_HINT_IRET_REGS base=%rdx offset=8 @@ -1698,6 +1729,8 @@ end_repeat_nmi: movq $-1, %rsi call do_nmi + RESTORE_CR3 save_reg=%r14 + testl %ebx, %ebx /* swapgs needed? */ jnz nmi_restore nmi_swapgs: diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 95ad40eb7effbd..05238b29895eea 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -49,6 +49,10 @@ ENTRY(entry_SYSENTER_compat) /* Interrupts are off on entry. */ SWAPGS + + /* We are about to clobber %rsp anyway, clobbering here is OK */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* @@ -215,6 +219,12 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) pushq $0 /* pt_regs->r14 = 0 */ pushq $0 /* pt_regs->r15 = 0 */ + /* + * We just saved %rdi so it is safe to clobber. It is not + * preserved during the C calls inside TRACE_IRQS_OFF anyway. + */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi + /* * User mode is traced as though IRQs are on, and SYSENTER * turned them off. @@ -256,10 +266,22 @@ sysret32_from_system_call: * when the system call started, which is already known to user * code. We zero R8-R10 to avoid info leaks. */ + movq RSP-ORIG_RAX(%rsp), %rsp + + /* + * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored + * on the process stack which is not mapped to userspace and + * not readable after we SWITCH_TO_USER_CR3. Delay the CR3 + * switch until after after the last reference to the process + * stack. + * + * %r8 is zeroed before the sysret, thus safe to clobber. + */ + SWITCH_TO_USER_CR3 scratch_reg=%r8 + xorq %r8, %r8 xorq %r9, %r9 xorq %r10, %r10 - movq RSP-ORIG_RAX(%rsp), %rsp swapgs sysretl END(entry_SYSCALL_compat) From a4b07fb4e5a6aef3b87a6540cc04cf972525a723 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:07:36 +0100 Subject: [PATCH 1061/3326] x86/mm/pti: Add infrastructure for page table isolation commit aa8c6248f8c75acfd610fe15d8cae23cf70d9d09 upstream. Add the initial files for kernel page table isolation, with a minimal init function and the boot time detection for this misfeature. Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- .../admin-guide/kernel-parameters.txt | 2 + arch/x86/boot/compressed/pagetable.c | 3 + arch/x86/entry/calling.h | 7 ++ arch/x86/include/asm/pti.h | 14 ++++ arch/x86/mm/Makefile | 7 +- arch/x86/mm/init.c | 2 + arch/x86/mm/pti.c | 84 +++++++++++++++++++ include/linux/pti.h | 11 +++ init/main.c | 3 + 9 files changed, 130 insertions(+), 3 deletions(-) create mode 100644 arch/x86/include/asm/pti.h create mode 100644 arch/x86/mm/pti.c create mode 100644 include/linux/pti.h diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 05496622b4effb..5dfd26265484ed 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2685,6 +2685,8 @@ steal time is computed, but won't influence scheduler behaviour + nopti [X86-64] Disable kernel page table isolation + nolapic [X86-32,APIC] Do not enable or use the local APIC. nolapic_timer [X86-32,APIC] Do not use the local APIC timer. diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c index 972319ff5b0193..e691ff734cb5ad 100644 --- a/arch/x86/boot/compressed/pagetable.c +++ b/arch/x86/boot/compressed/pagetable.c @@ -23,6 +23,9 @@ */ #undef CONFIG_AMD_MEM_ENCRYPT +/* No PAGE_TABLE_ISOLATION support needed either: */ +#undef CONFIG_PAGE_TABLE_ISOLATION + #include "misc.h" /* These actually do the work of building the kernel identity maps. */ diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index a9d17a7686ab36..3d3389a92c3395 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -205,18 +205,23 @@ For 32-bit we have the following conventions - kernel is built with .endm .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI mov %cr3, \scratch_reg ADJUST_KERNEL_CR3 \scratch_reg mov \scratch_reg, %cr3 +.Lend_\@: .endm .macro SWITCH_TO_USER_CR3 scratch_reg:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI mov %cr3, \scratch_reg ADJUST_USER_CR3 \scratch_reg mov \scratch_reg, %cr3 +.Lend_\@: .endm .macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req + ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI movq %cr3, \scratch_reg movq \scratch_reg, \save_reg /* @@ -233,11 +238,13 @@ For 32-bit we have the following conventions - kernel is built with .endm .macro RESTORE_CR3 save_reg:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI /* * The CR3 write could be avoided when not changing its value, * but would require a CR3 read *and* a scratch register. 
*/ movq \save_reg, %cr3 +.Lend_\@: .endm #else /* CONFIG_PAGE_TABLE_ISOLATION=n: */ diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h new file mode 100644 index 00000000000000..0b5ef05b2d2d9f --- /dev/null +++ b/arch/x86/include/asm/pti.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _ASM_X86_PTI_H +#define _ASM_X86_PTI_H +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +extern void pti_init(void); +extern void pti_check_boottime_disable(void); +#else +static inline void pti_check_boottime_disable(void) { } +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_X86_PTI_H */ diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 2e0017af8f9b06..52906808e27757 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -43,9 +43,10 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o obj-$(CONFIG_ACPI_NUMA) += srat.o obj-$(CONFIG_NUMA_EMU) += numa_emulation.o -obj-$(CONFIG_X86_INTEL_MPX) += mpx.o -obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o -obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o +obj-$(CONFIG_X86_INTEL_MPX) += mpx.o +obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o +obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o +obj-$(CONFIG_PAGE_TABLE_ISOLATION) += pti.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 0202234203089d..af75069fb11605 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -20,6 +20,7 @@ #include #include #include +#include /* * We need to define the tracepoints somewhere, and tlb.c @@ -630,6 +631,7 @@ void __init init_mem_mapping(void) { unsigned long end; + pti_check_boottime_disable(); probe_page_size_mask(); setup_pcid(); diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c new file mode 100644 index 00000000000000..375f23a758bcbb --- /dev/null +++ b/arch/x86/mm/pti.c @@ -0,0 +1,84 @@ +/* + * Copyright(c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * This code is based in part on work published here: + * + * https://github.com/IAIK/KAISER + * + * The original work was written by and and signed off by for the Linux + * kernel by: + * + * Signed-off-by: Richard Fellner + * Signed-off-by: Moritz Lipp + * Signed-off-by: Daniel Gruss + * Signed-off-by: Michael Schwarz + * + * Major changes to the original code by: Dave Hansen + * Mostly rewritten by Thomas Gleixner and + * Andy Lutomirsky + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt + +static void __init pti_print_if_insecure(const char *reason) +{ + if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) + pr_info("%s\n", reason); +} + +void __init pti_check_boottime_disable(void) +{ + if (hypervisor_is_type(X86_HYPER_XEN_PV)) { + pti_print_if_insecure("disabled on XEN PV."); + return; + } + + if (cmdline_find_option_bool(boot_command_line, "nopti")) { + pti_print_if_insecure("disabled on command line."); + return; + } + + if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) + return; + + setup_force_cpu_cap(X86_FEATURE_PTI); +} + +/* + * Initialize kernel page table isolation + */ +void __init pti_init(void) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + pr_info("enabled\n"); +} diff --git a/include/linux/pti.h b/include/linux/pti.h new file mode 100644 index 00000000000000..0174883a935a25 --- /dev/null +++ b/include/linux/pti.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _INCLUDE_PTI_H +#define _INCLUDE_PTI_H + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +#include +#else +static inline void pti_init(void) { } +#endif + +#endif diff --git a/init/main.c b/init/main.c index 8a390f60ec81e5..b32ec72cdf3dd8 100644 --- a/init/main.c +++ b/init/main.c @@ -75,6 +75,7 @@ #include #include #include +#include #include #include #include @@ -506,6 +507,8 @@ static void __init mm_init(void) ioremap_huge_init(); /* Should be run before the first non-init thread is created */ init_espfix_bsp(); + /* Should be run after espfix64 is set up. */ + pti_init(); } asmlinkage __visible void __init start_kernel(void) From 8a2533407f4d60a43effadf7b62825d213cd678c Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 12 Dec 2017 14:39:52 +0100 Subject: [PATCH 1062/3326] x86/pti: Add the pti= cmdline option and documentation commit 41f4c20b57a4890ea7f56ff8717cc83fefb8d537 upstream. Keep the "nopti" optional for traditional reasons. [ tglx: Don't allow force on when running on XEN PV and made 'on' printout conditional ] Requested-by: Linus Torvalds Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Andy Lutomirsky Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Link: https://lkml.kernel.org/r/20171212133952.10177-1-bp@alien8.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- .../admin-guide/kernel-parameters.txt | 6 +++++ arch/x86/mm/pti.c | 26 ++++++++++++++++++- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 5dfd26265484ed..520fdec15bbb5b 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3255,6 +3255,12 @@ pt. [PARIDE] See Documentation/blockdev/paride.txt. + pti= [X86_64] + Control user/kernel address space isolation: + on - enable + off - disable + auto - default setting + pty.legacy_count= [KNL] Number of legacy pty's. Overwrites compiled-in default number. diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 375f23a758bcbb..a13f6b1098651a 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -54,21 +54,45 @@ static void __init pti_print_if_insecure(const char *reason) pr_info("%s\n", reason); } +static void __init pti_print_if_secure(const char *reason) +{ + if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) + pr_info("%s\n", reason); +} + void __init pti_check_boottime_disable(void) { + char arg[5]; + int ret; + if (hypervisor_is_type(X86_HYPER_XEN_PV)) { pti_print_if_insecure("disabled on XEN PV."); return; } + ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg)); + if (ret > 0) { + if (ret == 3 && !strncmp(arg, "off", 3)) { + pti_print_if_insecure("disabled on command line."); + return; + } + if (ret == 2 && !strncmp(arg, "on", 2)) { + pti_print_if_secure("force enabled on command line."); + goto enable; + } + if (ret == 4 && !strncmp(arg, "auto", 4)) + goto autosel; + } + if (cmdline_find_option_bool(boot_command_line, "nopti")) { pti_print_if_insecure("disabled on command line."); return; } +autosel: if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) return; - +enable: setup_force_cpu_cap(X86_FEATURE_PTI); } From b9feab7dcf86df222a405df3e0d95b85741a2d73 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:37 +0100 Subject: [PATCH 1063/3326] x86/mm/pti: Add mapping helper functions commit 61e9b3671007a5da8127955a1a3bda7e0d5f42e8 upstream. Add the pagetable helper functions do manage the separate user space page tables. [ tglx: Split out from the big combo kaiser patch. Folded Andys simplification and made it out of line as Boris suggested ] Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/pgtable.h | 6 +- arch/x86/include/asm/pgtable_64.h | 92 +++++++++++++++++++++++++++++++ arch/x86/mm/pti.c | 41 ++++++++++++++ 3 files changed, 138 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index f02de8bc1f7202..99b10d187db107 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -909,7 +909,11 @@ static inline int pgd_none(pgd_t pgd) * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) +#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address))) +/* + * a shortcut to get a pgd_t in a given mm + */ +#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address)) /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index e9f05331e732a0..81462e9a34f6af 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -131,9 +131,97 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp) #endif } +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages + * (8k-aligned and 8k in size). The kernel one is at the beginning 4k and + * the user one is in the last 4k. To switch between them, you + * just need to flip the 12th bit in their addresses. + */ +#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT + +/* + * This generates better code than the inline assembly in + * __set_bit(). + */ +static inline void *ptr_set_bit(void *ptr, int bit) +{ + unsigned long __ptr = (unsigned long)ptr; + + __ptr |= BIT(bit); + return (void *)__ptr; +} +static inline void *ptr_clear_bit(void *ptr, int bit) +{ + unsigned long __ptr = (unsigned long)ptr; + + __ptr &= ~BIT(bit); + return (void *)__ptr; +} + +static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp) +{ + return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp) +{ + return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp) +{ + return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp) +{ + return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT); +} +#endif /* CONFIG_PAGE_TABLE_ISOLATION */ + +/* + * Page table pages are page-aligned. The lower half of the top + * level is used for userspace and the top half for the kernel. + * + * Returns true for parts of the PGD that map userspace and + * false for the parts that map the kernel. + */ +static inline bool pgdp_maps_userspace(void *__ptr) +{ + unsigned long ptr = (unsigned long)__ptr; + + return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2); +} + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd); + +/* + * Take a PGD location (pgdp) and a pgd value that needs to be set there. + * Populates the user and returns the resulting PGD that must be set in + * the kernel copy of the page tables. 
+ */ +static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + return pgd; + return __pti_set_user_pgd(pgdp, pgd); +} +#else +static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + return pgd; +} +#endif + static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) { +#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL) + p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd); +#else *p4dp = p4d; +#endif } static inline void native_p4d_clear(p4d_t *p4d) @@ -147,7 +235,11 @@ static inline void native_p4d_clear(p4d_t *p4d) static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) { +#ifdef CONFIG_PAGE_TABLE_ISOLATION + *pgdp = pti_set_user_pgd(pgdp, pgd); +#else *pgdp = pgd; +#endif } static inline void native_pgd_clear(pgd_t *pgd) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index a13f6b1098651a..69a98336539296 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -96,6 +96,47 @@ void __init pti_check_boottime_disable(void) setup_force_cpu_cap(X86_FEATURE_PTI); } +pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + /* + * Changes to the high (kernel) portion of the kernelmode page + * tables are not automatically propagated to the usermode tables. + * + * Users should keep in mind that, unlike the kernelmode tables, + * there is no vmalloc_fault equivalent for the usermode tables. + * Top-level entries added to init_mm's usermode pgd after boot + * will not be automatically propagated to other mms. + */ + if (!pgdp_maps_userspace(pgdp)) + return pgd; + + /* + * The user page tables get the full PGD, accessible from + * userspace: + */ + kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd; + + /* + * If this is normal user memory, make it NX in the kernel + * pagetables so that, if we somehow screw up and return to + * usermode with the kernel CR3 loaded, we'll get a page fault + * instead of allowing user code to execute with the wrong CR3. + * + * As exceptions, we don't set NX if: + * - _PAGE_USER is not set. This could be an executable + * EFI runtime mapping or something similar, and the kernel + * may execute from it + * - we don't have NX support + * - we're clearing the PGD (i.e. the new pgd is not present). + */ + if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) && + (__supported_pte_mask & _PAGE_NX)) + pgd.pgd |= _PAGE_NX; + + /* return the copy of the PGD we want the kernel to use: */ + return pgd; +} + /* * Initialize kernel page table isolation */ From ffcb80ad79e8c3d87b43f2c9ee4b9170c7ec12ea Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:38 +0100 Subject: [PATCH 1064/3326] x86/mm/pti: Allow NX poison to be set in p4d/pgd commit 1c4de1ff4fe50453b968579ee86fac3da80dd783 upstream. With PAGE_TABLE_ISOLATION the user portion of the kernel page tables is poisoned with the NX bit so if the entry code exits with the kernel page tables selected in CR3, userspace crashes. But doing so trips the p4d/pgd_bad() checks. Make sure it does not do that. Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/pgtable.h | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 99b10d187db107..f4c42c6d350c78 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -846,7 +846,12 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) static inline int p4d_bad(p4d_t p4d) { - return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0; + unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER; + + if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + ignore_flags |= _PAGE_NX; + + return (p4d_flags(p4d) & ~ignore_flags) != 0; } #endif /* CONFIG_PGTABLE_LEVELS > 3 */ @@ -880,7 +885,12 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) static inline int pgd_bad(pgd_t pgd) { - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; + unsigned long ignore_flags = _PAGE_USER; + + if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + ignore_flags |= _PAGE_NX; + + return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE; } static inline int pgd_none(pgd_t pgd) From 61fd4049e6760eb8832d4e0bec592d8a810b270f Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:39 +0100 Subject: [PATCH 1065/3326] x86/mm/pti: Allocate a separate user PGD commit d9e9a6418065bb376e5de8d93ce346939b9a37a6 upstream. Kernel page table isolation requires to have two PGDs. One for the kernel, which contains the full kernel mapping plus the user space mapping and one for user space which contains the user space mappings and the minimal set of kernel mappings which are required by the architecture to be able to transition from and to user space. Add the necessary preliminaries. [ tglx: Split out from the big kaiser dump. EFI fixup from Kirill ] Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/pgalloc.h | 11 +++++++++++ arch/x86/kernel/head_64.S | 30 +++++++++++++++++++++++++++--- arch/x86/mm/pgtable.c | 5 +++-- arch/x86/platform/efi/efi_64.c | 5 ++++- 4 files changed, 45 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 4b5e1eafada731..aff42e1da6ee15 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -30,6 +30,17 @@ static inline void paravirt_release_p4d(unsigned long pfn) {} */ extern gfp_t __userpte_alloc_gfp; +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * Instead of one PGD, we acquire two PGDs. Being order-1, it is + * both 8k in size and 8k-aligned. That lets us just flip bit 12 + * in a pointer to swap between the two 4k halves. + */ +#define PGD_ALLOCATION_ORDER 1 +#else +#define PGD_ALLOCATION_ORDER 0 +#endif + /* * Allocate and free page tables. 
*/ diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 7dca675fe78db6..04a625f0fcda32 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -341,6 +341,27 @@ GLOBAL(early_recursion_flag) .balign PAGE_SIZE; \ GLOBAL(name) +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * Each PGD needs to be 8k long and 8k aligned. We do not + * ever go out to userspace with these, so we do not + * strictly *need* the second page, but this allows us to + * have a single set_pgd() implementation that does not + * need to worry about whether it has 4k or 8k to work + * with. + * + * This ensures PGDs are 8k long: + */ +#define PTI_USER_PGD_FILL 512 +/* This ensures they are 8k-aligned: */ +#define NEXT_PGD_PAGE(name) \ + .balign 2 * PAGE_SIZE; \ +GLOBAL(name) +#else +#define NEXT_PGD_PAGE(name) NEXT_PAGE(name) +#define PTI_USER_PGD_FILL 0 +#endif + /* Automate the creation of 1 to 1 mapping pmd entries */ #define PMDS(START, PERM, COUNT) \ i = 0 ; \ @@ -350,13 +371,14 @@ GLOBAL(name) .endr __INITDATA -NEXT_PAGE(early_top_pgt) +NEXT_PGD_PAGE(early_top_pgt) .fill 511,8,0 #ifdef CONFIG_X86_5LEVEL .quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC #else .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC #endif + .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(early_dynamic_pgts) .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 @@ -364,13 +386,14 @@ NEXT_PAGE(early_dynamic_pgts) .data #if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) -NEXT_PAGE(init_top_pgt) +NEXT_PGD_PAGE(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + PGD_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + PGD_START_KERNEL*8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC + .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC @@ -381,8 +404,9 @@ NEXT_PAGE(level2_ident_pgt) */ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) #else -NEXT_PAGE(init_top_pgt) +NEXT_PGD_PAGE(init_top_pgt) .fill 512,8,0 + .fill PTI_USER_PGD_FILL,8,0 #endif #ifdef CONFIG_X86_5LEVEL diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 17ebc5a978ccd9..9b7bcbd33cc246 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -355,14 +355,15 @@ static inline void _pgd_free(pgd_t *pgd) kmem_cache_free(pgd_cache, pgd); } #else + static inline pgd_t *_pgd_alloc(void) { - return (pgd_t *)__get_free_page(PGALLOC_GFP); + return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER); } static inline void _pgd_free(pgd_t *pgd) { - free_page((unsigned long)pgd); + free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER); } #endif /* CONFIG_X86_PAE */ diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 20fb31579b6942..39c4b35ac7a4a9 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -195,6 +195,9 @@ static pgd_t *efi_pgd; * because we want to avoid inserting EFI region mappings (EFI_VA_END * to EFI_VA_START) into the standard kernel page tables. Everything * else can be shared, see efi_sync_low_kernel_mappings(). + * + * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the + * allocation. 
*/ int __init efi_alloc_page_tables(void) { @@ -207,7 +210,7 @@ int __init efi_alloc_page_tables(void) return 0; gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO; - efi_pgd = (pgd_t *)__get_free_page(gfp_mask); + efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER); if (!efi_pgd) return -ENOMEM; From 1bcd98df0f50b28a5fb40eeb2cb7a17a02820232 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:40 +0100 Subject: [PATCH 1066/3326] x86/mm/pti: Populate user PGD commit fc2fbc8512ed08d1de7720936fd7d2e4ce02c3a2 upstream. In clone_pgd_range() copy the init user PGDs which cover the kernel half of the address space, so a process has all the required kernel mappings visible. [ tglx: Split out from the big kaiser dump and folded Andys simplification ] Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/pgtable.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index f4c42c6d350c78..9aaeea68783469 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -1125,7 +1125,14 @@ static inline int pud_write(pud_t pud) */ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) { - memcpy(dst, src, count * sizeof(pgd_t)); + memcpy(dst, src, count * sizeof(pgd_t)); +#ifdef CONFIG_PAGE_TABLE_ISOLATION + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + /* Clone the user space pgd as well */ + memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src), + count * sizeof(pgd_t)); +#endif } #define PTE_SHIFT ilog2(PTRS_PER_PTE) From 9f006b0247234e6448e5fd04b5bcb0c56c968698 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:42 +0100 Subject: [PATCH 1067/3326] x86/mm/pti: Add functions to clone kernel PMDs commit 03f4424f348e8be95eb1bbeba09461cd7b867828 upstream. Provide infrastructure to: - find a kernel PMD for a mapping which must be visible to user space for the entry/exit code to work. - walk an address range and share the kernel PMD with it. This reuses a small part of the original KAISER patches to populate the user space page table. [ tglx: Made it universally usable so it can be used for any kind of shared mapping. Add a mechanism to clear specific bits in the user space visible PMD entry. Folded Andys simplifactions ] Originally-by: Dave Hansen Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/pti.c | 127 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 69a98336539296..d58bcee470fcb6 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -48,6 +48,11 @@ #undef pr_fmt #define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt +/* Backporting helper */ +#ifndef __GFP_NOTRACK +#define __GFP_NOTRACK 0 +#endif + static void __init pti_print_if_insecure(const char *reason) { if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) @@ -137,6 +142,128 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) return pgd; } +/* + * Walk the user copy of the page tables (optionally) trying to allocate + * page table pages on the way down. + * + * Returns a pointer to a P4D on success, or NULL on failure. + */ +static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) +{ + pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + + if (address < PAGE_OFFSET) { + WARN_ONCE(1, "attempt to walk user address\n"); + return NULL; + } + + if (pgd_none(*pgd)) { + unsigned long new_p4d_page = __get_free_page(gfp); + if (!new_p4d_page) + return NULL; + + if (pgd_none(*pgd)) { + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); + new_p4d_page = 0; + } + if (new_p4d_page) + free_page(new_p4d_page); + } + BUILD_BUG_ON(pgd_large(*pgd) != 0); + + return p4d_offset(pgd, address); +} + +/* + * Walk the user copy of the page tables (optionally) trying to allocate + * page table pages on the way down. + * + * Returns a pointer to a PMD on success, or NULL on failure. + */ +static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) +{ + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + p4d_t *p4d = pti_user_pagetable_walk_p4d(address); + pud_t *pud; + + BUILD_BUG_ON(p4d_large(*p4d) != 0); + if (p4d_none(*p4d)) { + unsigned long new_pud_page = __get_free_page(gfp); + if (!new_pud_page) + return NULL; + + if (p4d_none(*p4d)) { + set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page))); + new_pud_page = 0; + } + if (new_pud_page) + free_page(new_pud_page); + } + + pud = pud_offset(p4d, address); + /* The user page tables do not use large mappings: */ + if (pud_large(*pud)) { + WARN_ON(1); + return NULL; + } + if (pud_none(*pud)) { + unsigned long new_pmd_page = __get_free_page(gfp); + if (!new_pmd_page) + return NULL; + + if (pud_none(*pud)) { + set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page))); + new_pmd_page = 0; + } + if (new_pmd_page) + free_page(new_pmd_page); + } + + return pmd_offset(pud, address); +} + +static void __init +pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear) +{ + unsigned long addr; + + /* + * Clone the populated PMDs which cover start to end. These PMD areas + * can have holes. 
+ */ + for (addr = start; addr < end; addr += PMD_SIZE) { + pmd_t *pmd, *target_pmd; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + + pgd = pgd_offset_k(addr); + if (WARN_ON(pgd_none(*pgd))) + return; + p4d = p4d_offset(pgd, addr); + if (WARN_ON(p4d_none(*p4d))) + return; + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + continue; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + continue; + + target_pmd = pti_user_pagetable_walk_pmd(addr); + if (WARN_ON(!target_pmd)) + return; + + /* + * Copy the PMD. That is, the kernelmode and usermode + * tables will share the last-level page tables of this + * address range + */ + *target_pmd = pmd_clear_flags(*pmd, clear); + } +} + /* * Initialize kernel page table isolation */ From 35531133abf37ea2f00673a0953e397c286f7c7c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:07:43 +0100 Subject: [PATCH 1068/3326] x86/mm/pti: Force entry through trampoline when PTI active commit 8d4b067895791ab9fdb1aadfc505f64d71239dd2 upstream. Force the entry through the trampoline only when PTI is active. Otherwise go through the normal entry code. Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/common.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a9210f9b7cf842..f2a94dfb434e9a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1339,7 +1339,10 @@ void syscall_init(void) (entry_SYSCALL_64_trampoline - _entry_trampoline); wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); - wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline); + if (static_cpu_has(X86_FEATURE_PTI)) + wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline); + else + wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); #ifdef CONFIG_IA32_EMULATION wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); From fb9dfabb6e803801b9e88ee6c0d56d3b54531b95 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 4 Dec 2017 15:07:45 +0100 Subject: [PATCH 1069/3326] x86/mm/pti: Share cpu_entry_area with user space page tables commit f7cfbee91559ca7e3e961a00ffac921208a115ad upstream. Share the cpu entry area so the user space and kernel space page tables have the same P4D page. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/pti.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index d58bcee470fcb6..59290356f19f78 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -264,6 +264,29 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear) } } +/* + * Clone a single p4d (i.e. 
a top-level entry on 4-level systems and a + * next-level entry on 5-level systems. + */ +static void __init pti_clone_p4d(unsigned long addr) +{ + p4d_t *kernel_p4d, *user_p4d; + pgd_t *kernel_pgd; + + user_p4d = pti_user_pagetable_walk_p4d(addr); + kernel_pgd = pgd_offset_k(addr); + kernel_p4d = p4d_offset(kernel_pgd, addr); + *user_p4d = *kernel_p4d; +} + +/* + * Clone the CPU_ENTRY_AREA into the user space visible page table. + */ +static void __init pti_clone_user_shared(void) +{ + pti_clone_p4d(CPU_ENTRY_AREA_BASE); +} + /* * Initialize kernel page table isolation */ @@ -273,4 +296,6 @@ void __init pti_init(void) return; pr_info("enabled\n"); + + pti_clone_user_shared(); } From 088baf5de12eb7660e20f3f4efc1cf270acff5f4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:07:46 +0100 Subject: [PATCH 1070/3326] x86/entry: Align entry text section to PMD boundary commit 2f7412ba9c6af5ab16bdbb4a3fdb1dcd2b4fd3c2 upstream. The (irq)entry text must be visible in the user space page tables. To allow simple PMD based sharing, make the entry text PMD aligned. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/vmlinux.lds.S | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index d2a8b5a24a44a5..1e413a9326aaa1 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -61,11 +61,17 @@ jiffies_64 = jiffies; . = ALIGN(HPAGE_SIZE); \ __end_rodata_hpage_align = .; +#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); +#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); + #else #define X64_ALIGN_RODATA_BEGIN #define X64_ALIGN_RODATA_END +#define ALIGN_ENTRY_TEXT_BEGIN +#define ALIGN_ENTRY_TEXT_END + #endif PHDRS { @@ -102,8 +108,10 @@ SECTIONS CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT + ALIGN_ENTRY_TEXT_BEGIN ENTRY_TEXT IRQENTRY_TEXT + ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) From e08aa2f1988a7d4ded9c9674fe18857ee5c6fc53 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:07:47 +0100 Subject: [PATCH 1071/3326] x86/mm/pti: Share entry text PMD commit 6dc72c3cbca0580642808d677181cad4c6433893 upstream. Share the entry text PMD of the kernel mapping with the user space mapping. If large pages are enabled this is a single PMD entry and at the point where it is copied into the user page table the RW bit has not been cleared yet. Clear it right away so the user space visible map becomes RX. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/pti.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 59290356f19f78..0e78797650a7e4 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -287,6 +287,15 @@ static void __init pti_clone_user_shared(void) pti_clone_p4d(CPU_ENTRY_AREA_BASE); } +/* + * Clone the populated PMDs of the entry and irqentry text and force it RO. + */ +static void __init pti_clone_entry_text(void) +{ + pti_clone_pmds((unsigned long) __entry_text_start, + (unsigned long) __irqentry_text_end, _PAGE_RW); +} + /* * Initialize kernel page table isolation */ @@ -298,4 +307,5 @@ void __init pti_init(void) pr_info("enabled\n"); pti_clone_user_shared(); + pti_clone_entry_text(); } From d230c1917f57c3beee2e0204a4c8c58999758b95 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Fri, 15 Dec 2017 22:08:18 +0100 Subject: [PATCH 1072/3326] x86/mm/pti: Map ESPFIX into user space commit 4b6bbe95b87966ba08999574db65c93c5e925a36 upstream. Map the ESPFIX pages into user space when PTI is enabled. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/pti.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 0e78797650a7e4..b1c38ef9fbbb78 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -287,6 +287,16 @@ static void __init pti_clone_user_shared(void) pti_clone_p4d(CPU_ENTRY_AREA_BASE); } +/* + * Clone the ESPFIX P4D into the user space visible page table + */ +static void __init pti_setup_espfix64(void) +{ +#ifdef CONFIG_X86_ESPFIX64 + pti_clone_p4d(ESPFIX_BASE_ADDR); +#endif +} + /* * Clone the populated PMDs of the entry and irqentry text and force it RO. */ @@ -308,4 +318,5 @@ void __init pti_init(void) pti_clone_user_shared(); pti_clone_entry_text(); + pti_setup_espfix64(); } From e0eb34665d2eecddfa7a1810b76fae52313c1286 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:07:49 +0100 Subject: [PATCH 1073/3326] x86/cpu_entry_area: Add debugstore entries to cpu_entry_area commit 10043e02db7f8a4161f76434931051e7d797a5f6 upstream. The Intel PEBS/BTS debug store is a design trainwreck as it expects virtual addresses which must be visible in any execution context. So it is required to make these mappings visible to user space when kernel page table isolation is active. Provide enough room for the buffer mappings in the cpu_entry_area so the buffers are available in the user space visible page tables. At the point where the kernel side entry area is populated there is no buffer available yet, but the kernel PMD must be populated. To achieve this set the entries for these buffers to non present. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H.
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/ds.c | 5 ++-- arch/x86/events/perf_event.h | 21 ++-------------- arch/x86/include/asm/cpu_entry_area.h | 13 ++++++++++ arch/x86/include/asm/intel_ds.h | 36 +++++++++++++++++++++++++++ arch/x86/mm/cpu_entry_area.c | 27 ++++++++++++++++++++ 5 files changed, 81 insertions(+), 21 deletions(-) create mode 100644 arch/x86/include/asm/intel_ds.h diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 3674a4b6f8bd0c..6522f0279cb89b 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -8,11 +8,12 @@ #include "../perf_event.h" +/* Waste a full page so it can be mapped into the cpu_entry_area */ +DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); + /* The size of a BTS record in bytes: */ #define BTS_RECORD_SIZE 24 -#define BTS_BUFFER_SIZE (PAGE_SIZE << 4) -#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4) #define PEBS_FIXUP_SIZE PAGE_SIZE /* diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index f7aaadf9331fb7..373f9eda80b1b1 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -14,6 +14,8 @@ #include +#include + /* To enable MSR tracing please use the generic trace points. */ /* @@ -77,8 +79,6 @@ struct amd_nb { struct event_constraint event_constraints[X86_PMC_IDX_MAX]; }; -/* The maximal number of PEBS events: */ -#define MAX_PEBS_EVENTS 8 #define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1) /* @@ -95,23 +95,6 @@ struct amd_nb { PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \ PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER) -/* - * A debug store configuration. - * - * We only support architectures that use 64bit fields. - */ -struct debug_store { - u64 bts_buffer_base; - u64 bts_index; - u64 bts_absolute_maximum; - u64 bts_interrupt_threshold; - u64 pebs_buffer_base; - u64 pebs_index; - u64 pebs_absolute_maximum; - u64 pebs_interrupt_threshold; - u64 pebs_event_reset[MAX_PEBS_EVENTS]; -}; - #define PEBS_REGS \ (PERF_REG_X86_AX | \ PERF_REG_X86_BX | \ diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h index 2fbc69a0916edb..4a7884b8dca55b 100644 --- a/arch/x86/include/asm/cpu_entry_area.h +++ b/arch/x86/include/asm/cpu_entry_area.h @@ -5,6 +5,7 @@ #include #include +#include /* * cpu_entry_area is a percpu region that contains things needed by the CPU @@ -40,6 +41,18 @@ struct cpu_entry_area { */ char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]; #endif +#ifdef CONFIG_CPU_SUP_INTEL + /* + * Per CPU debug store for Intel performance monitoring. Wastes a + * full page at the moment. + */ + struct debug_store cpu_debug_store; + /* + * The actual PEBS/BTS buffers must be mapped to user space + * Reserve enough fixmap PTEs. 
+ */ + struct debug_store_buffers cpu_debug_buffers; +#endif }; #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h new file mode 100644 index 00000000000000..62a9f4966b4298 --- /dev/null +++ b/arch/x86/include/asm/intel_ds.h @@ -0,0 +1,36 @@ +#ifndef _ASM_INTEL_DS_H +#define _ASM_INTEL_DS_H + +#include + +#define BTS_BUFFER_SIZE (PAGE_SIZE << 4) +#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4) + +/* The maximal number of PEBS events: */ +#define MAX_PEBS_EVENTS 8 + +/* + * A debug store configuration. + * + * We only support architectures that use 64bit fields. + */ +struct debug_store { + u64 bts_buffer_base; + u64 bts_index; + u64 bts_absolute_maximum; + u64 bts_interrupt_threshold; + u64 pebs_buffer_base; + u64 pebs_index; + u64 pebs_absolute_maximum; + u64 pebs_interrupt_threshold; + u64 pebs_event_reset[MAX_PEBS_EVENTS]; +} __aligned(PAGE_SIZE); + +DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); + +struct debug_store_buffers { + char bts_buffer[BTS_BUFFER_SIZE]; + char pebs_buffer[PEBS_BUFFER_SIZE]; +}; + +#endif diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index fe814fd5e0140f..b9283cc276220d 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c @@ -38,6 +38,32 @@ cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot) cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot); } +static void percpu_setup_debug_store(int cpu) +{ +#ifdef CONFIG_CPU_SUP_INTEL + int npages; + void *cea; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return; + + cea = &get_cpu_entry_area(cpu)->cpu_debug_store; + npages = sizeof(struct debug_store) / PAGE_SIZE; + BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0); + cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages, + PAGE_KERNEL); + + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers; + /* + * Force the population of PMDs for not yet allocated per cpu + * memory like debug store buffers. + */ + npages = sizeof(struct debug_store_buffers) / PAGE_SIZE; + for (; npages; npages--, cea += PAGE_SIZE) + cea_set_pte(cea, 0, PAGE_NONE); +#endif +} + /* Setup the fixmap mappings only once per-processor */ static void __init setup_cpu_entry_area(int cpu) { @@ -109,6 +135,7 @@ static void __init setup_cpu_entry_area(int cpu) cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline, __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); #endif + percpu_setup_debug_store(cpu); } static __init void setup_cpu_entry_area_ptes(void) From 8b82023b7fc2c3734aae23bc03ed5937e67f7388 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 4 Dec 2017 15:07:50 +0100 Subject: [PATCH 1074/3326] x86/events/intel/ds: Map debug buffers in cpu_entry_area commit c1961a4631daef4aeabee8e368b1b13e8f173c91 upstream. The BTS and PEBS buffers both have their virtual addresses programmed into the hardware. This means that any access to them is performed via the page tables. The times that the hardware accesses these are entirely dependent on how the performance monitoring hardware events are set up. In other words, there is no way for the kernel to tell when the hardware might access these buffers. To avoid perf crashes, place 'debug_store' allocate pages and map them into the cpu_entry_area. The PEBS fixup buffer does not need this treatment. 
[ tglx: Got rid of the kaiser_add_mapping() complication ] Signed-off-by: Hugh Dickins Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/ds.c | 125 ++++++++++++++++++++++------------- arch/x86/events/perf_event.h | 2 + 2 files changed, 82 insertions(+), 45 deletions(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 6522f0279cb89b..8f0aace08b8797 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -3,6 +3,7 @@ #include #include +#include #include #include @@ -280,17 +281,52 @@ void fini_debug_store_on_cpu(int cpu) static DEFINE_PER_CPU(void *, insn_buffer); -static int alloc_pebs_buffer(int cpu) +static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + phys_addr_t pa; + size_t msz = 0; + + pa = virt_to_phys(addr); + for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE) + cea_set_pte(cea, pa, prot); +} + +static void ds_clear_cea(void *cea, size_t size) +{ + size_t msz = 0; + + for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE) + cea_set_pte(cea, 0, PAGE_NONE); +} + +static void *dsalloc_pages(size_t size, gfp_t flags, int cpu) +{ + unsigned int order = get_order(size); int node = cpu_to_node(cpu); - int max; - void *buffer, *ibuffer; + struct page *page; + + page = __alloc_pages_node(node, flags | __GFP_ZERO, order); + return page ? 
page_address(page) : NULL; +} + +static void dsfree_pages(const void *buffer, size_t size) +{ + if (buffer) + free_pages((unsigned long)buffer, get_order(size)); +} + +static int alloc_pebs_buffer(int cpu) +{ + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + size_t bsiz = x86_pmu.pebs_buffer_size; + int max, node = cpu_to_node(cpu); + void *buffer, *ibuffer, *cea; if (!x86_pmu.pebs) return 0; - buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node); + buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu); if (unlikely(!buffer)) return -ENOMEM; @@ -301,25 +337,27 @@ static int alloc_pebs_buffer(int cpu) if (x86_pmu.intel_cap.pebs_format < 2) { ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); if (!ibuffer) { - kfree(buffer); + dsfree_pages(buffer, bsiz); return -ENOMEM; } per_cpu(insn_buffer, cpu) = ibuffer; } - - max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size; - - ds->pebs_buffer_base = (u64)(unsigned long)buffer; + hwev->ds_pebs_vaddr = buffer; + /* Update the cpu entry area mapping */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; + ds->pebs_buffer_base = (unsigned long) cea; + ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL); ds->pebs_index = ds->pebs_buffer_base; - ds->pebs_absolute_maximum = ds->pebs_buffer_base + - max * x86_pmu.pebs_record_size; - + max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size); + ds->pebs_absolute_maximum = ds->pebs_buffer_base + max; return 0; } static void release_pebs_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *cea; if (!ds || !x86_pmu.pebs) return; @@ -327,73 +365,70 @@ static void release_pebs_buffer(int cpu) kfree(per_cpu(insn_buffer, cpu)); per_cpu(insn_buffer, cpu) = NULL; - kfree((void *)(unsigned long)ds->pebs_buffer_base); + /* Clear the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; + ds_clear_cea(cea, x86_pmu.pebs_buffer_size); ds->pebs_buffer_base = 0; + dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size); + hwev->ds_pebs_vaddr = NULL; } static int alloc_bts_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - int node = cpu_to_node(cpu); - int max, thresh; - void *buffer; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *buffer, *cea; + int max; if (!x86_pmu.bts) return 0; - buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node); + buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu); if (unlikely(!buffer)) { WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); return -ENOMEM; } - - max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; - thresh = max / 16; - - ds->bts_buffer_base = (u64)(unsigned long)buffer; + hwev->ds_bts_vaddr = buffer; + /* Update the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; + ds->bts_buffer_base = (unsigned long) cea; + ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL); ds->bts_index = ds->bts_buffer_base; - ds->bts_absolute_maximum = ds->bts_buffer_base + - max * BTS_RECORD_SIZE; - ds->bts_interrupt_threshold = ds->bts_absolute_maximum - - thresh * BTS_RECORD_SIZE; - + max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE); + ds->bts_absolute_maximum = ds->bts_buffer_base + max; + ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16); return 0; } static void 
release_bts_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *cea; if (!ds || !x86_pmu.bts) return; - kfree((void *)(unsigned long)ds->bts_buffer_base); + /* Clear the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; + ds_clear_cea(cea, BTS_BUFFER_SIZE); ds->bts_buffer_base = 0; + dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE); + hwev->ds_bts_vaddr = NULL; } static int alloc_ds_buffer(int cpu) { - int node = cpu_to_node(cpu); - struct debug_store *ds; - - ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node); - if (unlikely(!ds)) - return -ENOMEM; + struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store; + memset(ds, 0, sizeof(*ds)); per_cpu(cpu_hw_events, cpu).ds = ds; - return 0; } static void release_ds_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - - if (!ds) - return; - per_cpu(cpu_hw_events, cpu).ds = NULL; - kfree(ds); } void release_ds_buffers(void) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 373f9eda80b1b1..8e4ea143ed9640 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -199,6 +199,8 @@ struct cpu_hw_events { * Intel DebugStore bits */ struct debug_store *ds; + void *ds_pebs_vaddr; + void *ds_bts_vaddr; u64 pebs_enabled; int n_pebs; int n_large_pebs; From c125107490107fc4ce6bad0d9b45fd5e33bfa3f3 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Tue, 12 Dec 2017 07:56:44 -0800 Subject: [PATCH 1075/3326] x86/mm/64: Make a full PGD-entry size hole in the memory map commit 9f449772a3106bcdd4eb8fdeb281147b0e99fb30 upstream. Shrink vmalloc space from 16384TiB to 12800TiB to enlarge the hole starting at 0xff90000000000000 to be a full PGD entry. A subsequent patch will use this hole for the pagetable isolation LDT alias. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/x86_64/mm.txt | 4 ++-- arch/x86/include/asm/pgtable_64_types.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 51101708a03ae1..496a1dbf139dad 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -29,8 +29,8 @@ Virtual memory map with 5 level page tables: hole caused by [56:63] sign extension ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory -ff90000000000000 - ff91ffffffffffff (=49 bits) hole -ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space +ff90000000000000 - ff9fffffffffffff (=52 bits) hole +ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB) ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) ... unused hole ... 
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 3d27831bc58dfa..83e9489ae9443e 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -79,8 +79,8 @@ typedef struct { pteval_t pte; } pte_t; #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) #ifdef CONFIG_X86_5LEVEL -# define VMALLOC_SIZE_TB _AC(16384, UL) -# define __VMALLOC_BASE _AC(0xff92000000000000, UL) +# define VMALLOC_SIZE_TB _AC(12800, UL) +# define __VMALLOC_BASE _AC(0xffa0000000000000, UL) # define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) #else # define VMALLOC_SIZE_TB _AC(32, UL) From 7aef823ee7e9a74de0d2665d116bde557aa134ca Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Tue, 12 Dec 2017 07:56:45 -0800 Subject: [PATCH 1076/3326] x86/pti: Put the LDT in its own PGD if PTI is on commit f55f0501cbf65ec41cca5058513031b711730b1d upstream. With PTI enabled, the LDT must be mapped in the usermode tables somewhere. The LDT is per process, i.e. per mm. An earlier approach mapped the LDT on context switch into a fixmap area, but that's a big overhead and exhausted the fixmap space when NR_CPUS got big. Take advantage of the fact that there is an address space hole which provides a completely unused pgd. Use this pgd to manage per-mm LDT mappings. This has a down side: the LDT isn't (currently) randomized, and an attack that can write the LDT is instant root due to call gates (thanks, AMD, for leaving call gates in AMD64 but designing them wrong so they're only useful for exploits). This can be mitigated by making the LDT read-only or randomizing the mapping, either of which is straightforward on top of this patch. This will significantly slow down LDT users, but that shouldn't matter for important workloads -- the LDT is only used by DOSEMU(2), Wine, and very old libc implementations. [ tglx: Cleaned it up. ] Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Dave Hansen Cc: David Laight Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/x86_64/mm.txt | 3 +- arch/x86/include/asm/mmu_context.h | 59 +++++++++- arch/x86/include/asm/pgtable_64_types.h | 4 + arch/x86/include/asm/processor.h | 23 ++-- arch/x86/kernel/ldt.c | 139 +++++++++++++++++++++++- arch/x86/mm/dump_pagetables.c | 9 ++ 6 files changed, 220 insertions(+), 17 deletions(-) diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 496a1dbf139dad..ad41b3813f0a3a 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) ... unused hole ... ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) ... unused hole ... +fffffe0000000000 - fffffe7fffffffff (=39 bits) LDT remap for PTI fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... @@ -29,7 +30,7 @@ Virtual memory map with 5 level page tables: hole caused by [56:63] sign extension ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys.
memory -ff90000000000000 - ff9fffffffffffff (=52 bits) hole +ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB) ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 5ede7cae1d673e..c931b88982a0ff 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -50,10 +50,33 @@ struct ldt_struct { * call gates. On native, we could merge the ldt_struct and LDT * allocations, but it's not worth trying to optimize. */ - struct desc_struct *entries; - unsigned int nr_entries; + struct desc_struct *entries; + unsigned int nr_entries; + + /* + * If PTI is in use, then the entries array is not mapped while we're + * in user mode. The whole array will be aliased at the address + * given by ldt_slot_va(slot). We use two slots so that we can allocate + * and map, and enable a new LDT without invalidating the mapping + * of an older, still-in-use LDT. + * + * slot will be -1 if this LDT doesn't have an alias mapping. + */ + int slot; }; +/* This is a multiple of PAGE_SIZE. */ +#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE) + +static inline void *ldt_slot_va(int slot) +{ +#ifdef CONFIG_X86_64 + return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); +#else + BUG(); +#endif +} + /* * Used for LDT copy/destruction. */ @@ -64,6 +87,7 @@ static inline void init_new_context_ldt(struct mm_struct *mm) { } int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm); void destroy_context_ldt(struct mm_struct *mm); +void ldt_arch_exit_mmap(struct mm_struct *mm); #else /* CONFIG_MODIFY_LDT_SYSCALL */ static inline void init_new_context_ldt(struct mm_struct *mm) { } static inline int ldt_dup_context(struct mm_struct *oldmm, @@ -71,7 +95,8 @@ static inline int ldt_dup_context(struct mm_struct *oldmm, { return 0; } -static inline void destroy_context_ldt(struct mm_struct *mm) {} +static inline void destroy_context_ldt(struct mm_struct *mm) { } +static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { } #endif static inline void load_mm_ldt(struct mm_struct *mm) @@ -96,10 +121,31 @@ static inline void load_mm_ldt(struct mm_struct *mm) * that we can see. */ - if (unlikely(ldt)) - set_ldt(ldt->entries, ldt->nr_entries); - else + if (unlikely(ldt)) { + if (static_cpu_has(X86_FEATURE_PTI)) { + if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) { + /* + * Whoops -- either the new LDT isn't mapped + * (if slot == -1) or is mapped into a bogus + * slot (if slot > 1). + */ + clear_LDT(); + return; + } + + /* + * If page table isolation is enabled, ldt->entries + * will not be mapped in the userspace pagetables. + * Tell the CPU to access the LDT through the alias + * at ldt_slot_va(ldt->slot).
+ */ + set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries); + } else { + set_ldt(ldt->entries, ldt->nr_entries); + } + } else { clear_LDT(); + } #else clear_LDT(); #endif @@ -194,6 +240,7 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) static inline void arch_exit_mmap(struct mm_struct *mm) { paravirt_arch_exit_mmap(mm); + ldt_arch_exit_mmap(mm); } #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 83e9489ae9443e..b97a539bcdeee8 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -82,10 +82,14 @@ typedef struct { pteval_t pte; } pte_t; # define VMALLOC_SIZE_TB _AC(12800, UL) # define __VMALLOC_BASE _AC(0xffa0000000000000, UL) # define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) +# define LDT_PGD_ENTRY _AC(-112, UL) +# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #else # define VMALLOC_SIZE_TB _AC(32, UL) # define __VMALLOC_BASE _AC(0xffffc90000000000, UL) # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) +# define LDT_PGD_ENTRY _AC(-4, UL) +# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #endif #ifdef CONFIG_RANDOMIZE_MEMORY diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 9e482d8b0b9786..9c18da64daa920 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -851,13 +851,22 @@ static inline void spin_lock_prefetch(const void *x) #else /* - * User space process size. 47bits minus one guard page. The guard - * page is necessary on Intel CPUs: if a SYSCALL instruction is at - * the highest possible canonical userspace address, then that - * syscall will enter the kernel with a non-canonical return - * address, and SYSRET will explode dangerously. We avoid this - * particular problem by preventing anything from being mapped - * at the maximum canonical address. + * User space process size. This is the first address outside the user range. + * There are a few constraints that determine this: + * + * On Intel CPUs, if a SYSCALL instruction is at the highest canonical + * address, then that syscall will enter the kernel with a + * non-canonical return address, and SYSRET will explode dangerously. + * We avoid this particular problem by preventing anything executable + * from being mapped at the maximum canonical address. + * + * On AMD CPUs in the Ryzen family, there's a nasty bug in which the + * CPUs malfunction if they execute code from the highest canonical page. + * They'll speculate right off the end of the canonical space, and + * bad things happen. This is worked around in the same way as the + * Intel problem. + * + * With page table isolation enabled, we map the LDT in ... [stay tuned] */ #define TASK_SIZE_MAX ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index a6b5d62f45a737..9629c5d8267a8e 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -51,13 +52,11 @@ static void refresh_ldt_segments(void) static void flush_ldt(void *__mm) { struct mm_struct *mm = __mm; - mm_context_t *pc; if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm) return; - pc = &mm->context; - set_ldt(pc->ldt->entries, pc->ldt->nr_entries); + load_mm_ldt(mm); refresh_ldt_segments(); } @@ -94,10 +93,121 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries) return NULL; } + /* The new LDT isn't aliased for PTI yet. 
*/ + new_ldt->slot = -1; + new_ldt->nr_entries = num_entries; return new_ldt; } +/* + * If PTI is enabled, this maps the LDT into the kernelmode and + * usermode tables for the given mm. + * + * There is no corresponding unmap function. Even if the LDT is freed, we + * leave the PTEs around until the slot is reused or the mm is destroyed. + * This is harmless: the LDT is always in ordinary memory, and no one will + * access the freed slot. + * + * If we wanted to unmap freed LDTs, we'd also need to do a flush to make + * it useful, and the flush would slow down modify_ldt(). + */ +static int +map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + bool is_vmalloc, had_top_level_entry; + unsigned long va; + spinlock_t *ptl; + pgd_t *pgd; + int i; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return 0; + + /* + * Any given ldt_struct should have map_ldt_struct() called at most + * once. + */ + WARN_ON(ldt->slot != -1); + + /* + * Did we already have the top level entry allocated? We can't + * use pgd_none() for this because it doesn't do anything on + * 4-level page table kernels. + */ + pgd = pgd_offset(mm, LDT_BASE_ADDR); + had_top_level_entry = (pgd->pgd != 0); + + is_vmalloc = is_vmalloc_addr(ldt->entries); + + for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) { + unsigned long offset = i << PAGE_SHIFT; + const void *src = (char *)ldt->entries + offset; + unsigned long pfn; + pte_t pte, *ptep; + + va = (unsigned long)ldt_slot_va(slot) + offset; + pfn = is_vmalloc ? vmalloc_to_pfn(src) : + page_to_pfn(virt_to_page(src)); + /* + * Treat the PTI LDT range as a *userspace* range. + * get_locked_pte() will allocate all needed pagetables + * and account for them in this mm. + */ + ptep = get_locked_pte(mm, va, &ptl); + if (!ptep) + return -ENOMEM; + pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)); + set_pte_at(mm, va, ptep, pte); + pte_unmap_unlock(ptep, ptl); + } + + if (mm->context.ldt) { + /* + * We already had an LDT. The top-level entry should already + * have been allocated and synchronized with the usermode + * tables. + */ + WARN_ON(!had_top_level_entry); + if (static_cpu_has(X86_FEATURE_PTI)) + WARN_ON(!kernel_to_user_pgdp(pgd)->pgd); + } else { + /* + * This is the first time we're mapping an LDT for this process. + * Sync the pgd to the usermode tables. + */ + WARN_ON(had_top_level_entry); + if (static_cpu_has(X86_FEATURE_PTI)) { + WARN_ON(kernel_to_user_pgdp(pgd)->pgd); + set_pgd(kernel_to_user_pgdp(pgd), *pgd); + } + } + + va = (unsigned long)ldt_slot_va(slot); + flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0); + + ldt->slot = slot; +#endif + return 0; +} + +static void free_ldt_pgtables(struct mm_struct *mm) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + struct mmu_gather tlb; + unsigned long start = LDT_BASE_ADDR; + unsigned long end = start + (1UL << PGDIR_SHIFT); + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + tlb_gather_mmu(&tlb, mm, start, end); + free_pgd_range(&tlb, start, end, start, end); + tlb_finish_mmu(&tlb, start, end); +#endif +} + /* After calling this, the LDT is immutable.
*/ static void finalize_ldt_struct(struct ldt_struct *ldt) { @@ -156,6 +266,12 @@ int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm) new_ldt->nr_entries * LDT_ENTRY_SIZE); finalize_ldt_struct(new_ldt); + retval = map_ldt_struct(mm, new_ldt, 0); + if (retval) { + free_ldt_pgtables(mm); + free_ldt_struct(new_ldt); + goto out_unlock; + } mm->context.ldt = new_ldt; out_unlock: @@ -174,6 +290,11 @@ void destroy_context_ldt(struct mm_struct *mm) mm->context.ldt = NULL; } +void ldt_arch_exit_mmap(struct mm_struct *mm) +{ + free_ldt_pgtables(mm); +} + static int read_ldt(void __user *ptr, unsigned long bytecount) { struct mm_struct *mm = current->mm; @@ -287,6 +408,18 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) new_ldt->entries[ldt_info.entry_number] = ldt; finalize_ldt_struct(new_ldt); + /* + * If we are using PTI, map the new LDT into the userspace pagetables. + * If there is already an LDT, use the other slot so that other CPUs + * will continue to use the old LDT until install_ldt() switches + * them over to the new LDT. + */ + error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0); + if (error) { + free_ldt_struct(old_ldt); + goto out_unlock; + } + install_ldt(mm, new_ldt); free_ldt_struct(old_ldt); error = 0; diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 43dedbfb7257a3..690eaf31ca3421 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -52,11 +52,17 @@ enum address_markers_idx { USER_SPACE_NR = 0, KERNEL_SPACE_NR, LOW_KERNEL_NR, +#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL) + LDT_NR, +#endif VMALLOC_START_NR, VMEMMAP_START_NR, #ifdef CONFIG_KASAN KASAN_SHADOW_START_NR, KASAN_SHADOW_END_NR, +#endif +#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL) + LDT_NR, #endif CPU_ENTRY_AREA_NR, #ifdef CONFIG_X86_ESPFIX64 @@ -81,6 +87,9 @@ static struct addr_marker address_markers[] = { #ifdef CONFIG_KASAN [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, +#endif +#ifdef CONFIG_MODIFY_LDT_SYSCALL + [LDT_NR] = { LDT_BASE_ADDR, "LDT remap" }, #endif [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" }, #ifdef CONFIG_X86_ESPFIX64 From 9617ee896217558a2488b6dc71968a4489fb18b6 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Tue, 12 Dec 2017 07:56:42 -0800 Subject: [PATCH 1077/3326] x86/pti: Map the vsyscall page if needed commit 85900ea51577e31b186e523c8f4e068c79ecc7d3 upstream. Make VSYSCALLs work fully in PTI mode by mapping them properly to the user space visible page tables. [ tglx: Hide unused functions (Patch by Arnd Bergmann) ] Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/vsyscall/vsyscall_64.c | 6 +-- arch/x86/include/asm/vsyscall.h | 1 + arch/x86/mm/pti.c | 65 +++++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 3 deletions(-) diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 1faf40f2dda9a8..577fa8adb785ba 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -344,14 +344,14 @@ int in_gate_area_no_mm(unsigned long addr) * vsyscalls but leave the page not present. 
If so, we skip calling * this. */ -static void __init set_vsyscall_pgtable_user_bits(void) +void __init set_vsyscall_pgtable_user_bits(pgd_t *root) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; - pgd = pgd_offset_k(VSYSCALL_ADDR); + pgd = pgd_offset_pgd(root, VSYSCALL_ADDR); set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); p4d = p4d_offset(pgd, VSYSCALL_ADDR); #if CONFIG_PGTABLE_LEVELS >= 5 @@ -373,7 +373,7 @@ void __init map_vsyscall(void) vsyscall_mode == NATIVE ? PAGE_KERNEL_VSYSCALL : PAGE_KERNEL_VVAR); - set_vsyscall_pgtable_user_bits(); + set_vsyscall_pgtable_user_bits(swapper_pg_dir); } BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index d9a7c659009c94..b986b2ca688a0e 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h @@ -7,6 +7,7 @@ #ifdef CONFIG_X86_VSYSCALL_EMULATION extern void map_vsyscall(void); +extern void set_vsyscall_pgtable_user_bits(pgd_t *root); /* * Called on instruction fetch fault in vsyscall page. diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index b1c38ef9fbbb78..bce8aea6560621 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -38,6 +38,7 @@ #include #include +#include #include #include #include @@ -223,6 +224,69 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) return pmd_offset(pud, address); } +#ifdef CONFIG_X86_VSYSCALL_EMULATION +/* + * Walk the shadow copy of the page tables (optionally) trying to allocate + * page table pages on the way down. Does not support large pages. + * + * Note: this is only used when mapping *new* kernel data into the + * user/shadow page tables. It is never used for userspace data. + * + * Returns a pointer to a PTE on success, or NULL on failure. + */ +static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) +{ + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + pmd_t *pmd = pti_user_pagetable_walk_pmd(address); + pte_t *pte; + + /* We can't do anything sensible if we hit a large mapping. */ + if (pmd_large(*pmd)) { + WARN_ON(1); + return NULL; + } + + if (pmd_none(*pmd)) { + unsigned long new_pte_page = __get_free_page(gfp); + if (!new_pte_page) + return NULL; + + if (pmd_none(*pmd)) { + set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page))); + new_pte_page = 0; + } + if (new_pte_page) + free_page(new_pte_page); + } + + pte = pte_offset_kernel(pmd, address); + if (pte_flags(*pte) & _PAGE_USER) { + WARN_ONCE(1, "attempt to walk to user pte\n"); + return NULL; + } + return pte; +} + +static void __init pti_setup_vsyscall(void) +{ + pte_t *pte, *target_pte; + unsigned int level; + + pte = lookup_address(VSYSCALL_ADDR, &level); + if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte)) + return; + + target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR); + if (WARN_ON(!target_pte)) + return; + + *target_pte = *pte; + set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir)); +} +#else +static void __init pti_setup_vsyscall(void) { } +#endif + static void __init pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear) { @@ -319,4 +383,5 @@ void __init pti_init(void) pti_clone_user_shared(); pti_clone_entry_text(); pti_setup_espfix64(); + pti_setup_vsyscall(); } From c796e2324094f098575e47ec6d19f22cc4a4f9b9 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:57 +0100 Subject: [PATCH 1078/3326] x86/mm: Allow flushing for future ASID switches commit 2ea907c4fe7b78e5840c1dc07800eae93248cad1 upstream. 
If changing the page tables in such a way that an invalidation of all contexts (aka. PCIDs / ASIDs) is required, they can be actively invalidated by: 1. INVPCID for each PCID (works for single pages too). 2. Load CR3 with each PCID without the NOFLUSH bit set 3. Load CR3 with the NOFLUSH bit set for each and do INVLPG for each address. But, none of these are really feasible since there are ~6 ASIDs (12 with PAGE_TABLE_ISOLATION) at the time that invalidation is required. Instead of actively invalidating them, invalidate the *current* context and also mark the cpu_tlbstate _quickly_ to indicate future invalidation to be required. At the next context-switch, look for this indicator ('invalidate_other' being set) invalidate all of the cpu_tlbstate.ctxs[] entries. This ensures that any future context switches will do a full flush of the TLB, picking up the previous changes. [ tglx: Folded more fixups from Peter ] Signed-off-by: Dave Hansen Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/tlbflush.h | 37 ++++++++++++++++++++++++++------- arch/x86/mm/tlb.c | 35 +++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 171b429f43a266..490a706fdba810 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -134,6 +134,17 @@ struct tlb_state { */ bool is_lazy; + /* + * If set we changed the page tables in such a way that we + * needed an invalidation of all contexts (aka. PCIDs / ASIDs). + * This tells us to go invalidate all the non-loaded ctxs[] + * on the next context switch. + * + * The current ctx was kept up-to-date as it ran and does not + * need to be invalidated. + */ + bool invalidate_other; + /* * Access to this CR4 shadow and to H/W CR4 is protected by * disabling interrupts when modifying either one. @@ -211,6 +222,14 @@ static inline unsigned long cr4_read_shadow(void) return this_cpu_read(cpu_tlbstate.cr4); } +/* + * Mark all other ASIDs as invalid, preserves the current. + */ +static inline void invalidate_other_asid(void) +{ + this_cpu_write(cpu_tlbstate.invalidate_other, true); +} + /* * Save some of cr4 feature set we're using (e.g. Pentium 4MB * enable and PPro Global page enable), so that any CPU's that boot @@ -298,14 +317,6 @@ static inline void __flush_tlb_all(void) */ __flush_tlb(); } - - /* - * Note: if we somehow had PCID but not PGE, then this wouldn't work -- - * we'd end up flushing kernel translations for the current ASID but - * we might fail to flush kernel translations for other cached ASIDs. - * - * To avoid this issue, we force PCID off if PGE is off. - */ } /* @@ -315,6 +326,16 @@ static inline void __flush_tlb_one(unsigned long addr) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + /* + * __flush_tlb_single() will have cleared the TLB entry for this ASID, + * but since kernel space is replicated across all, we must also + * invalidate all others. 
+ */ + invalidate_other_asid(); } #define TLB_FLUSH_ALL -1UL diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 0a1be3adc97eee..254c9eb79fe5e7 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -28,6 +28,38 @@ * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ +/* + * We get here when we do something requiring a TLB invalidation + * but could not go invalidate all of the contexts. We do the + * necessary invalidation by clearing out the 'ctx_id' which + * forces a TLB flush when the context is loaded. + */ +void clear_asid_other(void) +{ + u16 asid; + + /* + * This is only expected to be set if we have disabled + * kernel _PAGE_GLOBAL pages. + */ + if (!static_cpu_has(X86_FEATURE_PTI)) { + WARN_ON_ONCE(1); + return; + } + + for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { + /* Do not need to flush the current asid */ + if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) + continue; + /* + * Make sure the next time we go to switch to + * this asid, we do a flush: + */ + this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); + } + this_cpu_write(cpu_tlbstate.invalidate_other, false); +} + atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); @@ -42,6 +74,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, return; } + if (this_cpu_read(cpu_tlbstate.invalidate_other)) + clear_asid_other(); + for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != next->context.ctx_id) From 954339c41cceca991c0aec6dd3b7e164c5b9f48b Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:07:58 +0100 Subject: [PATCH 1079/3326] x86/mm: Abstract switching CR3 commit 48e111982cda033fec832c6b0592c2acedd85d04 upstream. In preparation to adding additional PCID flushing, abstract the loading of a new ASID into CR3. [ PeterZ: Split out from big combo patch ] Signed-off-by: Dave Hansen Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/tlb.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 254c9eb79fe5e7..42a8875f73fec9 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -100,6 +100,24 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, *need_flush = true; } +static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush) +{ + unsigned long new_mm_cr3; + + if (need_flush) { + new_mm_cr3 = build_cr3(pgdir, new_asid); + } else { + new_mm_cr3 = build_cr3_noflush(pgdir, new_asid); + } + + /* + * Caution: many callers of this function expect + * that load_cr3() is serializing and orders TLB + * fills with respect to the mm_cpumask writes. 
+ */ + write_cr3(new_mm_cr3); +} + void leave_mm(int cpu) { struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); @@ -230,7 +248,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); - write_cr3(build_cr3(next->pgd, new_asid)); + load_new_mm_cr3(next->pgd, new_asid, true); /* * NB: This gets called via leave_mm() in the idle path @@ -243,7 +261,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } else { /* The new ASID is already up to date. */ - write_cr3(build_cr3_noflush(next->pgd, new_asid)); + load_new_mm_cr3(next->pgd, new_asid, false); /* See above wrt _rcuidle. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); From b63812b81349e5d1a35107e2464547187bc25a61 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 4 Dec 2017 15:07:59 +0100 Subject: [PATCH 1080/3326] x86/mm: Use/Fix PCID to optimize user/kernel switches commit 6fd166aae78c0ab738d49bda653cbd9e3b1491cf upstream. We can use PCID to retain the TLBs across CR3 switches; including those now part of the user/kernel switch. This increases performance of kernel entry/exit at the cost of more expensive/complicated TLB flushing. Now that we have two address spaces, one for kernel and one for user space, we need two PCIDs per mm. We use the top PCID bit to indicate a user PCID (just like we use the PFN LSB for the PGD). Since we do TLB invalidation from kernel space, the existing code will only invalidate the kernel PCID, we augment that by marking the corresponding user PCID invalid, and upon switching back to userspace, use a flushing CR3 write for the switch. In order to access the user_pcid_flush_mask we use PER_CPU storage, which means the previously established SWAPGS vs CR3 ordering is now mandatory and required. Having to do this memory access does require additional registers, most sites have a functioning stack and we can spill one (RAX), sites without functional stack need to otherwise provide the second scratch register. Note: PCID is generally available on Intel Sandybridge and later CPUs. Note: Up until this point TLB flushing was broken in this series. Based-on-code-from: Dave Hansen Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 72 +++++++++++++--- arch/x86/entry/entry_64.S | 9 +- arch/x86/entry/entry_64_compat.S | 4 +- arch/x86/include/asm/processor-flags.h | 5 ++ arch/x86/include/asm/tlbflush.h | 91 ++++++++++++++++++--- arch/x86/include/uapi/asm/processor-flags.h | 7 +- arch/x86/kernel/asm-offsets.c | 4 + arch/x86/mm/init.c | 2 +- arch/x86/mm/tlb.c | 1 + 9 files changed, 162 insertions(+), 33 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 3d3389a92c3395..7894e5c0eef735 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -3,6 +3,9 @@ #include #include #include +#include +#include +#include /* @@ -191,17 +194,21 @@ For 32-bit we have the following conventions - kernel is built with #ifdef CONFIG_PAGE_TABLE_ISOLATION -/* PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two halves: */ -#define PTI_SWITCH_MASK (1< #include #include -#include "calling.h" #include #include #include @@ -40,6 +39,8 @@ #include #include +#include "calling.h" + .code64 .section .entry.text, "ax" @@ -406,7 +407,7 @@ syscall_return_via_sysret: * We are on the trampoline stack. All regs except RDI are live. * We can do future final exit work right here. */ - SWITCH_TO_USER_CR3 scratch_reg=%rdi + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi popq %rdi popq %rsp @@ -744,7 +745,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) * We can do future final exit work right here. */ - SWITCH_TO_USER_CR3 scratch_reg=%rdi + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi /* Restore RDI. */ popq %rdi @@ -857,7 +858,7 @@ native_irq_return_ldt: */ orq PER_CPU_VAR(espfix_stack), %rax - SWITCH_TO_USER_CR3 scratch_reg=%rdi /* to user CR3 */ + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi SWAPGS /* to user GS */ popq %rdi /* Restore user RDI */ diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 05238b29895eea..40f17009ec20cd 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -275,9 +275,9 @@ sysret32_from_system_call: * switch until after after the last reference to the process * stack. * - * %r8 is zeroed before the sysret, thus safe to clobber. + * %r8/%r9 are zeroed before the sysret, thus safe to clobber. 
*/ - SWITCH_TO_USER_CR3 scratch_reg=%r8 + SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 xorq %r8, %r8 xorq %r9, %r9 diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h index 43212a43ee69fe..6a60fea90b9d9d 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h @@ -38,6 +38,11 @@ #define CR3_ADDR_MASK __sme_clr(0x7FFFFFFFFFFFF000ull) #define CR3_PCID_MASK 0xFFFull #define CR3_NOFLUSH BIT_ULL(63) + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define X86_CR3_PTI_SWITCH_BIT 11 +#endif + #else /* * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 490a706fdba810..5dcc38b16604d9 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -10,6 +10,8 @@ #include #include #include +#include +#include static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) { @@ -24,24 +26,54 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) /* There are 12 bits of space for ASIDS in CR3 */ #define CR3_HW_ASID_BITS 12 + /* * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for * user/kernel switches */ -#define PTI_CONSUMED_ASID_BITS 0 +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define PTI_CONSUMED_PCID_BITS 1 +#else +# define PTI_CONSUMED_PCID_BITS 0 +#endif + +#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS) -#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS) /* * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account * for them being zero-based. Another -1 is because ASID 0 is reserved for * use by non-PCID-aware users. */ -#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2) +#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2) + +/* + * 6 because 6 should be plenty and struct tlb_state will fit in two cache + * lines. + */ +#define TLB_NR_DYN_ASIDS 6 static inline u16 kern_pcid(u16 asid) { VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); + +#ifdef CONFIG_PAGE_TABLE_ISOLATION + /* + * Make sure that the dynamic ASID space does not confict with the + * bit we are using to switch between user and kernel ASIDs. + */ + BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT)); + /* + * The ASID being passed in here should have respected the + * MAX_ASID_AVAILABLE and thus never have the switch bit set. 
+ */ + VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT)); +#endif + /* + * The dynamically-assigned ASIDs that get passed in are small + * (mm == NULL then we borrow a mm which may change during a - * task switch and therefore we must not be preempted while we write CR3 - * back: + * If current->mm == NULL then we borrow a mm which may change + * during a task switch and therefore we must not be preempted + * while we write CR3 back: */ preempt_disable(); native_write_cr3(__native_read_cr3()); @@ -301,7 +361,14 @@ static inline void __native_flush_tlb_global(void) */ static inline void __native_flush_tlb_single(unsigned long addr) { + u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + invalidate_user_asid(loaded_mm_asid); } /* diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h index 53b4ca55ebb66e..97abdaab953570 100644 --- a/arch/x86/include/uapi/asm/processor-flags.h +++ b/arch/x86/include/uapi/asm/processor-flags.h @@ -78,7 +78,12 @@ #define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT) #define X86_CR3_PCD_BIT 4 /* Page Cache Disable */ #define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT) -#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */ + +#define X86_CR3_PCID_BITS 12 +#define X86_CR3_PCID_MASK (_AC((1UL << X86_CR3_PCID_BITS) - 1, UL)) + +#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */ +#define X86_CR3_PCID_NOFLUSH _BITULL(X86_CR3_PCID_NOFLUSH_BIT) /* * Intel CPU features in CR4 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 676b7cf4b62bf8..76417a9aab73c3 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -17,6 +17,7 @@ #include #include #include +#include #ifdef CONFIG_XEN #include @@ -94,6 +95,9 @@ void common(void) { BLANK(); DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); + /* TLB state for the entry code */ + OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask); + /* Layout info for cpu_entry_area */ OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss); OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index af75069fb11605..caeb8a7bf0a4e7 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -855,7 +855,7 @@ void __init zone_sizes_init(void) free_area_init_nodes(max_zone_pfns); } -DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { +__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { .loaded_mm = &init_mm, .next_asid = 1, .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 42a8875f73fec9..a1561957dccbb8 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -105,6 +105,7 @@ static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush) unsigned long new_mm_cr3; if (need_flush) { + invalidate_user_asid(new_asid); new_mm_cr3 = build_cr3(pgdir, new_asid); } else { new_mm_cr3 = build_cr3_noflush(pgdir, new_asid); From 36a72ab52c8d969a7a302082f52731c1be0e9ada Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 4 Dec 2017 15:08:00 +0100 Subject: [PATCH 1081/3326] x86/mm: Optimize RESTORE_CR3 commit 21e94459110252d41b45c0c8ba50fd72a664d50c upstream. Most NMI/paranoid exceptions will not in fact change pagetables and would thus not require TLB flushing, however RESTORE_CR3 uses flushing CR3 writes. 
Restores to kernel PCIDs can be NOFLUSH, because we explicitly flush the kernel mappings and now that we track which user PCIDs need flushing we can avoid those too when possible. This does mean RESTORE_CR3 needs an additional scratch_reg, luckily both sites have plenty available. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 30 ++++++++++++++++++++++++++++-- arch/x86/entry/entry_64.S | 4 ++-- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 7894e5c0eef735..45a63e00a6af9a 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -281,8 +281,34 @@ For 32-bit we have the following conventions - kernel is built with .Ldone_\@: .endm -.macro RESTORE_CR3 save_reg:req +.macro RESTORE_CR3 scratch_reg:req save_reg:req ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI + + ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID + + /* + * KERNEL pages can always resume with NOFLUSH as we do + * explicit flushes. + */ + bt $X86_CR3_PTI_SWITCH_BIT, \save_reg + jnc .Lnoflush_\@ + + /* + * Check if there's a pending flush for the user ASID we're + * about to set. + */ + movq \save_reg, \scratch_reg + andq $(0x7FF), \scratch_reg + bt \scratch_reg, THIS_CPU_user_pcid_flush_mask + jnc .Lnoflush_\@ + + btr \scratch_reg, THIS_CPU_user_pcid_flush_mask + jmp .Lwrcr3_\@ + +.Lnoflush_\@: + SET_NOFLUSH_BIT \save_reg + +.Lwrcr3_\@: /* * The CR3 write could be avoided when not changing its value, * but would require a CR3 read *and* a scratch register. @@ -301,7 +327,7 @@ For 32-bit we have the following conventions - kernel is built with .endm .macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req .endm -.macro RESTORE_CR3 save_reg:req +.macro RESTORE_CR3 scratch_reg:req save_reg:req .endm #endif diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index e15d4ba19b3d3e..dd696b966e5835 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1288,7 +1288,7 @@ ENTRY(paranoid_exit) testl %ebx, %ebx /* swapgs needed? */ jnz .Lparanoid_exit_no_swapgs TRACE_IRQS_IRETQ - RESTORE_CR3 save_reg=%r14 + RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 SWAPGS_UNSAFE_STACK jmp .Lparanoid_exit_restore .Lparanoid_exit_no_swapgs: @@ -1730,7 +1730,7 @@ end_repeat_nmi: movq $-1, %rsi call do_nmi - RESTORE_CR3 save_reg=%r14 + RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 testl %ebx, %ebx /* swapgs needed? */ jnz nmi_restore From c5548af97ae98f9d9e6ae5a9a005e605bd3c06b5 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:08:01 +0100 Subject: [PATCH 1082/3326] x86/mm: Use INVPCID for __native_flush_tlb_single() commit 6cff64b86aaaa07f89f50498055a20e45754b0c1 upstream. This uses INVPCID to shoot down individual lines of the user mapping instead of marking the entire user map as invalid. This could/might/possibly be faster. This for sure needs tlb_single_page_flush_ceiling to be redetermined; esp. since INVPCID is _slow_. 
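An aside on the exit-path machinery above, since it is spread across several hunks: the kernel cannot touch the user half of a PTI address space while it runs on the kernel PCID, so kernel-side invalidations only record a pending flush in the per-CPU user_pcid_flush_mask, and the SWITCH_TO_USER_CR3/RESTORE_CR3 macros test-and-clear that bit to choose between a flushing CR3 write and one with the NOFLUSH bit set. The following is a minimal single-CPU userspace sketch of that bookkeeping, not kernel code; the real mask is a cpu_tlbstate field indexed by the PCID value taken from the saved CR3.

/* sketch only: models the pending-flush bookkeeping, not the kernel code */
#include <stdint.h>
#include <stdio.h>

#define CR3_NOFLUSH	(1ULL << 63)	/* mirrors X86_CR3_PCID_NOFLUSH */

static uint16_t user_pcid_flush_mask;	/* one bit per (small) user PCID */

/* kernel-side flush: we cannot reach the user TLB entries right now, so
 * just remember that this user PCID must be flushed on the next return */
static void invalidate_user_pcid(unsigned int pcid)
{
	user_pcid_flush_mask |= (uint16_t)(1u << pcid);
}

/* exit path: decide whether the CR3 write may keep the user TLB */
static uint64_t exit_to_user_cr3(uint64_t user_cr3, unsigned int pcid)
{
	if (user_pcid_flush_mask & (1u << pcid)) {
		user_pcid_flush_mask &= (uint16_t)~(1u << pcid);	/* the btr above */
		return user_cr3;			/* flushing write */
	}
	return user_cr3 | CR3_NOFLUSH;			/* TLB entries survive */
}

int main(void)
{
	uint64_t user_cr3 = 0x1000;	/* pretend user page-table base */

	invalidate_user_pcid(2);
	printf("first return:  %#llx\n", (unsigned long long)exit_to_user_cr3(user_cr3, 2));
	printf("second return: %#llx\n", (unsigned long long)exit_to_user_cr3(user_cr3, 2));
	return 0;
}

The second return keeps the cached translations because the pending bit was consumed by the first one; that is the saving these patches are after, since most switches target a PCID with no flush pending.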
A detailed performance analysis is available here: https://lkml.kernel.org/r/3062e486-3539-8a1f-5724-16199420be71@intel.com [ Peterz: Split out from big combo patch ] Signed-off-by: Dave Hansen Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/tlbflush.h | 23 ++++++++++- arch/x86/mm/init.c | 64 +++++++++++++++++------------- 3 files changed, 60 insertions(+), 28 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index d8ec834ea88400..07cdd17157050e 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -197,6 +197,7 @@ #define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ +#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 5dcc38b16604d9..57072a1052fe2a 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -85,6 +85,18 @@ static inline u16 kern_pcid(u16 asid) return asid + 1; } +/* + * The user PCID is just the kernel one, plus the "switch bit". + */ +static inline u16 user_pcid(u16 asid) +{ + u16 ret = kern_pcid(asid); +#ifdef CONFIG_PAGE_TABLE_ISOLATION + ret |= 1 << X86_CR3_PTI_SWITCH_BIT; +#endif + return ret; +} + struct pgd_t; static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) { @@ -335,6 +347,8 @@ static inline void __native_flush_tlb_global(void) /* * Using INVPCID is considerably faster than a pair of writes * to CR4 sandwiched inside an IRQ flag save/restore. + * + * Note, this works with CR4.PCIDE=0 or 1. */ invpcid_flush_all(); return; @@ -368,7 +382,14 @@ static inline void __native_flush_tlb_single(unsigned long addr) if (!static_cpu_has(X86_FEATURE_PTI)) return; - invalidate_user_asid(loaded_mm_asid); + /* + * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1. + * Just use invalidate_user_asid() in case we are called early. + */ + if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) + invalidate_user_asid(loaded_mm_asid); + else + invpcid_flush_one(user_pcid(loaded_mm_asid), addr); } /* diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index caeb8a7bf0a4e7..80259ad8c38601 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -203,34 +203,44 @@ static void __init probe_page_size_mask(void) static void setup_pcid(void) { -#ifdef CONFIG_X86_64 - if (boot_cpu_has(X86_FEATURE_PCID)) { - if (boot_cpu_has(X86_FEATURE_PGE)) { - /* - * This can't be cr4_set_bits_and_update_boot() -- - * the trampoline code can't handle CR4.PCIDE and - * it wouldn't do any good anyway. Despite the name, - * cr4_set_bits_and_update_boot() doesn't actually - * cause the bits in question to remain set all the - * way through the secondary boot asm. 
- * - * Instead, we brute-force it and set CR4.PCIDE - * manually in start_secondary(). - */ - cr4_set_bits(X86_CR4_PCIDE); - } else { - /* - * flush_tlb_all(), as currently implemented, won't - * work if PCID is on but PGE is not. Since that - * combination doesn't exist on real hardware, there's - * no reason to try to fully support it, but it's - * polite to avoid corrupting data if we're on - * an improperly configured VM. - */ - setup_clear_cpu_cap(X86_FEATURE_PCID); - } + if (!IS_ENABLED(CONFIG_X86_64)) + return; + + if (!boot_cpu_has(X86_FEATURE_PCID)) + return; + + if (boot_cpu_has(X86_FEATURE_PGE)) { + /* + * This can't be cr4_set_bits_and_update_boot() -- the + * trampoline code can't handle CR4.PCIDE and it wouldn't + * do any good anyway. Despite the name, + * cr4_set_bits_and_update_boot() doesn't actually cause + * the bits in question to remain set all the way through + * the secondary boot asm. + * + * Instead, we brute-force it and set CR4.PCIDE manually in + * start_secondary(). + */ + cr4_set_bits(X86_CR4_PCIDE); + + /* + * INVPCID's single-context modes (2/3) only work if we set + * X86_CR4_PCIDE, *and* we INVPCID support. It's unusable + * on systems that have X86_CR4_PCIDE clear, or that have + * no INVPCID support at all. + */ + if (boot_cpu_has(X86_FEATURE_INVPCID)) + setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE); + } else { + /* + * flush_tlb_all(), as currently implemented, won't work if + * PCID is on but PGE is not. Since that combination + * doesn't exist on real hardware, there's no reason to try + * to fully support it, but it's polite to avoid corrupting + * data if we're on an improperly configured VM. + */ + setup_clear_cpu_cap(X86_FEATURE_PCID); } -#endif } #ifdef CONFIG_X86_32 From ef4b38472d6b1bf587554dfc7d5ab7abc835c1a5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 5 Dec 2017 13:34:53 +0100 Subject: [PATCH 1083/3326] x86/mm: Clarify the whole ASID/kernel PCID/user PCID naming commit 0a126abd576ebc6403f063dbe20cf7416c9d9393 upstream. Ideally we'd also use sparse to enforce this separation so it becomes much more difficult to mess up. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/tlbflush.h | 55 ++++++++++++++++++++++++++------- 1 file changed, 43 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 57072a1052fe2a..b519da4fc03c7c 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -13,16 +13,33 @@ #include #include -static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) -{ - /* - * Bump the generation count. This also serves as a full barrier - * that synchronizes with switch_mm(): callers are required to order - * their read of mm_cpumask after their writes to the paging - * structures. - */ - return atomic64_inc_return(&mm->context.tlb_gen); -} +/* + * The x86 feature is called PCID (Process Context IDentifier). It is similar + * to what is traditionally called ASID on the RISC processors. 
+ * + * We don't use the traditional ASID implementation, where each process/mm gets + * its own ASID and flush/restart when we run out of ASID space. + * + * Instead we have a small per-cpu array of ASIDs and cache the last few mm's + * that came by on this CPU, allowing cheaper switch_mm between processes on + * this CPU. + * + * We end up with different spaces for different things. To avoid confusion we + * use different names for each of them: + * + * ASID - [0, TLB_NR_DYN_ASIDS-1] + * the canonical identifier for an mm + * + * kPCID - [1, TLB_NR_DYN_ASIDS] + * the value we write into the PCID part of CR3; corresponds to the + * ASID+1, because PCID 0 is special. + * + * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS] + * for KPTI each mm has two address spaces and thus needs two + * PCID values, but we can still do with a single ASID denomination + * for each mm. Corresponds to kPCID + 2048. + * + */ /* There are 12 bits of space for ASIDS in CR3 */ #define CR3_HW_ASID_BITS 12 @@ -41,7 +58,7 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) /* * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account - * for them being zero-based. Another -1 is because ASID 0 is reserved for + * for them being zero-based. Another -1 is because PCID 0 is reserved for * use by non-PCID-aware users. */ #define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2) @@ -52,6 +69,9 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) */ #define TLB_NR_DYN_ASIDS 6 +/* + * Given @asid, compute kPCID + */ static inline u16 kern_pcid(u16 asid) { VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); @@ -86,7 +106,7 @@ static inline u16 kern_pcid(u16 asid) } /* - * The user PCID is just the kernel one, plus the "switch bit". + * Given @asid, compute uPCID */ static inline u16 user_pcid(u16 asid) { @@ -484,6 +504,17 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a) void native_flush_tlb_others(const struct cpumask *cpumask, const struct flush_tlb_info *info); +static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) +{ + /* + * Bump the generation count. This also serves as a full barrier + * that synchronizes with switch_mm(): callers are required to order + * their read of mm_cpumask after their writes to the paging + * structures. + */ + return atomic64_inc_return(&mm->context.tlb_gen); +} + static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch, struct mm_struct *mm) { From 33d9d7836f0fa02777667d72bc815c12fbe61cac Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 19 Dec 2017 22:33:46 +0100 Subject: [PATCH 1084/3326] x86/dumpstack: Indicate in Oops whether PTI is configured and enabled commit 5f26d76c3fd67c48806415ef8b1116c97beff8ba upstream. CONFIG_PAGE_TABLE_ISOLATION is relatively new and intrusive feature that may still have some corner cases which could take some time to manifest and be fixed. It would be useful to have Oops messages indicate whether it was enabled for building the kernel, and whether it was disabled during boot. Example of fully enabled: Oops: 0001 [#1] SMP PTI Example of enabled during build, but disabled during boot: Oops: 0001 [#1] SMP NOPTI We can decide to remove this after the feature has been tested in the field long enough. 
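As a quick illustration of the numbering spaces documented in the comment above, here is a standalone sketch of the arithmetic: kPCID is ASID + 1 because PCID 0 is reserved, uPCID is kPCID plus 2048 via the bit-11 switch bit, and bit 63 requests a no-flush CR3 load. The build_cr3() model below simply ORs the page-table base with the PCID, which is an assumption kept for illustration rather than the exact kernel helper.

/* sketch only: the ASID -> kPCID/uPCID mapping described above */
#include <stdint.h>
#include <stdio.h>

#define TLB_NR_DYN_ASIDS	6
#define X86_CR3_PTI_SWITCH_BIT	11
#define X86_CR3_PCID_NOFLUSH	(1ULL << 63)

static unsigned int kern_pcid(unsigned int asid)
{
	return asid + 1;			/* PCID 0 is reserved */
}

static unsigned int user_pcid(unsigned int asid)
{
	return kern_pcid(asid) | (1u << X86_CR3_PTI_SWITCH_BIT);	/* kPCID + 2048 */
}

static uint64_t build_cr3(uint64_t pgd_pa, unsigned int asid, int noflush)
{
	uint64_t cr3 = pgd_pa | kern_pcid(asid);	/* assumes a 4k-aligned base */

	return noflush ? cr3 | X86_CR3_PCID_NOFLUSH : cr3;
}

int main(void)
{
	for (unsigned int asid = 0; asid < TLB_NR_DYN_ASIDS; asid++)
		printf("ASID %u -> kPCID %u, uPCID %u\n",
		       asid, kern_pcid(asid), user_pcid(asid));

	printf("cr3 flush:   %#llx\n", (unsigned long long)build_cr3(0x12345000, 2, 0));
	printf("cr3 noflush: %#llx\n", (unsigned long long)build_cr3(0x12345000, 2, 1));
	return 0;
}

The printed ranges match the comment: kPCIDs 1 through 6 and uPCIDs 2049 through 2054 for the six dynamic ASIDs.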
[ tglx: Made it use boot_cpu_has() as requested by Borislav ] Signed-off-by: Vlastimil Babka Signed-off-by: Thomas Gleixner Reviewed-by: Eduardo Valentin Acked-by: Dave Hansen Cc: Andy Lutomirski Cc: Andy Lutomirsky Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: bpetkov@suse.de Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: jkosina@suse.cz Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/dumpstack.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 36b17e0febe862..5fa110699ed275 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -297,11 +297,13 @@ int __die(const char *str, struct pt_regs *regs, long err) unsigned long sp; #endif printk(KERN_DEFAULT - "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter, + "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter, IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", IS_ENABLED(CONFIG_SMP) ? " SMP" : "", debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", - IS_ENABLED(CONFIG_KASAN) ? " KASAN" : ""); + IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "", + IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ? + (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : ""); if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) From 3dfd9fd8d897214b1a880c7fd8ed36b88faa1c02 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Mon, 4 Dec 2017 15:08:03 +0100 Subject: [PATCH 1085/3326] x86/mm/pti: Add Kconfig commit 385ce0ea4c078517fa51c261882c4e72fba53005 upstream. Finally allow CONFIG_PAGE_TABLE_ISOLATION to be enabled. PARAVIRT generally requires that the kernel not manage its own page tables. It also means that the hypervisor and kernel must agree wholeheartedly about what format the page tables are in and what they contain. PAGE_TABLE_ISOLATION, unfortunately, changes the rules and they can not be used together. I've seen conflicting feedback from maintainers lately about whether they want the Kconfig magic to go first or last in a patch series. It's going last here because the partially-applied series leads to kernels that can not boot in a bunch of cases. I did a run through the entire series with CONFIG_PAGE_TABLE_ISOLATION=y to look for build errors, though. [ tglx: Removed SMP and !PARAVIRT dependencies as they not longer exist ] Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- security/Kconfig | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/security/Kconfig b/security/Kconfig index e8e449444e658b..6614b9312b45ef 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -54,6 +54,17 @@ config SECURITY_NETWORK implement socket and networking access controls. If you are unsure how to answer this question, answer N. 
+config PAGE_TABLE_ISOLATION + bool "Remove the kernel mapping in user mode" + depends on X86_64 && !UML + default y + help + This feature reduces the number of hardware side channels by + ensuring that the majority of kernel addresses are not mapped + into userspace. + + See Documentation/x86/pagetable-isolation.txt for more details. + config SECURITY_INFINIBAND bool "Infiniband Security Hooks" depends on SECURITY && INFINIBAND From dfa58126d7630c0db0b64fa7bd15741293b517b5 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 4 Dec 2017 15:08:04 +0100 Subject: [PATCH 1086/3326] x86/mm/dump_pagetables: Add page table directory to the debugfs VFS hierarchy commit 75298aa179d56cd64f54e58a19fffc8ab922b4c0 upstream. The upcoming support for dumping the kernel and the user space page tables of the current process would create more random files in the top level debugfs directory. Add a page table directory and move the existing file to it. Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/debug_pagetables.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c index bfcffdf6c5775f..d1449fb6dc7a0d 100644 --- a/arch/x86/mm/debug_pagetables.c +++ b/arch/x86/mm/debug_pagetables.c @@ -22,21 +22,26 @@ static const struct file_operations ptdump_fops = { .release = single_release, }; -static struct dentry *pe; +static struct dentry *dir, *pe; static int __init pt_dump_debug_init(void) { - pe = debugfs_create_file("kernel_page_tables", S_IRUSR, NULL, NULL, - &ptdump_fops); - if (!pe) + dir = debugfs_create_dir("page_tables", NULL); + if (!dir) return -ENOMEM; + pe = debugfs_create_file("kernel", 0400, dir, NULL, &ptdump_fops); + if (!pe) + goto err; return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; } static void __exit pt_dump_debug_exit(void) { - debugfs_remove_recursive(pe); + debugfs_remove_recursive(dir); } module_init(pt_dump_debug_init); From 27e16c33bb796cceb92e339998d30145076b43cd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:08:05 +0100 Subject: [PATCH 1087/3326] x86/mm/dump_pagetables: Check user space page table for WX pages commit b4bf4f924b1d7bade38fd51b2e401d20d0956e4d upstream. ptdump_walk_pgd_level_checkwx() checks the kernel page table for WX pages, but does not check the PAGE_TABLE_ISOLATION user space page table. Restructure the code so that dmesg output is selected by an explicit argument and not implicit via checking the pgd argument for !NULL. Add the check for the user space page table. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/pgtable.h | 1 + arch/x86/mm/debug_pagetables.c | 2 +- arch/x86/mm/dump_pagetables.c | 30 +++++++++++++++++++++++++----- 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 9aaeea68783469..6b8bd9016eeb73 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -28,6 +28,7 @@ extern pgd_t early_top_pgt[PTRS_PER_PGD]; int __init __early_make_pgtable(unsigned long address, pmdval_t pmd); void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd); +void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd); void ptdump_walk_pgd_level_checkwx(void); #ifdef CONFIG_DEBUG_WX diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c index d1449fb6dc7a0d..8e70c1599e517f 100644 --- a/arch/x86/mm/debug_pagetables.c +++ b/arch/x86/mm/debug_pagetables.c @@ -5,7 +5,7 @@ static int ptdump_show(struct seq_file *m, void *v) { - ptdump_walk_pgd_level(m, NULL); + ptdump_walk_pgd_level_debugfs(m, NULL); return 0; } diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 690eaf31ca3421..17f5b417f95e9e 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -476,7 +476,7 @@ static inline bool is_hypervisor_range(int idx) } static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, - bool checkwx) + bool checkwx, bool dmesg) { #ifdef CONFIG_X86_64 pgd_t *start = (pgd_t *) &init_top_pgt; @@ -489,7 +489,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, if (pgd) { start = pgd; - st.to_dmesg = true; + st.to_dmesg = dmesg; } st.check_wx = checkwx; @@ -527,13 +527,33 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd) { - ptdump_walk_pgd_level_core(m, pgd, false); + ptdump_walk_pgd_level_core(m, pgd, false, true); +} + +void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd) +{ + ptdump_walk_pgd_level_core(m, pgd, false, false); +} +EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs); + +static void ptdump_walk_user_pgd_level_checkwx(void) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + pgd_t *pgd = (pgd_t *) &init_top_pgt; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + pr_info("x86/mm: Checking user space page tables\n"); + pgd = kernel_to_user_pgdp(pgd); + ptdump_walk_pgd_level_core(NULL, pgd, true, false); +#endif } -EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level); void ptdump_walk_pgd_level_checkwx(void) { - ptdump_walk_pgd_level_core(NULL, NULL, true); + ptdump_walk_pgd_level_core(NULL, NULL, true, false); + ptdump_walk_user_pgd_level_checkwx(); } static int __init pt_dump_init(void) From 704cfa04dde3a35cde59a67cafbcaeacbb3a76c8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Dec 2017 15:08:06 +0100 Subject: [PATCH 1088/3326] x86/mm/dump_pagetables: Allow dumping current pagetables commit a4b51ef6552c704764684cef7e753162dc87c5fa upstream. Add two debugfs files which allow to dump the pagetable of the current task. current_kernel dumps the regular page table. This is the page table which is normally shared between kernel and user space. 
If kernel page table isolation is enabled this is the kernel space mapping. If kernel page table isolation is enabled the second file, current_user, dumps the user space page table. These files allow to verify the resulting page tables for page table isolation, but even in the normal case its useful to be able to inspect user space page tables of current for debugging purposes. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/pgtable.h | 2 +- arch/x86/mm/debug_pagetables.c | 71 ++++++++++++++++++++++++++++++++-- arch/x86/mm/dump_pagetables.c | 6 ++- 3 files changed, 73 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 6b8bd9016eeb73..211368922cad67 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -28,7 +28,7 @@ extern pgd_t early_top_pgt[PTRS_PER_PGD]; int __init __early_make_pgtable(unsigned long address, pmdval_t pmd); void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd); -void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd); +void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user); void ptdump_walk_pgd_level_checkwx(void); #ifdef CONFIG_DEBUG_WX diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c index 8e70c1599e517f..421f2664ffa06e 100644 --- a/arch/x86/mm/debug_pagetables.c +++ b/arch/x86/mm/debug_pagetables.c @@ -5,7 +5,7 @@ static int ptdump_show(struct seq_file *m, void *v) { - ptdump_walk_pgd_level_debugfs(m, NULL); + ptdump_walk_pgd_level_debugfs(m, NULL, false); return 0; } @@ -22,7 +22,57 @@ static const struct file_operations ptdump_fops = { .release = single_release, }; -static struct dentry *dir, *pe; +static int ptdump_show_curknl(struct seq_file *m, void *v) +{ + if (current->mm->pgd) { + down_read(¤t->mm->mmap_sem); + ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false); + up_read(¤t->mm->mmap_sem); + } + return 0; +} + +static int ptdump_open_curknl(struct inode *inode, struct file *filp) +{ + return single_open(filp, ptdump_show_curknl, NULL); +} + +static const struct file_operations ptdump_curknl_fops = { + .owner = THIS_MODULE, + .open = ptdump_open_curknl, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +static struct dentry *pe_curusr; + +static int ptdump_show_curusr(struct seq_file *m, void *v) +{ + if (current->mm->pgd) { + down_read(¤t->mm->mmap_sem); + ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true); + up_read(¤t->mm->mmap_sem); + } + return 0; +} + +static int ptdump_open_curusr(struct inode *inode, struct file *filp) +{ + return single_open(filp, ptdump_show_curusr, NULL); +} + +static const struct file_operations ptdump_curusr_fops = { + .owner = THIS_MODULE, + .open = ptdump_open_curusr, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +static struct dentry *dir, *pe_knl, *pe_curknl; static int __init pt_dump_debug_init(void) { @@ -30,9 +80,22 @@ static int __init pt_dump_debug_init(void) if (!dir) return -ENOMEM; - pe = 
debugfs_create_file("kernel", 0400, dir, NULL, &ptdump_fops); - if (!pe) + pe_knl = debugfs_create_file("kernel", 0400, dir, NULL, + &ptdump_fops); + if (!pe_knl) + goto err; + + pe_curknl = debugfs_create_file("current_kernel", 0400, + dir, NULL, &ptdump_curknl_fops); + if (!pe_curknl) + goto err; + +#ifdef CONFIG_PAGE_TABLE_ISOLATION + pe_curusr = debugfs_create_file("current_user", 0400, + dir, NULL, &ptdump_curusr_fops); + if (!pe_curusr) goto err; +#endif return 0; err: debugfs_remove_recursive(dir); diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 17f5b417f95e9e..f56902c1f04b94 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -530,8 +530,12 @@ void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd) ptdump_walk_pgd_level_core(m, pgd, false, true); } -void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd) +void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user) { +#ifdef CONFIG_PAGE_TABLE_ISOLATION + if (user && static_cpu_has(X86_FEATURE_PTI)) + pgd = kernel_to_user_pgdp(pgd); +#endif ptdump_walk_pgd_level_core(m, pgd, false, false); } EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs); From e08acdb9620bcd4c55128cad07e625cd1825e533 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 15 Dec 2017 20:35:11 +0100 Subject: [PATCH 1089/3326] x86/ldt: Make the LDT mapping RO commit 9f5cb6b32d9e0a3a7453222baaf15664d92adbf2 upstream. Now that the LDT mapping is in a known area when PAGE_TABLE_ISOLATION is enabled its a primary target for attacks, if a user space interface fails to validate a write address correctly. That can never happen, right? The SDM states: If the segment descriptors in the GDT or an LDT are placed in ROM, the processor can enter an indefinite loop if software or the processor attempts to update (write to) the ROM-based segment descriptors. To prevent this problem, set the accessed bits for all segment descriptors placed in a ROM. Also, remove operating-system or executive code that attempts to modify segment descriptors located in ROM. So its a valid approach to set the ACCESS bit when setting up the LDT entry and to map the table RO. Fixup the selftest so it can handle that new mode. Remove the manual ACCESS bit setter in set_tls_desc() as this is now pointless. Folded the patch from Peter Ziljstra. Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/desc.h | 2 ++ arch/x86/kernel/ldt.c | 7 ++++++- arch/x86/kernel/tls.c | 11 ++--------- tools/testing/selftests/x86/ldt_gdt.c | 3 +-- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index bc359dd2f7f646..85e23bb7b34e31 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -21,6 +21,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in desc->type = (info->read_exec_only ^ 1) << 1; desc->type |= info->contents << 2; + /* Set the ACCESS bit so it can be mapped RO */ + desc->type |= 1; desc->s = 1; desc->dpl = 0x3; diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 9629c5d8267a8e..579cc4a66fdf66 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -158,7 +158,12 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) ptep = get_locked_pte(mm, va, &ptl); if (!ptep) return -ENOMEM; - pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)); + /* + * Map it RO so the easy to find address is not a primary + * target via some kernel interface which misses a + * permission check. + */ + pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)); set_pte_at(mm, va, ptep, pte); pte_unmap_unlock(ptep, ptl); } diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index 9a9c9b076955dd..a5b802a1221272 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -93,17 +93,10 @@ static void set_tls_desc(struct task_struct *p, int idx, cpu = get_cpu(); while (n-- > 0) { - if (LDT_empty(info) || LDT_zero(info)) { + if (LDT_empty(info) || LDT_zero(info)) memset(desc, 0, sizeof(*desc)); - } else { + else fill_ldt(desc, info); - - /* - * Always set the accessed bit so that the CPU - * doesn't try to write to the (read-only) GDT. - */ - desc->type |= 1; - } ++info; ++desc; } diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index 0304ffb714f272..1aef72df20a112 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -122,8 +122,7 @@ static void check_valid_segment(uint16_t index, int ldt, * NB: Different Linux versions do different things with the * accessed bit in set_thread_area(). */ - if (ar != expected_ar && - (ldt || ar != (expected_ar | AR_ACCESSED))) { + if (ar != expected_ar && ar != (expected_ar | AR_ACCESSED)) { printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n", (ldt ? "LDT" : "GDT"), index, ar, expected_ar); nerrs++; From 66f833dbed02d39c44440b6b35ac088655c32edb Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 22 Dec 2017 20:32:35 -0500 Subject: [PATCH 1090/3326] ring-buffer: Mask out the info bits when returning buffer page length commit 45d8b80c2ac5d21cd1e2954431fb676bc2b1e099 upstream. Two info bits were added to the "commit" part of the ring buffer data page when returned to be consumed. This was to inform the user space readers that events have been missed, and that the count may be stored at the end of the page. What wasn't handled, was the splice code that actually called a function to return the length of the data in order to zero out the rest of the page before sending it up to user space. These data bits were returned with the length making the value negative, and that negative value was not checked. 
It was compared to PAGE_SIZE, and only used if the size was less than PAGE_SIZE. Luckily PAGE_SIZE is unsigned long which made the compare an unsigned compare, meaning the negative size value did not end up causing a large portion of memory to be randomly zeroed out. Fixes: 66a8cb95ed040 ("ring-buffer: Add place holder recording of dropped events") Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/ring_buffer.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 81279c6602ff17..b37718c2d718fa 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -281,6 +281,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data); /* Missed count stored at end */ #define RB_MISSED_STORED (1 << 30) +#define RB_MISSED_FLAGS (RB_MISSED_EVENTS|RB_MISSED_STORED) + struct buffer_data_page { u64 time_stamp; /* page time stamp */ local_t commit; /* write committed index */ @@ -332,7 +334,9 @@ static void rb_init_page(struct buffer_data_page *bpage) */ size_t ring_buffer_page_len(void *page) { - return local_read(&((struct buffer_data_page *)page)->commit) + struct buffer_data_page *bpage = page; + + return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS) + BUF_PAGE_HDR_SIZE; } From 0aea6fb0e777bb89437c8883c289e05e1f153ec8 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 22 Dec 2017 21:19:29 -0500 Subject: [PATCH 1091/3326] ring-buffer: Do no reuse reader page if still in use commit ae415fa4c5248a8cf4faabd5a3c20576cb1ad607 upstream. To free the reader page that is allocated with ring_buffer_alloc_read_page(), ring_buffer_free_read_page() must be called. For faster performance, this page can be reused by the ring buffer to avoid having to free and allocate new pages. The issue arises when the page is used with a splice pipe into the networking code. The networking code may up the page counter for the page, and keep it active while sending it is queued to go to the network. The incrementing of the page ref does not prevent it from being reused in the ring buffer, and this can cause the page that is being sent out to the network to be modified before it is sent by reading new data. Add a check to the page ref counter, and only reuse the page if it is not being used anywhere else. 
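To make the ring_buffer_page_len() change above concrete, here is a small standalone model of the commit field. RB_MISSED_STORED is bit 30 as in the patch; RB_MISSED_EVENTS is assumed to be the neighbouring bit 31 and BUF_PAGE_HDR_SIZE is a placeholder, so treat both as illustrative values rather than the real header.

/* sketch only: why the info bits must be masked before using the length */
#include <stdint.h>
#include <stdio.h>

#define RB_MISSED_EVENTS	(1u << 31)	/* assumed: events were missed */
#define RB_MISSED_STORED	(1u << 30)	/* missed count stored at end  */
#define RB_MISSED_FLAGS		(RB_MISSED_EVENTS | RB_MISSED_STORED)
#define BUF_PAGE_HDR_SIZE	16u		/* placeholder header size     */
#define PAGE_SIZE		4096ul

int main(void)
{
	uint32_t commit = 240 | RB_MISSED_EVENTS;	/* 240 data bytes plus a flag */

	/* unmasked: the flag makes the value huge (negative if read as signed),
	 * so the unsigned "< PAGE_SIZE" sanity check rejects it by luck */
	printf("raw commit: %u (as signed: %d)\n", commit, (int32_t)commit);
	printf("raw + header < PAGE_SIZE? %s\n",
	       commit + BUF_PAGE_HDR_SIZE < PAGE_SIZE ? "yes" : "no");

	/* fixed: strip the info bits, then the real length is available */
	printf("masked length: %u\n", (commit & ~RB_MISSED_FLAGS) + BUF_PAGE_HDR_SIZE);
	return 0;
}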
Fixes: 73a757e63114d ("ring-buffer: Return reader page back into existing ring buffer") Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/ring_buffer.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index b37718c2d718fa..0476a937201476 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -4443,8 +4443,13 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data) { struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; struct buffer_data_page *bpage = data; + struct page *page = virt_to_page(bpage); unsigned long flags; + /* If the page is still in use someplace else, we can't reuse it */ + if (page_ref_count(page) > 1) + goto out; + local_irq_save(flags); arch_spin_lock(&cpu_buffer->lock); @@ -4456,6 +4461,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data) arch_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); + out: free_page((unsigned long)bpage); } EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); From 2aec84963e5edf3c463c37da0a94fb4c12c2d2c2 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Mon, 18 Dec 2017 13:10:00 -0800 Subject: [PATCH 1092/3326] iw_cxgb4: Only validate the MSN for successful completions commit f55688c45442bc863f40ad678c638785b26cdce6 upstream. If the RECV CQE is in error, ignore the MSN check. This was causing recvs that were flushed into the sw cq to be completed with the wrong status (BAD_MSN instead of FLUSHED). Signed-off-by: Steve Wise Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/cxgb4/cq.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index eae8ea81c6e271..514c1000ded125 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -586,10 +586,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, ret = -EAGAIN; goto skip_cqe; } - if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { + if (unlikely(!CQE_STATUS(hw_cqe) && + CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) { t4_set_wq_in_error(wq); - hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN)); - goto proc_cqe; + hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN)); } goto proc_cqe; } From 23ef17a49f1edfa2c4b437c03b1c0d4da1d116d2 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Thu, 30 Nov 2017 10:15:02 +0000 Subject: [PATCH 1093/3326] ASoC: codecs: msm8916-wcd: Fix supported formats commit 51f493ae71adc2c49a317a13c38e54e1cdf46005 upstream. This codec is configurable for only 16 bit and 32 bit samples, so reflect this in the supported formats also remove 24bit sample from supported list. 
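Going back to the iw_cxgb4 hunk above, the rule it implements fits in a one-line predicate: the MSN in a receive CQE is only meaningful when the CQE status reports success, so errored (flushed) CQEs must keep their real status instead of being rewritten to BAD_MSN. A hypothetical standalone sketch, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* flag a bad MSN only on the success path */
static bool rq_cqe_bad_msn(unsigned int status, unsigned int msn, unsigned int expected)
{
	return status == 0 && msn != expected;
}

int main(void)
{
	printf("errored CQE, stale MSN -> %d\n", rq_cqe_bad_msn(5, 7, 9));	/* 0 */
	printf("good CQE, wrong MSN    -> %d\n", rq_cqe_bad_msn(0, 7, 9));	/* 1 */
	return 0;
}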
Signed-off-by: Srinivas Kandagatla Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/msm8916-wcd-analog.c | 2 +- sound/soc/codecs/msm8916-wcd-digital.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index 18933bf6473f47..8c7063e1aa46a4 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -267,7 +267,7 @@ #define MSM8916_WCD_ANALOG_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000) #define MSM8916_WCD_ANALOG_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ - SNDRV_PCM_FMTBIT_S24_LE) + SNDRV_PCM_FMTBIT_S32_LE) static int btn_mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_BTN_4; diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c index 66df8f810f0d74..694db27b11fa57 100644 --- a/sound/soc/codecs/msm8916-wcd-digital.c +++ b/sound/soc/codecs/msm8916-wcd-digital.c @@ -194,7 +194,7 @@ SNDRV_PCM_RATE_32000 | \ SNDRV_PCM_RATE_48000) #define MSM8916_WCD_DIGITAL_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ - SNDRV_PCM_FMTBIT_S24_LE) + SNDRV_PCM_FMTBIT_S32_LE) struct msm8916_wcd_digital_priv { struct clk *ahbclk, *mclk; @@ -645,7 +645,7 @@ static int msm8916_wcd_digital_hw_params(struct snd_pcm_substream *substream, RX_I2S_CTL_RX_I2S_MODE_MASK, RX_I2S_CTL_RX_I2S_MODE_16); break; - case SNDRV_PCM_FORMAT_S24_LE: + case SNDRV_PCM_FORMAT_S32_LE: snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL, TX_I2S_CTL_TX_I2S_MODE_MASK, TX_I2S_CTL_TX_I2S_MODE_32); From 308ddf2afe83994719006200204dc819fcbd9e7a Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 8 Dec 2017 16:15:20 +0000 Subject: [PATCH 1094/3326] ASoC: wm_adsp: Fix validation of firmware and coeff lengths commit 50dd2ea8ef67a1617e0c0658bcbec4b9fb03b936 upstream. The checks for whether another region/block header could be present are subtracting the size from the current offset. Obviously we should instead subtract the offset from the size. The checks for whether the region/block data fit in the file are adding the data size to the current offset and header size, without checking for integer overflow. Rearrange these so that overflow is impossible. 
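The rearrangement described above is the usual way to bound-check untrusted lengths without risking unsigned wrap-around: never add an attacker-controlled value to an offset, only subtract already-validated values from the total size. A standalone sketch of the pattern with made-up sizes, not the driver code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* another header may follow only if there is room for it after pos */
static bool header_fits(size_t pos, size_t hdr, size_t size)
{
	return pos < size && hdr < size - pos;
}

/* the payload fits only if it is no larger than what remains; the
 * subtraction is safe because header_fits() already held */
static bool payload_fits(size_t pos, size_t hdr, size_t len, size_t size)
{
	return len <= size - pos - hdr;
}

int main(void)
{
	size_t size = 4096, pos = 4000, hdr = 32;
	size_t huge = (size_t)-1 - 100;		/* corrupt length near SIZE_MAX */

	/* overflow-prone form: pos + huge + hdr wraps around and looks fine */
	printf("naive check passes: %d\n", pos + huge + hdr <= size);
	/* rearranged form rejects it */
	printf("safe check passes:  %d\n",
	       header_fits(pos, hdr, size) && payload_fits(pos, hdr, huge, size));
	return 0;
}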
Signed-off-by: Ben Hutchings Acked-by: Charles Keepax Tested-by: Charles Keepax Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/wm_adsp.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 65c059b5ffd784..66e32f5d2917f2 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -1733,7 +1733,7 @@ static int wm_adsp_load(struct wm_adsp *dsp) le64_to_cpu(footer->timestamp)); while (pos < firmware->size && - pos - firmware->size > sizeof(*region)) { + sizeof(*region) < firmware->size - pos) { region = (void *)&(firmware->data[pos]); region_name = "Unknown"; reg = 0; @@ -1782,8 +1782,8 @@ static int wm_adsp_load(struct wm_adsp *dsp) regions, le32_to_cpu(region->len), offset, region_name); - if ((pos + le32_to_cpu(region->len) + sizeof(*region)) > - firmware->size) { + if (le32_to_cpu(region->len) > + firmware->size - pos - sizeof(*region)) { adsp_err(dsp, "%s.%d: %s region len %d bytes exceeds file length %zu\n", file, regions, region_name, @@ -2253,7 +2253,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) blocks = 0; while (pos < firmware->size && - pos - firmware->size > sizeof(*blk)) { + sizeof(*blk) < firmware->size - pos) { blk = (void *)(&firmware->data[pos]); type = le16_to_cpu(blk->type); @@ -2327,8 +2327,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) } if (reg) { - if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) > - firmware->size) { + if (le32_to_cpu(blk->len) > + firmware->size - pos - sizeof(*blk)) { adsp_err(dsp, "%s.%d: %s region len %d bytes exceeds file length %zu\n", file, blocks, region_name, From c7d231ca5e0bf38f6920e92c97feadede0978d1a Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 13 Nov 2017 12:12:55 +0100 Subject: [PATCH 1095/3326] ASoC: da7218: fix fix child-node lookup commit bc6476d6c1edcb9b97621b5131bd169aa81f27db upstream. Fix child-node lookup during probe, which ended up searching the whole device tree depth-first starting at the parent rather than just matching on its children. To make things worse, the parent codec node was also prematurely freed. Fixes: 4d50934abd22 ("ASoC: da7218: Add da7218 codec driver") Signed-off-by: Johan Hovold Acked-by: Adam Thomson Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/da7218.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c index b2d42ec1dcd9f7..56564ce90cb6b0 100644 --- a/sound/soc/codecs/da7218.c +++ b/sound/soc/codecs/da7218.c @@ -2520,7 +2520,7 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec) } if (da7218->dev_id == DA7218_DEV_ID) { - hpldet_np = of_find_node_by_name(np, "da7218_hpldet"); + hpldet_np = of_get_child_by_name(np, "da7218_hpldet"); if (!hpldet_np) return pdata; From fe9f7bd45c01b4bc328d2442136c5456608f1637 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 20 Nov 2017 23:14:55 +0100 Subject: [PATCH 1096/3326] ASoC: fsl_ssi: AC'97 ops need regmap, clock and cleaning up on failure commit 695b78b548d8a26288f041e907ff17758df9e1d5 upstream. AC'97 ops (register read / write) need SSI regmap and clock, so they have to be set after them. We also need to set these ops back to NULL if we fail the probe. Signed-off-by: Maciej S. 
Szmigiero Acked-by: Nicolin Chen Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/fsl/fsl_ssi.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 64598d1183f8f1..3ffbb498cc7025 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -1452,12 +1452,6 @@ static int fsl_ssi_probe(struct platform_device *pdev) sizeof(fsl_ssi_ac97_dai)); fsl_ac97_data = ssi_private; - - ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev); - if (ret) { - dev_err(&pdev->dev, "could not set AC'97 ops\n"); - return ret; - } } else { /* Initialize this copy of the CPU DAI driver structure */ memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template, @@ -1568,6 +1562,14 @@ static int fsl_ssi_probe(struct platform_device *pdev) return ret; } + if (fsl_ssi_is_ac97(ssi_private)) { + ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev); + if (ret) { + dev_err(&pdev->dev, "could not set AC'97 ops\n"); + goto error_ac97_ops; + } + } + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component, &ssi_private->cpu_dai_drv, 1); if (ret) { @@ -1651,6 +1653,10 @@ static int fsl_ssi_probe(struct platform_device *pdev) fsl_ssi_debugfs_remove(&ssi_private->dbg_stats); error_asoc_register: + if (fsl_ssi_is_ac97(ssi_private)) + snd_soc_set_ac97_ops(NULL); + +error_ac97_ops: if (ssi_private->soc->imx) fsl_ssi_imx_clean(pdev, ssi_private); From 314d9cdf7e0fb83781e8fae836044856a449ffe0 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 13 Nov 2017 12:12:56 +0100 Subject: [PATCH 1097/3326] ASoC: twl4030: fix child-node lookup commit 15f8c5f2415bfac73f33a14bcd83422bcbfb5298 upstream. Fix child-node lookup during probe, which ended up searching the whole device tree depth-first starting at the parent rather than just matching on its children. To make things worse, the parent codec node was also prematurely freed, while the child node was leaked. Fixes: 2d6d649a2e0f ("ASoC: twl4030: Support for DT booted kernel") Signed-off-by: Johan Hovold Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/twl4030.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c index c482b2e7a7d2a5..cfe72b9d435606 100644 --- a/sound/soc/codecs/twl4030.c +++ b/sound/soc/codecs/twl4030.c @@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec) struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev); struct device_node *twl4030_codec_node = NULL; - twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node, + twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node, "codec"); if (!pdata && twl4030_codec_node) { @@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec) GFP_KERNEL); if (!pdata) { dev_err(codec->dev, "Can not allocate memory\n"); + of_node_put(twl4030_codec_node); return NULL; } twl4030_setup_pdata_of(pdata, twl4030_codec_node); + of_node_put(twl4030_codec_node); } return pdata; From 077cb91c9fc35d638e014374491ac6a9f41b19ba Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Wed, 29 Nov 2017 15:32:46 -0600 Subject: [PATCH 1098/3326] ASoC: tlv320aic31xx: Fix GPIO1 register definition commit 737e0b7b67bdfe24090fab2852044bb283282fc5 upstream. GPIO1 control register is number 51, fix this here. 
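For context on the register numbers above, here is a small sketch of paged register addressing as used by this codec family. The page * 128 + offset layout is an assumption made for illustration; the authoritative macro is AIC31XX_REG() in tlv320aic31xx.h.

#include <stdio.h>

/* assumed layout: 128 registers per page, flat index = page * 128 + offset */
#define AIC31XX_REG(page, reg)	((page) * 128 + (reg))

int main(void)
{
	printf("INT2CTRL         (0, 49): %d\n", AIC31XX_REG(0, 49));
	printf("GPIO1, old value (0, 50): %d\n", AIC31XX_REG(0, 50));	/* one register short */
	printf("GPIO1, fixed     (0, 51): %d\n", AIC31XX_REG(0, 51));
	return 0;
}

With the old definition every access aimed at GPIO1 control landed on the register one slot below it, which is why the one-line change above is a functional fix rather than a cosmetic one.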
Fixes: bafcbfe429eb ("ASoC: tlv320aic31xx: Make the register values human readable") Signed-off-by: Andrew F. Davis Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/tlv320aic31xx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h index 730fb205886997..1ff3edb7bbb6b2 100644 --- a/sound/soc/codecs/tlv320aic31xx.h +++ b/sound/soc/codecs/tlv320aic31xx.h @@ -116,7 +116,7 @@ struct aic31xx_pdata { /* INT2 interrupt control */ #define AIC31XX_INT2CTRL AIC31XX_REG(0, 49) /* GPIO1 control */ -#define AIC31XX_GPIO1 AIC31XX_REG(0, 50) +#define AIC31XX_GPIO1 AIC31XX_REG(0, 51) #define AIC31XX_DACPRB AIC31XX_REG(0, 60) /* ADC Instruction Set Register */ From 074e2892a4205b0c88742d9d000c0c19e0b98244 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 15 Dec 2017 15:02:33 +0100 Subject: [PATCH 1099/3326] gpio: fix "gpio-line-names" property retrieval commit 822703354774ec935169cbbc8d503236bcb54fda upstream. Following commit 9427ecbed46cc ("gpio: Rework of_gpiochip_set_names() to use device property accessors"), "gpio-line-names" DT property is not retrieved anymore when chip->parent is not set by the driver. This is due to OF based property reads having been replaced by device based property reads. This patch fixes that by making use of fwnode_property_read_string_array() instead of device_property_read_string_array() and handing over either of_fwnode_handle(chip->of_node) or dev_fwnode(chip->parent) to that function. Fixes: 9427ecbed46cc ("gpio: Rework of_gpiochip_set_names() to use device property accessors") Signed-off-by: Christophe Leroy Acked-by: Mika Westerberg Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/gpio/gpiolib-acpi.c | 2 +- drivers/gpio/gpiolib-devprop.c | 17 +++++++---------- drivers/gpio/gpiolib-of.c | 3 ++- drivers/gpio/gpiolib.h | 3 ++- 4 files changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index eb4528c87c0b39..d6f3d9ee1350e4 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -1074,7 +1074,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip) } if (!chip->names) - devprop_gpiochip_set_names(chip); + devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent)); acpi_gpiochip_request_regions(acpi_gpio); acpi_gpiochip_scan_gpios(acpi_gpio); diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c index 27f383bda7d962..f748aa3e77f720 100644 --- a/drivers/gpio/gpiolib-devprop.c +++ b/drivers/gpio/gpiolib-devprop.c @@ -19,30 +19,27 @@ /** * devprop_gpiochip_set_names - Set GPIO line names using device properties * @chip: GPIO chip whose lines should be named, if possible + * @fwnode: Property Node containing the gpio-line-names property * * Looks for device property "gpio-line-names" and if it exists assigns * GPIO line names for the chip. The memory allocated for the assigned * names belong to the underlying firmware node and should not be released * by the caller. 
*/ -void devprop_gpiochip_set_names(struct gpio_chip *chip) +void devprop_gpiochip_set_names(struct gpio_chip *chip, + const struct fwnode_handle *fwnode) { struct gpio_device *gdev = chip->gpiodev; const char **names; int ret, i; - if (!chip->parent) { - dev_warn(&gdev->dev, "GPIO chip parent is NULL\n"); - return; - } - - ret = device_property_read_string_array(chip->parent, "gpio-line-names", + ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", NULL, 0); if (ret < 0) return; if (ret != gdev->ngpio) { - dev_warn(chip->parent, + dev_warn(&gdev->dev, "names %d do not match number of GPIOs %d\n", ret, gdev->ngpio); return; @@ -52,10 +49,10 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip) if (!names) return; - ret = device_property_read_string_array(chip->parent, "gpio-line-names", + ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", names, gdev->ngpio); if (ret < 0) { - dev_warn(chip->parent, "failed to read GPIO line names\n"); + dev_warn(&gdev->dev, "failed to read GPIO line names\n"); kfree(names); return; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index bfcd20699ec889..ba38f530e4037e 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -493,7 +493,8 @@ int of_gpiochip_add(struct gpio_chip *chip) /* If the chip defines names itself, these take precedence */ if (!chip->names) - devprop_gpiochip_set_names(chip); + devprop_gpiochip_set_names(chip, + of_fwnode_handle(chip->of_node)); of_node_get(chip->of_node); diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index d003ccb127816c..3d4d0634c9ddd5 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -224,7 +224,8 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc) return desc - &desc->gdev->descs[0]; } -void devprop_gpiochip_set_names(struct gpio_chip *chip); +void devprop_gpiochip_set_names(struct gpio_chip *chip, + const struct fwnode_handle *fwnode); /* With descriptor prefix */ From 907145e68e21947ec76d99827ab4eeeecff4a34c Mon Sep 17 00:00:00 2001 From: "Michael J. Ruhl" Date: Fri, 22 Dec 2017 08:47:20 -0800 Subject: [PATCH 1100/3326] IB/hfi: Only read capability registers if the capability exists commit 4c009af473b2026caaa26107e34d7cc68dad7756 upstream. During driver init, various registers are saved to allow restoration after an FLR or gen3 bump. Some of these registers are not available in some circumstances (i.e. Virtual machines). This bug makes the driver unusable when the PCI device is passed into a VM, it fails during probe. Delete unnecessary register read/write, and only access register if the capability exists. Fixes: a618b7e40af2 ("IB/hfi1: Move saving PCI values to a separate function") Reviewed-by: Mike Marciniszyn Signed-off-by: Michael J. 
Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/hfi1/hfi.h | 1 - drivers/infiniband/hw/hfi1/pcie.c | 30 ++++++++++++------------------ 2 files changed, 12 insertions(+), 19 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 6ff44dc606ebac..3409eee1609258 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1129,7 +1129,6 @@ struct hfi1_devdata { u16 pcie_lnkctl; u16 pcie_devctl2; u32 pci_msix0; - u32 pci_lnkctl3; u32 pci_tph2; /* diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 09e50fd2a08f07..8c7e7a60b71584 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -411,15 +411,12 @@ int restore_pci_variables(struct hfi1_devdata *dd) if (ret) goto error; - ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, - dd->pci_lnkctl3); - if (ret) - goto error; - - ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2); - if (ret) - goto error; - + if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) { + ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, + dd->pci_tph2); + if (ret) + goto error; + } return 0; error: @@ -469,15 +466,12 @@ int save_pci_variables(struct hfi1_devdata *dd) if (ret) goto error; - ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, - &dd->pci_lnkctl3); - if (ret) - goto error; - - ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2); - if (ret) - goto error; - + if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) { + ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, + &dd->pci_tph2); + if (ret) + goto error; + } return 0; error: From d471542b9f0706b9d37dba40bc02ef27b05a6bc1 Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Sun, 24 Dec 2017 13:54:56 +0200 Subject: [PATCH 1101/3326] IB/mlx5: Serialize access to the VMA list commit ad9a3668a434faca1339789ed2f043d679199309 upstream. User-space applications can do mmap and munmap directly at any time. Since the VMA list is not protected with a mutex, concurrent accesses to the VMA list from the mmap and munmap can cause data corruption. Add a mutex around the list. Fixes: 7c2344c3bbf9 ("IB/mlx5: Implements disassociate_ucontext API") Reviewed-by: Yishai Hadas Signed-off-by: Majd Dibbiny Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/mlx5/main.c | 8 ++++++++ drivers/infiniband/hw/mlx5/mlx5_ib.h | 4 ++++ 2 files changed, 12 insertions(+) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 5aff1e33d98408..30d479f87cb8ae 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1415,6 +1415,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, } INIT_LIST_HEAD(&context->vma_private_list); + mutex_init(&context->vma_private_list_mutex); INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); @@ -1576,7 +1577,9 @@ static void mlx5_ib_vma_close(struct vm_area_struct *area) * mlx5_ib_disassociate_ucontext(). 
*/ mlx5_ib_vma_priv_data->vma = NULL; + mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex); list_del(&mlx5_ib_vma_priv_data->list); + mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex); kfree(mlx5_ib_vma_priv_data); } @@ -1596,10 +1599,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma, return -ENOMEM; vma_prv->vma = vma; + vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex; vma->vm_private_data = vma_prv; vma->vm_ops = &mlx5_ib_vm_ops; + mutex_lock(&ctx->vma_private_list_mutex); list_add(&vma_prv->list, vma_head); + mutex_unlock(&ctx->vma_private_list_mutex); return 0; } @@ -1642,6 +1648,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) * mlx5_ib_vma_close. */ down_write(&owning_mm->mmap_sem); + mutex_lock(&context->vma_private_list_mutex); list_for_each_entry_safe(vma_private, n, &context->vma_private_list, list) { vma = vma_private->vma; @@ -1656,6 +1663,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) list_del(&vma_private->list); kfree(vma_private); } + mutex_unlock(&context->vma_private_list_mutex); up_write(&owning_mm->mmap_sem); mmput(owning_mm); put_task_struct(owning_process); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 189e80cd6b2f3c..754103372faa28 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -115,6 +115,8 @@ enum { struct mlx5_ib_vma_private_data { struct list_head list; struct vm_area_struct *vma; + /* protect vma_private_list add/del */ + struct mutex *vma_private_list_mutex; }; struct mlx5_ib_ucontext { @@ -129,6 +131,8 @@ struct mlx5_ib_ucontext { /* Transport Domain number */ u32 tdn; struct list_head vma_private_list; + /* protect vma_private_list add/del */ + struct mutex vma_private_list_mutex; unsigned long upd_xlt_page; /* protect ODP/KSM */ From 5f3b36984c7b15e9d6ace5fc836b3474a8bb4213 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Sun, 24 Dec 2017 13:54:57 +0200 Subject: [PATCH 1102/3326] IB/uverbs: Fix command checking as part of ib_uverbs_ex_modify_qp() commit 05d14e7b0c138cb07ba30e464f47b39434f3fdef upstream. If the input command length is larger than the kernel supports, an error should be returned in case the unsupported bytes are not cleared, instead of the other way around. This matches what all other callers of ib_is_udata_cleared do and will avoid user ABI problems in the future.
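To make the inverted check concrete, here is a small user-space sketch of the rule the fix restores (hypothetical helper names, not the kernel's ib_is_udata_cleared()): trailing bytes beyond the structure the kernel knows about must all be zero, and the request is rejected only when they are not.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Return 1 if every byte in [p, p + len) is zero, else 0. */
static int trailing_cleared(const unsigned char *p, size_t len)
{
	while (len--)
		if (*p++)
			return 0;
	return 1;
}

/* Accept a command that may be longer than the struct we understand. */
static int check_cmd(const void *cmd, size_t inlen, size_t known_len)
{
	if (inlen > known_len &&
	    !trailing_cleared((const unsigned char *)cmd + known_len,
			      inlen - known_len))
		return -EOPNOTSUPP;	/* unknown, non-zero extension bytes */
	return 0;			/* safe to handle the known part */
}

int main(void)
{
	unsigned char buf[16];

	memset(buf, 0, sizeof(buf));
	printf("zero-padded extra bytes: %d\n", check_cmd(buf, 16, 8));
	buf[12] = 0xff;
	printf("non-zero extra bytes:    %d\n", check_cmd(buf, 16, 8));
	return 0;
}

The patch applies exactly this rule; previously the negation was missing, so requests with properly cleared extra bytes were the ones being rejected.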
Fixes: 189aba99e700 ("IB/uverbs: Extend modify_qp and support packet pacing") Reviewed-by: Yishai Hadas Signed-off-by: Moni Shoua Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/uverbs_cmd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index d8f540054392ab..93c1a57dbff1b2 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2085,8 +2085,8 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file, return -EOPNOTSUPP; if (ucore->inlen > sizeof(cmd)) { - if (ib_is_udata_cleared(ucore, sizeof(cmd), - ucore->inlen - sizeof(cmd))) + if (!ib_is_udata_cleared(ucore, sizeof(cmd), + ucore->inlen - sizeof(cmd))) return -EOPNOTSUPP; } From af0dc162f644335c50e2384ba485e4aa79b25e28 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Sun, 24 Dec 2017 13:54:58 +0200 Subject: [PATCH 1103/3326] IB/core: Verify that QP is security enabled in create and destroy commit 4a50881bbac309e6f0684816a180bc3c14e1485d upstream. The XRC target QP create flow sets up qp_sec only if there is an IB link with LSM security enabled. However, several other related uAPI entry points blindly follow the qp_sec NULL pointer, resulting in a possible oops. Check for NULL before using qp_sec. Fixes: d291f1a65232 ("IB/core: Enforce PKey security on QPs") Reviewed-by: Daniel Jurgens Signed-off-by: Moni Shoua Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/security.c | 3 +++ drivers/infiniband/core/verbs.c | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index feafdb961c485c..59b2f96d986aa2 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -386,6 +386,9 @@ int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev) if (ret) return ret; + if (!qp->qp_sec) + return 0; + mutex_lock(&real_qp->qp_sec->mutex); ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys, qp->qp_sec); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index de57d6c11a2542..9032f77cc38d25 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1400,7 +1400,8 @@ int ib_close_qp(struct ib_qp *qp) spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); atomic_dec(&real_qp->usecnt); - ib_close_shared_qp_security(qp->qp_sec); + if (qp->qp_sec) + ib_close_shared_qp_security(qp->qp_sec); kfree(qp); return 0; From 056305595a9940005ff34d0a67e2a2b231d162b7 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 22 Dec 2017 10:45:07 +0100 Subject: [PATCH 1104/3326] ALSA: hda: Drop useless WARN_ON() commit a36c2638380c0a4676647a1f553b70b20d3ebce1 upstream. Since the commit 97cc2ed27e5a ("ALSA: hda - Fix yet another i915 pointer leftover in error path") cleared hdac_acomp pointer, the WARN_ON() non-NULL check in snd_hdac_i915_register_notifier() may give a false-positive warning, as the function gets called no matter whether the component is registered or not. For fixing it, let's get rid of the spurious WARN_ON(). 
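An illustrative user-space sketch of the pattern being changed (hypothetical names, not the ALSA/i915 binding code): when the object being looked up is optional and may legitimately be absent, the lookup should quietly return -ENODEV rather than warn.

#include <errno.h>
#include <stdio.h>

struct component { const void *ops; };

static struct component *acomp;	/* stays NULL unless the component binds */

static int register_notifier(const void *ops)
{
	if (!acomp)
		return -ENODEV;	/* absence is a normal condition, not a bug */
	acomp->ops = ops;
	return 0;
}

int main(void)
{
	int dummy_ops = 0;

	/* With no component bound this simply reports -ENODEV; the old
	 * code would also have tripped a spurious warning here. */
	printf("register_notifier() -> %d\n", register_notifier(&dummy_ops));
	return 0;
}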
Fixes: 97cc2ed27e5a ("ALSA: hda - Fix yet another i915 pointer leftover in error path") Reported-by: Kouta Okamoto Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/hda/hdac_i915.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 038a180d3f8117..cbe818eda3363c 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -325,7 +325,7 @@ static int hdac_component_master_match(struct device *dev, void *data) */ int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops) { - if (WARN_ON(!hdac_acomp)) + if (!hdac_acomp) return -ENODEV; hdac_acomp->audio_ops = aops; From 2845bbd1ef1f574ce2bbd7a7f6feab04ca137cf3 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Fri, 22 Dec 2017 11:17:44 +0800 Subject: [PATCH 1105/3326] ALSA: hda - Add MIC_NO_PRESENCE fixup for 2 HP machines commit 322f74ede933b3e2cb78768b6a6fdbfbf478a0c1 upstream. There is a headset jack on the front panel, when we plug a headset into it, the headset mic can't trigger unsol events, and read_pin_sense() can't detect its presence too. So add this fixup to fix this issue. Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_conexant.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index a81aacf684b263..37e1cf8218ff0f 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -271,6 +271,8 @@ enum { CXT_FIXUP_HP_SPECTRE, CXT_FIXUP_HP_GATE_MIC, CXT_FIXUP_MUTE_LED_GPIO, + CXT_FIXUP_HEADSET_MIC, + CXT_FIXUP_HP_MIC_NO_PRESENCE, }; /* for hda_fixup_thinkpad_acpi() */ @@ -350,6 +352,18 @@ static void cxt_fixup_headphone_mic(struct hda_codec *codec, } } +static void cxt_fixup_headset_mic(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct conexant_spec *spec = codec->spec; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; + break; + } +} + /* OPLC XO 1.5 fixup */ /* OLPC XO-1.5 supports DC input mode (e.g. 
for use with analog sensors) @@ -880,6 +894,19 @@ static const struct hda_fixup cxt_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = cxt_fixup_mute_led_gpio, }, + [CXT_FIXUP_HEADSET_MIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = cxt_fixup_headset_mic, + }, + [CXT_FIXUP_HP_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1a, 0x02a1113c }, + { } + }, + .chained = true, + .chain_id = CXT_FIXUP_HEADSET_MIC, + }, }; static const struct snd_pci_quirk cxt5045_fixups[] = { @@ -934,6 +961,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), From 3d858b85e376b08db9c9de42aa4e0b9b47fac466 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Fri, 22 Dec 2017 11:17:46 +0800 Subject: [PATCH 1106/3326] ALSA: hda - change the location for one mic on a Lenovo machine commit 8da5bbfc7cbba909f4f32d5e1dda3750baa5d853 upstream. There are two front mics on this machine, and current driver assign the same name Mic to both of them, but pulseaudio can't handle them. As a workaround, we change the location for one of them, then the driver will assign "Front Mic" and "Mic" for them. Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 9ac4b9076ee241..b32ea9d66f639a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6305,6 +6305,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), From 9fcd2ae2abb5efb67f331f567ed22a7305461a80 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Fri, 22 Dec 2017 11:17:45 +0800 Subject: [PATCH 1107/3326] ALSA: hda - fix headset mic detection issue on a Dell machine commit 285d5ddcffafa5d5e68c586f4c9eaa8b24a2897d upstream. It has the codec alc256, and add its pin definition to pin quirk table to let it apply ALC255_FIXUP_DELL1_MIC_NO_PRESENCE. 
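For readers unfamiliar with pin-quirk tables, a self-contained sketch of the matching idea follows; the types and lookup are simplified stand-ins for the HDA driver's, with the pin values taken from the new Dell entry above.

#include <stdio.h>

struct pin_cfg { unsigned char nid; unsigned int def; };

struct pin_quirk {
	unsigned int codec_id;		/* e.g. 0x10ec0256 (ALC256) */
	unsigned short subsys_vendor;	/* e.g. 0x1028 (Dell) */
	const char *fixup;
	struct pin_cfg pins[4];
	int npins;
};

static const struct pin_quirk table[] = {
	{ 0x10ec0256, 0x1028, "DELL1_MIC_NO_PRESENCE",
	  { { 0x12, 0x90a60130 }, { 0x14, 0x90170110 },
	    { 0x1b, 0x01011020 }, { 0x21, 0x0221101f } }, 4 },
};

/* Fake BIOS pin defaults for the machine being matched. */
static unsigned int read_pin_default(unsigned char nid)
{
	switch (nid) {
	case 0x12: return 0x90a60130;
	case 0x14: return 0x90170110;
	case 0x1b: return 0x01011020;
	case 0x21: return 0x0221101f;
	default:   return 0;
	}
}

static const char *pick_fixup(unsigned int codec, unsigned short vendor)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const struct pin_quirk *q = &table[i];
		int ok = (q->codec_id == codec && q->subsys_vendor == vendor);

		for (int p = 0; ok && p < q->npins; p++)
			ok = (read_pin_default(q->pins[p].nid) == q->pins[p].def);
		if (ok)
			return q->fixup;
	}
	return "none";
}

int main(void)
{
	printf("fixup for ALC256 on this Dell: %s\n", pick_fixup(0x10ec0256, 0x1028));
	return 0;
}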
Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b32ea9d66f639a..7ccc965ffdd6cc 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6558,6 +6558,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x1b, 0x01011020}, {0x21, 0x02211010}), + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x90a60130}, + {0x14, 0x90170110}, + {0x1b, 0x01011020}, + {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60160}, {0x14, 0x90170120}, From 9c5ee053a67eb374878b20eab7d16feb2a87a851 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 27 Dec 2017 08:53:59 +0100 Subject: [PATCH 1108/3326] ALSA: hda - Fix missing COEF init for ALC225/295/299 commit 44be77c590f381bc629815ac789b8b15ecc4ddcf upstream. There was a long-standing problem on HP Spectre X360 with Kabylake where it lacks the front speaker output in some situations. Also there are other products showing similar behavior. The culprit seems to be the missing COEF setup on ALC codecs, ALC225/295/299, which are all compatible. This patch adds the proper COEF setup (to initialize idx 0x67 / bits 0x3000) for addressing the issue. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=195457 Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 7ccc965ffdd6cc..acdb196ddb4410 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -324,8 +324,12 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0292: alc_update_coef_idx(codec, 0x4, 1<<15, 0); break; - case 0x10ec0215: case 0x10ec0225: + case 0x10ec0295: + case 0x10ec0299: + alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000); + /* fallthrough */ + case 0x10ec0215: case 0x10ec0233: case 0x10ec0236: case 0x10ec0255: @@ -336,10 +340,8 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0286: case 0x10ec0288: case 0x10ec0285: - case 0x10ec0295: case 0x10ec0298: case 0x10ec0289: - case 0x10ec0299: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; case 0x10ec0275: From 0c688c288f8e82fdb5f3e0d0f3b2b1f3498ad187 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Thu, 21 Dec 2017 02:22:45 +0100 Subject: [PATCH 1109/3326] cpufreq: schedutil: Use idle_calls counter of the remote CPU commit 466a2b42d67644447a1765276259a3ea5531ddff upstream. Since the recent remote cpufreq callback work, it's possible that a cpufreq update is triggered from a remote CPU. For single policies however, the current code uses the local CPU when trying to determine if the remote sg_cpu entered idle or is busy. This is incorrect. To remedy this, compare with the nohz tick idle_calls counter of the remote CPU. Fixes: 674e75411fc2 (sched: cpufreq: Allow remote cpufreq callbacks) Acked-by: Viresh Kumar Acked-by: Peter Zijlstra (Intel) Signed-off-by: Joel Fernandes Signed-off-by: Rafael J.
Wysocki Signed-off-by: Greg Kroah-Hartman --- include/linux/tick.h | 1 + kernel/sched/cpufreq_schedutil.c | 2 +- kernel/time/tick-sched.c | 13 +++++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/include/linux/tick.h b/include/linux/tick.h index cf413b344ddb79..5cdac11dd31709 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -119,6 +119,7 @@ extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); extern ktime_t tick_nohz_get_sleep_length(void); extern unsigned long tick_nohz_get_idle_calls(void); +extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); #else /* !CONFIG_NO_HZ_COMMON */ diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 2f52ec0f1539fc..d6717a3331a1b2 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -244,7 +244,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util, #ifdef CONFIG_NO_HZ_COMMON static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { - unsigned long idle_calls = tick_nohz_get_idle_calls(); + unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu); bool ret = idle_calls == sg_cpu->saved_idle_calls; sg_cpu->saved_idle_calls = idle_calls; diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index c7a899c5ce643f..e4d463adefc226 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1009,6 +1009,19 @@ ktime_t tick_nohz_get_sleep_length(void) return ts->sleep_length; } +/** + * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value + * for a particular CPU. + * + * Called from the schedutil frequency scaling governor in scheduler context. + */ +unsigned long tick_nohz_get_idle_calls_cpu(int cpu) +{ + struct tick_sched *ts = tick_get_tick_sched(cpu); + + return ts->idle_calls; +} + /** * tick_nohz_get_idle_calls - return the current idle calls counter value * From 88da02868f7741cc7c1d895840e236e6df5483ef Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Dec 2017 15:40:44 +0800 Subject: [PATCH 1110/3326] block: fix blk_rq_append_bio commit 0abc2a10389f0c9070f76ca906c7382788036b93 upstream. Commit caa4b02476e3(blk-map: call blk_queue_bounce from blk_rq_append_bio) moves blk_queue_bounce() into blk_rq_append_bio(), but don't consider the fact that the bounced bio becomes invisible to caller since the parameter type is 'struct bio *'. Make it a pointer to a pointer to a bio, so the caller sees the right bio also after a bounce. Fixes: caa4b02476e3 ("blk-map: call blk_queue_bounce from blk_rq_append_bio") Cc: Christoph Hellwig Reported-by: Michele Ballabio (handling failure of blk_rq_append_bio(), only call bio_get() after blk_rq_append_bio() returns OK) Tested-by: Michele Ballabio Signed-off-by: Ming Lei Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- block/blk-map.c | 38 +++++++++++++++++------------- drivers/scsi/osd/osd_initiator.c | 4 +++- drivers/target/target_core_pscsi.c | 4 ++-- include/linux/blkdev.h | 2 +- 4 files changed, 28 insertions(+), 20 deletions(-) diff --git a/block/blk-map.c b/block/blk-map.c index d5251edcc0ddd7..368daa02714e31 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -12,22 +12,29 @@ #include "blk.h" /* - * Append a bio to a passthrough request. Only works can be merged into - * the request based on the driver constraints. 
+ * Append a bio to a passthrough request. Only works if the bio can be merged + * into the request based on the driver constraints. */ -int blk_rq_append_bio(struct request *rq, struct bio *bio) +int blk_rq_append_bio(struct request *rq, struct bio **bio) { - blk_queue_bounce(rq->q, &bio); + struct bio *orig_bio = *bio; + + blk_queue_bounce(rq->q, bio); if (!rq->bio) { - blk_rq_bio_prep(rq->q, rq, bio); + blk_rq_bio_prep(rq->q, rq, *bio); } else { - if (!ll_back_merge_fn(rq->q, rq, bio)) + if (!ll_back_merge_fn(rq->q, rq, *bio)) { + if (orig_bio != *bio) { + bio_put(*bio); + *bio = orig_bio; + } return -EINVAL; + } - rq->biotail->bi_next = bio; - rq->biotail = bio; - rq->__data_len += bio->bi_iter.bi_size; + rq->biotail->bi_next = *bio; + rq->biotail = *bio; + rq->__data_len += (*bio)->bi_iter.bi_size; } return 0; @@ -80,14 +87,12 @@ static int __blk_rq_map_user_iov(struct request *rq, * We link the bounce buffer in and could have to traverse it * later so we have to get a ref to prevent it from being freed */ - ret = blk_rq_append_bio(rq, bio); - bio_get(bio); + ret = blk_rq_append_bio(rq, &bio); if (ret) { - bio_endio(bio); __blk_rq_unmap_user(orig_bio); - bio_put(bio); return ret; } + bio_get(bio); return 0; } @@ -220,7 +225,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, int reading = rq_data_dir(rq) == READ; unsigned long addr = (unsigned long) kbuf; int do_copy = 0; - struct bio *bio; + struct bio *bio, *orig_bio; int ret; if (len > (queue_max_hw_sectors(q) << 9)) @@ -243,10 +248,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, if (do_copy) rq->rq_flags |= RQF_COPY_USER; - ret = blk_rq_append_bio(rq, bio); + orig_bio = bio; + ret = blk_rq_append_bio(rq, &bio); if (unlikely(ret)) { /* request is too big */ - bio_put(bio); + bio_put(orig_bio); return ret; } diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index a4f28b7e4c65df..e18877177f1b52 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write, return req; for_each_bio(bio) { - ret = blk_rq_append_bio(req, bio); + struct bio *bounce_bio = bio; + + ret = blk_rq_append_bio(req, &bounce_bio); if (ret) return ERR_PTR(ret); } diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 7c69b4a9694d20..0d99b242e82e3f 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, " %d i: %d bio: %p, allocating another" " bio\n", bio->bi_vcnt, i, bio); - rc = blk_rq_append_bio(req, bio); + rc = blk_rq_append_bio(req, &bio); if (rc) { pr_err("pSCSI: failed to append bio\n"); goto fail; @@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, } if (bio) { - rc = blk_rq_append_bio(req, bio); + rc = blk_rq_append_bio(req, &bio); if (rc) { pr_err("pSCSI: failed to append bio\n"); goto fail; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index fd47bd96b5d3d8..1329dca3a914bc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -952,7 +952,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, extern void blk_rq_unprep_clone(struct request *rq); extern blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq); -extern int blk_rq_append_bio(struct request *rq, struct 
bio *bio); +extern int blk_rq_append_bio(struct request *rq, struct bio **bio); extern void blk_delay_queue(struct request_queue *, unsigned long); extern void blk_queue_split(struct request_queue *, struct bio **); extern void blk_recount_segments(struct request_queue *, struct bio *); From eaedee932c91120868d42df39a1fc17fa6a313ef Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 18 Dec 2017 15:40:43 +0800 Subject: [PATCH 1111/3326] block: don't let passthrough IO go into .make_request_fn() commit 14cb0dc6479dc5ebc63b3a459a5d89a2f1b39fed upstream. Commit a8821f3f3("block: Improvements to bounce-buffer handling") tries to make sure that the bio to .make_request_fn won't exceed BIO_MAX_PAGES, but ignores that passthrough I/O can use blk_queue_bounce() too. Especially, passthrough IO may not be sector-aligned, and the check of 'sectors < bio_sectors(*bio_orig)' inside __blk_queue_bounce() may become true even though the max bvec number doesn't exceed BIO_MAX_PAGES, causing the bio to be split and the original passthrough bio to be submitted to generic_make_request(). This patch fixes this issue by checking if the bio is passthrough IO, and using bio_kmalloc() to allocate the cloned passthrough bio. Cc: NeilBrown Fixes: a8821f3f3("block: Improvements to bounce-buffer handling") Tested-by: Michele Ballabio Signed-off-by: Ming Lei Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- block/bounce.c | 6 ++++-- include/linux/blkdev.h | 21 +++++++++++++++++++-- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/block/bounce.c b/block/bounce.c index fceb1a96480bfb..1d05c422c932ad 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, unsigned i = 0; bool bounce = false; int sectors = 0; + bool passthrough = bio_is_passthrough(*bio_orig); bio_for_each_segment(from, *bio_orig, iter) { if (i++ < BIO_MAX_PAGES) @@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, if (!bounce) return; - if (sectors < bio_sectors(*bio_orig)) { + if (!passthrough && sectors < bio_sectors(*bio_orig)) { bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split); bio_chain(bio, *bio_orig); generic_make_request(*bio_orig); *bio_orig = bio; } - bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set); + bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ?
NULL : + bounce_bio_set); bio_for_each_segment_all(to, bio, i) { struct page *page = to->bv_page; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1329dca3a914bc..6362e3606aa509 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -241,14 +241,24 @@ struct request { struct request *next_rq; }; +static inline bool blk_op_is_scsi(unsigned int op) +{ + return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT; +} + +static inline bool blk_op_is_private(unsigned int op) +{ + return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; +} + static inline bool blk_rq_is_scsi(struct request *rq) { - return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT; + return blk_op_is_scsi(req_op(rq)); } static inline bool blk_rq_is_private(struct request *rq) { - return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT; + return blk_op_is_private(req_op(rq)); } static inline bool blk_rq_is_passthrough(struct request *rq) @@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq) return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); } +static inline bool bio_is_passthrough(struct bio *bio) +{ + unsigned op = bio_op(bio); + + return blk_op_is_scsi(op) || blk_op_is_private(op); +} + static inline unsigned short req_get_ioprio(struct request *req) { return req->ioprio; From aa7f9011bc01b22658d4c255e2feecf0193e092d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 29 Dec 2017 17:34:43 -0800 Subject: [PATCH 1112/3326] kbuild: add '-fno-stack-check' to kernel build options MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 3ce120b16cc548472f80cf8644f90eda958cf1b6 upstream. It appears that hardened gentoo enables "-fstack-check" by default for gcc. That doesn't work _at_all_ for the kernel, because the kernel stack doesn't act like a user stack at all: it's much smaller, and it doesn't auto-expand on use. So the extra "probe one page below the stack" code generated by -fstack-check just breaks the kernel in horrible ways, causing infinite double faults etc. [ I have to say, that the particular code gcc generates looks very stupid even for user space where it works, but that's a separate issue. ] Reported-and-tested-by: Alexander Tsoy Reported-and-tested-by: Toralf Förster Cc: Dave Hansen Cc: Jiri Kosina Cc: Andy Lutomirski Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 9edfb78836a9e4..930fa93f397d7b 100644 --- a/Makefile +++ b/Makefile @@ -802,6 +802,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) +# Make sure -fstack-check isn't enabled (like gentoo apparently did) +KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,) + # conserve stack if available KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) From 57dfc3d10e40053e70691260a791bb453f183cfd Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 11 Dec 2017 07:17:39 -0800 Subject: [PATCH 1113/3326] ipv4: igmp: guard against silly MTU values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit b5476022bbada3764609368f03329ca287528dc8 ] IPv4 stack reacts to changes to small MTU, by disabling itself under RTNL. But there is a window where threads not using RTNL can see a wrong device mtu. 
This can lead to surprises, in igmp code where it is assumed the mtu is suitable. Fix this by reading device mtu once and checking IPv4 minimal MTU. This patch adds missing IPV4_MIN_MTU define, to not abuse ETH_MIN_MTU anymore. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/net/ip.h | 1 + net/ipv4/devinet.c | 2 +- net/ipv4/igmp.c | 24 +++++++++++++++--------- net/ipv4/ip_tunnel.c | 4 ++-- 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 9896f46cbbf112..af8addbaa3c188 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -34,6 +34,7 @@ #include #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ +#define IPV4_MIN_MTU 68 /* RFC 791 */ struct sock; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index d7adc061659989..bffa88ecc534b9 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1420,7 +1420,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) static bool inetdev_valid_mtu(unsigned int mtu) { - return mtu >= 68; + return mtu >= IPV4_MIN_MTU; } static void inetdev_send_gratuitous_arp(struct net_device *dev, diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index ab183af0b5b6a8..86f1786fa94b92 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -404,16 +404,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel) } static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, - int type, struct igmpv3_grec **ppgr) + int type, struct igmpv3_grec **ppgr, unsigned int mtu) { struct net_device *dev = pmc->interface->dev; struct igmpv3_report *pih; struct igmpv3_grec *pgr; - if (!skb) - skb = igmpv3_newpack(dev, dev->mtu); - if (!skb) - return NULL; + if (!skb) { + skb = igmpv3_newpack(dev, mtu); + if (!skb) + return NULL; + } pgr = skb_put(skb, sizeof(struct igmpv3_grec)); pgr->grec_type = type; pgr->grec_auxwords = 0; @@ -436,12 +437,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, struct igmpv3_grec *pgr = NULL; struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; int scount, stotal, first, isquery, truncate; + unsigned int mtu; if (pmc->multiaddr == IGMP_ALL_HOSTS) return skb; if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) return skb; + mtu = READ_ONCE(dev->mtu); + if (mtu < IPV4_MIN_MTU) + return skb; + isquery = type == IGMPV3_MODE_IS_INCLUDE || type == IGMPV3_MODE_IS_EXCLUDE; truncate = type == IGMPV3_MODE_IS_EXCLUDE || @@ -462,7 +468,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { if (skb) igmpv3_sendpack(skb); - skb = igmpv3_newpack(dev, dev->mtu); + skb = igmpv3_newpack(dev, mtu); } } first = 1; @@ -498,12 +504,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, pgr->grec_nsrcs = htons(scount); if (skb) igmpv3_sendpack(skb); - skb = igmpv3_newpack(dev, dev->mtu); + skb = igmpv3_newpack(dev, mtu); first = 1; scount = 0; } if (first) { - skb = add_grhead(skb, pmc, type, &pgr); + skb = add_grhead(skb, pmc, type, &pgr, mtu); first = 0; } if (!skb) @@ -538,7 +544,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, igmpv3_sendpack(skb); skb = NULL; /* add_grhead will get a new one */ } - skb = add_grhead(skb, pmc, type, &pgr); + skb = add_grhead(skb, pmc, type, &pgr, mtu); } } if (pgr) diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 
e9805ad664ac24..4e90082b23a6ea 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -349,8 +349,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev) dev->needed_headroom = t_hlen + hlen; mtu -= (dev->hard_header_len + t_hlen); - if (mtu < 68) - mtu = 68; + if (mtu < IPV4_MIN_MTU) + mtu = IPV4_MIN_MTU; return mtu; } From 521f4d9625b7b2359dbec6039724b4feb9e8490d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 11 Dec 2017 07:03:38 -0800 Subject: [PATCH 1114/3326] ipv6: mcast: better catch silly mtu values [ Upstream commit b9b312a7a451e9c098921856e7cfbc201120e1a7 ] syzkaller reported crashes in IPv6 stack [1] Xin Long found that lo MTU was set to silly values. IPv6 stack reacts to changes to small MTU, by disabling itself under RTNL. But there is a window where threads not using RTNL can see a wrong device mtu. This can lead to surprises, in mld code where it is assumed the mtu is suitable. Fix this by reading device mtu once and checking IPv6 minimal MTU. [1] skbuff: skb_over_panic: text:0000000010b86b8d len:196 put:20 head:000000003b477e60 data:000000000e85441e tail:0xd4 end:0xc0 dev:lo ------------[ cut here ]------------ kernel BUG at net/core/skbuff.c:104! invalid opcode: 0000 [#1] SMP KASAN Dumping ftrace buffer: (ftrace buffer empty) Modules linked in: CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.15.0-rc2-mm1+ #39 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 RIP: 0010:skb_panic+0x15c/0x1f0 net/core/skbuff.c:100 RSP: 0018:ffff8801db307508 EFLAGS: 00010286 RAX: 0000000000000082 RBX: ffff8801c517e840 RCX: 0000000000000000 RDX: 0000000000000082 RSI: 1ffff1003b660e61 RDI: ffffed003b660e95 RBP: ffff8801db307570 R08: 1ffff1003b660e23 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffffffff85bd4020 R13: ffffffff84754ed2 R14: 0000000000000014 R15: ffff8801c4e26540 FS: 0000000000000000(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000463610 CR3: 00000001c6698000 CR4: 00000000001406e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: skb_over_panic net/core/skbuff.c:109 [inline] skb_put+0x181/0x1c0 net/core/skbuff.c:1694 add_grhead.isra.24+0x42/0x3b0 net/ipv6/mcast.c:1695 add_grec+0xa55/0x1060 net/ipv6/mcast.c:1817 mld_send_cr net/ipv6/mcast.c:1903 [inline] mld_ifc_timer_expire+0x4d2/0x770 net/ipv6/mcast.c:2448 call_timer_fn+0x23b/0x840 kernel/time/timer.c:1320 expire_timers kernel/time/timer.c:1357 [inline] __run_timers+0x7e1/0xb60 kernel/time/timer.c:1660 run_timer_softirq+0x4c/0xb0 kernel/time/timer.c:1686 __do_softirq+0x29d/0xbb2 kernel/softirq.c:285 invoke_softirq kernel/softirq.c:365 [inline] irq_exit+0x1d3/0x210 kernel/softirq.c:405 exiting_irq arch/x86/include/asm/apic.h:540 [inline] smp_apic_timer_interrupt+0x16b/0x700 arch/x86/kernel/apic/apic.c:1052 apic_timer_interrupt+0xa9/0xb0 arch/x86/entry/entry_64.S:920 Signed-off-by: Eric Dumazet Reported-by: syzbot Tested-by: Xin Long Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/mcast.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 12b7c27ce5ce91..9a38a2c641facd 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1682,16 +1682,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) } static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, - int type, struct mld2_grec **ppgr) + int type, struct mld2_grec **ppgr, unsigned int mtu) { - struct net_device *dev = pmc->idev->dev; struct mld2_report *pmr; struct mld2_grec *pgr; - if (!skb) - skb = mld_newpack(pmc->idev, dev->mtu); - if (!skb) - return NULL; + if (!skb) { + skb = mld_newpack(pmc->idev, mtu); + if (!skb) + return NULL; + } pgr = skb_put(skb, sizeof(struct mld2_grec)); pgr->grec_type = type; pgr->grec_auxwords = 0; @@ -1714,10 +1714,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, struct mld2_grec *pgr = NULL; struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; int scount, stotal, first, isquery, truncate; + unsigned int mtu; if (pmc->mca_flags & MAF_NOREPORT) return skb; + mtu = READ_ONCE(dev->mtu); + if (mtu < IPV6_MIN_MTU) + return skb; + isquery = type == MLD2_MODE_IS_INCLUDE || type == MLD2_MODE_IS_EXCLUDE; truncate = type == MLD2_MODE_IS_EXCLUDE || @@ -1738,7 +1743,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { if (skb) mld_sendpack(skb); - skb = mld_newpack(idev, dev->mtu); + skb = mld_newpack(idev, mtu); } } first = 1; @@ -1774,12 +1779,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, pgr->grec_nsrcs = htons(scount); if (skb) mld_sendpack(skb); - skb = mld_newpack(idev, dev->mtu); + skb = mld_newpack(idev, mtu); first = 1; scount = 0; } if (first) { - skb = add_grhead(skb, pmc, type, &pgr); + skb = add_grhead(skb, pmc, type, &pgr, mtu); first = 0; } if (!skb) @@ -1814,7 +1819,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, mld_sendpack(skb); skb = NULL; /* add_grhead will get a new one */ } - skb = add_grhead(skb, pmc, type, &pgr); + skb = add_grhead(skb, pmc, type, &pgr, mtu); } } if (pgr) From f55ac6684640a285465cf31a8d5e23bcc3da872d Mon Sep 17 00:00:00 2001 From: Fugang Duan Date: Fri, 22 Dec 2017 17:12:09 +0800 Subject: [PATCH 1115/3326] net: fec: unmap the xmit buffer that are not transferred by DMA [ Upstream commit 178e5f57a8d8f8fc5799a624b96fc31ef9a29ffa ] The enet IP only supports 32-bit DMA addressing, so it will use a swiotlb bounce buffer to do dma mapping when the xmit buffer's DMA memory address is above 4G on the i.MX platform. After a stress suspend/resume test, it will print out: log: [12826.352864] fec 5b040000.ethernet: swiotlb buffer is full (sz: 191 bytes) [12826.359676] DMA: Out of SW-IOMMU space for 191 bytes at device 5b040000.ethernet [12826.367110] fec 5b040000.ethernet eth0: Tx DMA memory map failed The issue is that xmit buffers which are already dma mapped, but which the DMA has not yet copied into the fifo, are not unmapped once the MAC restarts. So the driver should check for such dma-mapped buffers and unmap them. Signed-off-by: Fugang Duan Signed-off-by: David S.
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/freescale/fec_main.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3dc2d771a22214..faf7cdc97ebfc5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev) for (i = 0; i < txq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = cpu_to_fec16(0); + if (bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); if (txq->tx_skbuff[i]) { dev_kfree_skb_any(txq->tx_skbuff[i]); txq->tx_skbuff[i] = NULL; From f9c48469278060450e99c3995c5b0717c48d50b8 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Mon, 11 Dec 2017 11:13:45 -0800 Subject: [PATCH 1116/3326] net: igmp: Use correct source address on IGMPv3 reports [ Upstream commit a46182b00290839fa3fa159d54fd3237bd8669f0 ] Closing a multicast socket after the final IPv4 address is deleted from an interface can generate a membership report that uses the source IP from a different interface. The following test script, run from an isolated netns, reproduces the issue: #!/bin/bash ip link add dummy0 type dummy ip link add dummy1 type dummy ip link set dummy0 up ip link set dummy1 up ip addr add 10.1.1.1/24 dev dummy0 ip addr add 192.168.99.99/24 dev dummy1 tcpdump -U -i dummy0 & socat EXEC:"sleep 2" \ UDP4-DATAGRAM:239.101.1.68:8889,ip-add-membership=239.0.1.68:10.1.1.1 & sleep 1 ip addr del 10.1.1.1/24 dev dummy0 sleep 5 kill %tcpdump RFC 3376 specifies that the report must be sent with a valid IP source address from the destination subnet, or from address 0.0.0.0. Add an extra check to make sure this is the case. Signed-off-by: Kevin Cernekee Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/igmp.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 86f1786fa94b92..c621266e0306df 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -89,6 +89,7 @@ #include #include #include +#include #include #include @@ -321,6 +322,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) return scount; } +/* source address selection per RFC 3376 section 4.2.13 */ +static __be32 igmpv3_get_srcaddr(struct net_device *dev, + const struct flowi4 *fl4) +{ + struct in_device *in_dev = __in_dev_get_rcu(dev); + + if (!in_dev) + return htonl(INADDR_ANY); + + for_ifa(in_dev) { + if (inet_ifa_match(fl4->saddr, ifa)) + return fl4->saddr; + } endfor_ifa(in_dev); + + return htonl(INADDR_ANY); +} + static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) { struct sk_buff *skb; @@ -368,7 +386,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) pip->frag_off = htons(IP_DF); pip->ttl = 1; pip->daddr = fl4.daddr; - pip->saddr = fl4.saddr; + pip->saddr = igmpv3_get_srcaddr(dev, &fl4); pip->protocol = IPPROTO_IGMP; pip->tot_len = 0; /* filled in later */ ip_select_ident(net, skb, NULL); From e3fb538e5715250d6a61a26925215229f2e9f52f Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Wed, 6 Dec 2017 12:12:27 -0800 Subject: [PATCH 1117/3326] netlink: Add netns check on taps [ Upstream commit 93c647643b48f0131f02e45da3bd367d80443291 ] Currently, a nlmon link inside a child namespace can observe systemwide netlink activity. Filter the traffic so that nlmon can only sniff netlink messages from its own netns. Test case: vpnns -- bash -c "ip link add nlmon0 type nlmon; \ ip link set nlmon0 up; \ tcpdump -i nlmon0 -q -w /tmp/nlmon.pcap -U" & sudo ip xfrm state add src 10.1.1.1 dst 10.1.1.2 proto esp \ spi 0x1 mode transport \ auth sha1 0x6162633132330000000000000000000000000000 \ enc aes 0x00000000000000000000000000000000 grep --binary abc123 /tmp/nlmon.pcap Signed-off-by: Kevin Cernekee Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/netlink/af_netlink.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 15c99dfa3d728d..aac9d68b463604 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -254,6 +254,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, struct sock *sk = skb->sk; int ret = -ENOMEM; + if (!net_eq(dev_net(dev), sock_net(sk))) + return 0; + dev_hold(dev); if (is_vmalloc_addr(skb->head)) From 1bad9c5ea85e7522d403559cc249dd7a0ae705b1 Mon Sep 17 00:00:00 2001 From: Sebastian Sjoholm Date: Mon, 11 Dec 2017 21:51:14 +0100 Subject: [PATCH 1118/3326] net: qmi_wwan: add Sierra EM7565 1199:9091 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit aceef61ee56898cfa7b6960fb60b9326c3860441 ] Sierra Wireless EM7565 is an Qualcomm MDM9x50 based M.2 modem. The USB id is added to qmi_wwan.c to allow QMI communication with the EM7565. Signed-off-by: Sebastian Sjoholm Acked-by: Bjørn Mork Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 81394a4b280346..2092febfcb4253 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1204,6 +1204,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ From 6d0317869c9153d7e6efa769a764b54019d4a3d3 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 20 Dec 2017 12:10:21 -0800 Subject: [PATCH 1119/3326] net: reevalulate autoflowlabel setting after sysctl setting [ Upstream commit 513674b5a2c9c7a67501506419da5c3c77ac6f08 ] sysctl.ip6.auto_flowlabels defaults to 1. In our hosts, we set it to 2. If the sockopt doesn't set autoflowlabel, outgoing packets from the hosts are supposed to not include a flowlabel. This is true for normal packets, but not for reset packets. The reason is that ipv6_pinfo.autoflowlabel is set at sock creation. If we later change sysctl.ip6.auto_flowlabels, ipv6_pinfo.autoflowlabel isn't changed, so the sock will keep the old behavior in terms of auto flowlabel. Reset packets suffer from this problem because they are sent from a special control socket, which is created at boot time. Since sysctl.ipv6.auto_flowlabels is 1 by default, the control socket will always have its ipv6_pinfo.autoflowlabel set, even after the user changes sysctl.ipv6.auto_flowlabels, so reset packets will always have a flowlabel. A normal sock created before the sysctl change suffers from the same issue. We can't even turn off autoflowlabel unless we kill all socks in the hosts. To fix this, if the IPV6_AUTOFLOWLABEL sockopt is used, we use the autoflowlabel setting from the user; otherwise we always call ip6_default_np_autolabel(), which reflects the current sysctl setting. Note, this changes behavior a little bit. Before commit 42240901f7c4 (ipv6: Implement different admin modes for automatic flow labels), the autoflowlabel behavior of a sock wasn't sticky, e.g., if the sysctl changed, existing connections would change their autoflowlabel behavior. After that commit, autoflowlabel behavior is sticky for the whole life of the sock. With this patch, the behavior is no longer sticky. Cc: Martin KaFai Lau Cc: Eric Dumazet Cc: Tom Herbert Signed-off-by: Shaohua Li Signed-off-by: David S.
Miller Signed-off-by: Greg Kroah-Hartman --- include/linux/ipv6.h | 3 ++- net/ipv6/af_inet6.c | 1 - net/ipv6/ip6_output.c | 12 ++++++++++-- net/ipv6/ipv6_sockglue.c | 1 + 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ea04ca024f0d2a..067a6fa675eda0 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -272,7 +272,8 @@ struct ipv6_pinfo { * 100: prefer care-of address */ dontfrag:1, - autoflowlabel:1; + autoflowlabel:1, + autoflowlabel_set:1; __u8 min_hopcount; __u8 tclass; __be32 rcv_flowinfo; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index fe5262fd6aa5c2..bcbd5f3bf8bd08 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -210,7 +210,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol, np->mcast_hops = IPV6_DEFAULT_MCASTHOPS; np->mc_loop = 1; np->pmtudisc = IPV6_PMTUDISC_WANT; - np->autoflowlabel = ip6_default_np_autolabel(net); np->repflow = net->ipv6.sysctl.flowlabel_reflect; sk->sk_ipv6only = net->ipv6.sysctl.bindv6only; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 5110a418cc4d0c..f7dd51c4231415 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -166,6 +166,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } +static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np) +{ + if (!np->autoflowlabel_set) + return ip6_default_np_autolabel(net); + else + return np->autoflowlabel; +} + /* * xmit an sk_buff (used by TCP, SCTP and DCCP) * Note : socket lock is not held for SYNACK packets, but might be modified @@ -230,7 +238,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, hlimit = ip6_dst_hoplimit(dst); ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel, - np->autoflowlabel, fl6)); + ip6_autoflowlabel(net, np), fl6)); hdr->payload_len = htons(seg_len); hdr->nexthdr = proto; @@ -1626,7 +1634,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, ip6_flow_hdr(hdr, v6_cork->tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel, - np->autoflowlabel, fl6)); + ip6_autoflowlabel(net, np), fl6)); hdr->hop_limit = v6_cork->hop_limit; hdr->nexthdr = proto; hdr->saddr = fl6->saddr; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index a5e466d4e09310..90dbfa78a39075 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -878,6 +878,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, break; case IPV6_AUTOFLOWLABEL: np->autoflowlabel = valbool; + np->autoflowlabel_set = 1; retv = 0; break; case IPV6_RECVFRAGSIZE: From 78ce0e9c4183a34b0910ec36c4ad8df1f7bf7874 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 5 Dec 2017 21:29:37 +0200 Subject: [PATCH 1120/3326] ptr_ring: add barriers [ Upstream commit a8ceb5dbfde1092b466936bca0ff3be127ecf38e ] Users of ptr_ring expect that it's safe to give the data structure a pointer and have it be available to consumers, but that actually requires an smp_wmb or a stronger barrier. In the absence of such barriers and on architectures that reorder writes, a consumer might read an uninitialized value from an skb pointer stored in the skb array. This was observed causing crashes. To fix, add memory barriers. The barrier we use is a wmb, the assumption being that producers do not need to read the value so we do not need to order these reads. Reported-by: George Cherian Suggested-by: Jason Wang Signed-off-by: Michael S.
Tsirkin Acked-by: Jason Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/linux/ptr_ring.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 37b4bb2545b32d..6866df4f31b59d 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r) /* Note: callers invoking this in a loop must use a compiler barrier, * for example cpu_relax(). Callers must hold producer_lock. + * Callers are responsible for making sure pointer that is being queued + * points to a valid data. */ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) { if (unlikely(!r->size) || r->queue[r->producer]) return -ENOSPC; + /* Make sure the pointer we are storing points to a valid data. */ + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ + smp_wmb(); + r->queue[r->producer++] = ptr; if (unlikely(r->producer >= r->size)) r->producer = 0; @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) if (ptr) __ptr_ring_discard_one(r); + /* Make sure anyone accessing data through the pointer is up to date. */ + /* Pairs with smp_wmb in __ptr_ring_produce. */ + smp_read_barrier_depends(); return ptr; } From e7728247372ca9f7faccca4a37e96964f796c94b Mon Sep 17 00:00:00 2001 From: Avinash Repaka Date: Thu, 21 Dec 2017 20:17:04 -0800 Subject: [PATCH 1121/3326] RDS: Check cmsg_len before dereferencing CMSG_DATA [ Upstream commit 14e138a86f6347c6199f610576d2e11c03bec5f0 ] RDS currently doesn't check if the length of the control message is large enough to hold the required data, before dereferencing the control message data. This results in following crash: BUG: KASAN: stack-out-of-bounds in rds_rdma_bytes net/rds/send.c:1013 [inline] BUG: KASAN: stack-out-of-bounds in rds_sendmsg+0x1f02/0x1f90 net/rds/send.c:1066 Read of size 8 at addr ffff8801c928fb70 by task syzkaller455006/3157 CPU: 0 PID: 3157 Comm: syzkaller455006 Not tainted 4.15.0-rc3+ #161 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x257 lib/dump_stack.c:53 print_address_description+0x73/0x250 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 [inline] kasan_report+0x25b/0x340 mm/kasan/report.c:409 __asan_report_load8_noabort+0x14/0x20 mm/kasan/report.c:430 rds_rdma_bytes net/rds/send.c:1013 [inline] rds_sendmsg+0x1f02/0x1f90 net/rds/send.c:1066 sock_sendmsg_nosec net/socket.c:628 [inline] sock_sendmsg+0xca/0x110 net/socket.c:638 ___sys_sendmsg+0x320/0x8b0 net/socket.c:2018 __sys_sendmmsg+0x1ee/0x620 net/socket.c:2108 SYSC_sendmmsg net/socket.c:2139 [inline] SyS_sendmmsg+0x35/0x60 net/socket.c:2134 entry_SYSCALL_64_fastpath+0x1f/0x96 RIP: 0033:0x43fe49 RSP: 002b:00007fffbe244ad8 EFLAGS: 00000217 ORIG_RAX: 0000000000000133 RAX: ffffffffffffffda RBX: 00000000004002c8 RCX: 000000000043fe49 RDX: 0000000000000001 RSI: 000000002020c000 RDI: 0000000000000003 RBP: 00000000006ca018 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000217 R12: 00000000004017b0 R13: 0000000000401840 R14: 0000000000000000 R15: 0000000000000000 To fix this, we verify that the cmsg_len is large enough to hold the data to be read, before proceeding further. Reported-by: syzbot Signed-off-by: Avinash Repaka Acked-by: Santosh Shilimkar Reviewed-by: Yuval Shaia Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/rds/send.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/rds/send.c b/net/rds/send.c index b52cdc8ae42881..f72466c63f0c56 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1009,6 +1009,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes) continue; if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) { + if (cmsg->cmsg_len < + CMSG_LEN(sizeof(struct rds_rdma_args))) + return -EINVAL; args = CMSG_DATA(cmsg); *rdma_bytes += args->remote_vec.bytes; } From e414e7f03c29a4d3f07e042e18bdfe84269897df Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Thu, 7 Dec 2017 12:43:30 -0500 Subject: [PATCH 1122/3326] tcp_bbr: record "full bw reached" decision in new full_bw_reached bit [ Upstream commit c589e69b508d29ed8e644dfecda453f71c02ec27 ] This commit records the "full bw reached" decision in a new full_bw_reached bit. This is a pure refactor that does not change the current behavior, but enables subsequent fixes and improvements. In particular, this enables simple and clean fixes because the full_bw and full_bw_cnt can be unconditionally zeroed without worrying about forgetting that we estimated we filled the pipe in Startup. And it enables future improvements because multiple code paths can be used for estimating that we filled the pipe in Startup; any new code paths only need to set this bit when they think the pipe is full. Note that this fix intentionally reduces the width of the full_bw_cnt counter, since we have never used the most significant bit. Signed-off-by: Neal Cardwell Reviewed-by: Yuchung Cheng Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_bbr.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 69ee877574d08b..3089c956b9f927 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -110,7 +110,8 @@ struct bbr { u32 lt_last_lost; /* LT intvl start: tp->lost */ u32 pacing_gain:10, /* current gain for setting pacing rate */ cwnd_gain:10, /* current gain for setting cwnd */ - full_bw_cnt:3, /* number of rounds without large bw gains */ + full_bw_reached:1, /* reached full bw in Startup? */ + full_bw_cnt:2, /* number of rounds without large bw gains */ cycle_idx:3, /* current index in pacing_gain cycle array */ has_seen_rtt:1, /* have we seen an RTT sample yet? */ unused_b:5; @@ -180,7 +181,7 @@ static bool bbr_full_bw_reached(const struct sock *sk) { const struct bbr *bbr = inet_csk_ca(sk); - return bbr->full_bw_cnt >= bbr_full_bw_cnt; + return bbr->full_bw_reached; } /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */ @@ -717,6 +718,7 @@ static void bbr_check_full_bw_reached(struct sock *sk, return; } ++bbr->full_bw_cnt; + bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt; } /* If pipe is probably full, drain the queue and then enter steady-state. */ @@ -850,6 +852,7 @@ static void bbr_init(struct sock *sk) bbr->restore_cwnd = 0; bbr->round_start = 0; bbr->idle_restart = 0; + bbr->full_bw_reached = 0; bbr->full_bw = 0; bbr->full_bw_cnt = 0; bbr->cycle_mstamp = 0; From 4f2963559f29f34a8dbcc35a331f152fa3492224 Mon Sep 17 00:00:00 2001 From: Christoph Paasch Date: Mon, 11 Dec 2017 00:05:46 -0800 Subject: [PATCH 1123/3326] tcp md5sig: Use skb's saddr when replying to an incoming segment [ Upstream commit 30791ac41927ebd3e75486f9504b6d2280463bf0 ] The MD5-key that belongs to a connection is identified by the peer's IP-address. 
When we are in tcp_v4(6)_reqsk_send_ack(), we are replying to an incoming segment from tcp_check_req() that failed the seq-number checks. Thus, to find the correct key, we need to use the skb's saddr and not the daddr. This bug seems to have been there since quite a while, but probably got unnoticed because the consequences are not catastrophic. We will call tcp_v4_reqsk_send_ack only to send a challenge-ACK back to the peer, thus the connection doesn't really fail. Fixes: 9501f9722922 ("tcp md5sig: Let the caller pass appropriate key for tcp_v{4,6}_do_calc_md5_hash().") Signed-off-by: Christoph Paasch Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_ipv4.c | 2 +- net/ipv6/tcp_ipv6.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5a5ed4f1467839..cab4b935e4743e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -844,7 +844,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, req->ts_recent, 0, - tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, + tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr, AF_INET), inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, ip_hdr(skb)->tos); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 32ded300633d86..237cc6187c5a47 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -988,7 +988,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, req->ts_recent, sk->sk_bound_dev_if, - tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), + tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr), 0, 0); } From d3f3d4134eb700ad087691effbdf26fe8f3bfd90 Mon Sep 17 00:00:00 2001 From: Brian King Date: Fri, 15 Dec 2017 15:21:50 -0600 Subject: [PATCH 1124/3326] tg3: Fix rx hang on MTU change with 5717/5719 [ Upstream commit 748a240c589824e9121befb1cba5341c319885bc ] This fixes a hang issue seen when changing the MTU size from 1500 MTU to 9000 MTU on both 5717 and 5719 chips. In discussion with Broadcom, they've indicated that these chipsets have the same phy as the 57766 chipset, so the same workarounds apply. This has been tested by IBM on both Power 8 and Power 9 systems as well as by Broadcom on x86 hardware and has been confirmed to resolve the hang issue. Signed-off-by: Brian King Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/broadcom/tg3.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 656e6af70f0a03..aef3fcf2f5b99b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -14227,7 +14227,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) /* Reset PHY, otherwise the read DMA engine will be in a mode that * breaks all requests to 256 bytes. 
*/ - if (tg3_asic_rev(tp) == ASIC_REV_57766) + if (tg3_asic_rev(tp) == ASIC_REV_57766 || + tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719) reset_phy = true; err = tg3_restart_hw(tp, reset_phy); From eb710b5f62ad3a18742bae70f91e8664ee23cbe3 Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Thu, 7 Dec 2017 12:43:31 -0500 Subject: [PATCH 1125/3326] tcp_bbr: reset full pipe detection on loss recovery undo [ Upstream commit 2f6c498e4f15d27852c04ed46d804a39137ba364 ] Fix BBR so that upon notification of a loss recovery undo BBR resets the full pipe detection (STARTUP exit) state machine. Under high reordering, reordering events can be interpreted as loss. If the reordering and spurious loss estimates are high enough, this could previously cause BBR to spuriously estimate that the pipe is full. Since spurious loss recovery means that our overall sending will have slowed down spuriously, this commit gives a flow more time to probe robustly for bandwidth and decide the pipe is really full. Signed-off-by: Neal Cardwell Reviewed-by: Yuchung Cheng Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_bbr.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 3089c956b9f927..ab3ff14ea7f763 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -874,6 +874,10 @@ static u32 bbr_sndbuf_expand(struct sock *sk) */ static u32 bbr_undo_cwnd(struct sock *sk) { + struct bbr *bbr = inet_csk_ca(sk); + + bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */ + bbr->full_bw_cnt = 0; return tcp_sk(sk)->snd_cwnd; } From c7e9d724785dc9b6a423bddc6b4fa6fb1368692f Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Thu, 7 Dec 2017 12:43:32 -0500 Subject: [PATCH 1126/3326] tcp_bbr: reset long-term bandwidth sampling on loss recovery undo [ Upstream commit 600647d467c6d04b3954b41a6ee1795b5ae00550 ] Fix BBR so that upon notification of a loss recovery undo BBR resets long-term bandwidth sampling. Under high reordering, reordering events can be interpreted as loss. If the reordering and spurious loss estimates are high enough, this can cause BBR to spuriously estimate that we are seeing loss rates high enough to trigger long-term bandwidth estimation. To avoid that problem, this commit resets long-term bandwidth sampling on loss recovery undo events. Signed-off-by: Neal Cardwell Reviewed-by: Yuchung Cheng Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_bbr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index ab3ff14ea7f763..8322f26e770e44 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -878,6 +878,7 @@ static u32 bbr_undo_cwnd(struct sock *sk) bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */ bbr->full_bw_cnt = 0; + bbr_reset_lt_bw_sampling(sk); return tcp_sk(sk)->snd_cwnd; } From 621b5ae0f9f4f9ef91bf441afc086ecf5e752d51 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 13 Dec 2017 18:56:29 +0100 Subject: [PATCH 1127/3326] s390/qeth: apply takeover changes when mode is toggled [ Upstream commit 7fbd9493f0eeae8cef58300505a9ef5c8fce6313 ] Just as for an explicit enable/disable, toggling the takeover mode also requires that the IP addresses get updated. Otherwise all IPs that were added to the table before the mode-toggle, get registered with the old settings. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_core.h | 2 +- drivers/s390/net/qeth_core_main.c | 2 +- drivers/s390/net/qeth_l3_sys.c | 35 +++++++++++++++---------------- 3 files changed, 19 insertions(+), 20 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 5340efc673a924..0a7bf3cbb70142 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -564,7 +564,7 @@ enum qeth_cq { }; struct qeth_ipato { - int enabled; + bool enabled; int invert4; int invert6; struct list_head entries; }; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 330e5d3dadf38a..be95f8c8f7a792 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1479,7 +1479,7 @@ static int qeth_setup_card(struct qeth_card *card) qeth_set_intial_options(card); /* IP address takeover */ INIT_LIST_HEAD(&card->ipato.entries); - card->ipato.enabled = 0; + card->ipato.enabled = false; card->ipato.invert4 = 0; card->ipato.invert6 = 0; /* init QDIO stuff */ diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 7a829ad777836c..30bfeb1004a33a 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -372,6 +372,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, struct qeth_card *card = dev_get_drvdata(dev); struct qeth_ipaddr *addr; int i, rc = 0; + bool enable; if (!card) return -EINVAL; @@ -384,25 +385,23 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, } if (sysfs_streq(buf, "toggle")) { - card->ipato.enabled = (card->ipato.enabled)? 0 : 1; - } else if (sysfs_streq(buf, "1")) { - card->ipato.enabled = 1; - hash_for_each(card->ip_htable, i, addr, hnode) { - if ((addr->type == QETH_IP_TYPE_NORMAL) && - qeth_l3_is_addr_covered_by_ipato(card, addr)) - addr->set_flags |= - QETH_IPA_SETIP_TAKEOVER_FLAG; - } - } else if (sysfs_streq(buf, "0")) { - card->ipato.enabled = 0; - hash_for_each(card->ip_htable, i, addr, hnode) { - if (addr->set_flags & - QETH_IPA_SETIP_TAKEOVER_FLAG) - addr->set_flags &= - ~QETH_IPA_SETIP_TAKEOVER_FLAG; - } - } else + enable = !card->ipato.enabled; + } else if (kstrtobool(buf, &enable)) { rc = -EINVAL; + goto out; + } + + if (card->ipato.enabled == enable) + goto out; + card->ipato.enabled = enable; + + hash_for_each(card->ip_htable, i, addr, hnode) { + if (!enable) + addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG; + else if (addr->type == QETH_IP_TYPE_NORMAL && + qeth_l3_is_addr_covered_by_ipato(card, addr)) + addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; + } out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; From e34a43e57c218bf28f8ec73a1ce68fec41c4b20c Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 13 Dec 2017 18:56:30 +0100 Subject: [PATCH 1128/3326] s390/qeth: don't apply takeover changes to RXIP [ Upstream commit b22d73d6689fd902a66c08ebe71ab2f3b351e22f ] When takeover is switched off, current code clears the 'TAKEOVER' flag on all IPs. But the flag is also used for RXIP addresses, and those should not be affected by the takeover mode. Fix the behaviour by consistently applying takeover logic to NORMAL addresses only. Signed-off-by: Julian Wiedmann Signed-off-by: David S.
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_l3_main.c | 5 +++-- drivers/s390/net/qeth_l3_sys.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 27185ab38136fc..56d1c756aef9ea 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -173,6 +173,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, if (!card->ipato.enabled) return 0; + if (addr->type != QETH_IP_TYPE_NORMAL) + return 0; qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, (addr->proto == QETH_PROT_IPV4)? 4:16); @@ -289,8 +291,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); addr->ref_counter = 1; - if (addr->type == QETH_IP_TYPE_NORMAL && - qeth_l3_is_addr_covered_by_ipato(card, addr)) { + if (qeth_l3_is_addr_covered_by_ipato(card, addr)) { QETH_CARD_TEXT(card, 2, "tkovaddr"); addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; } diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 30bfeb1004a33a..aada072ef0e3ea 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -396,10 +396,11 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, card->ipato.enabled = enable; hash_for_each(card->ip_htable, i, addr, hnode) { + if (addr->type != QETH_IP_TYPE_NORMAL) + continue; if (!enable) addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG; - else if (addr->type == QETH_IP_TYPE_NORMAL && - qeth_l3_is_addr_covered_by_ipato(card, addr)) + else if (qeth_l3_is_addr_covered_by_ipato(card, addr)) addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; } out: From 8658408f284efd553a882ee717326845130c51d9 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 13 Dec 2017 18:56:31 +0100 Subject: [PATCH 1129/3326] s390/qeth: lock IP table while applying takeover changes [ Upstream commit 8a03a3692b100d84785ee7a834e9215e304c9e00 ] Modifying the flags of an IP addr object needs to be protected against eg. concurrent removal of the same object from the IP table. Fixes: 5f78e29ceebf ("qeth: optimize IP handling in rx_mode callback") Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_l3_sys.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index aada072ef0e3ea..a2634836fd1e5c 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -395,6 +395,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, goto out; card->ipato.enabled = enable; + spin_lock_bh(&card->ip_lock); hash_for_each(card->ip_htable, i, addr, hnode) { if (addr->type != QETH_IP_TYPE_NORMAL) continue; @@ -403,6 +404,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, else if (qeth_l3_is_addr_covered_by_ipato(card, addr)) addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; } + spin_unlock_bh(&card->ip_lock); out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; From 72b44d0434c1a4688b55aaa59150d74344721306 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 13 Dec 2017 18:56:32 +0100 Subject: [PATCH 1130/3326] s390/qeth: update takeover IPs after configuration change [ Upstream commit 02f510f326501470348a5df341e8232c3497bbbb ] Any modification to the takeover IP-ranges requires that we re-evaluate which IP addresses are takeover-eligible. 
Otherwise we might do takeover for some addresses when we no longer should, or vice-versa. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_core.h | 4 +- drivers/s390/net/qeth_core_main.c | 4 +- drivers/s390/net/qeth_l3.h | 2 +- drivers/s390/net/qeth_l3_main.c | 31 +++++++++++++-- drivers/s390/net/qeth_l3_sys.c | 63 +++++++++++++++++-------------- 5 files changed, 67 insertions(+), 37 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 0a7bf3cbb70142..92dd4aef21a3b4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -565,8 +565,8 @@ enum qeth_cq { struct qeth_ipato { bool enabled; - int invert4; - int invert6; + bool invert4; + bool invert6; struct list_head entries; }; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index be95f8c8f7a792..291eb89ecb06b3 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1480,8 +1480,8 @@ static int qeth_setup_card(struct qeth_card *card) /* IP address takeover */ INIT_LIST_HEAD(&card->ipato.entries); card->ipato.enabled = false; - card->ipato.invert4 = 0; - card->ipato.invert6 = 0; + card->ipato.invert4 = false; + card->ipato.invert6 = false; /* init QDIO stuff */ qeth_init_qdio_info(card); INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 194ae9b577ccae..e5833837b799ec 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -82,7 +82,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *); int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, const u8 *); -int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); +void qeth_l3_update_ipato(struct qeth_card *card); struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 56d1c756aef9ea..36dee176f8e2c8 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -163,8 +163,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) } } -int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, - struct qeth_ipaddr *addr) +static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, + struct qeth_ipaddr *addr) { struct qeth_ipato_entry *ipatoe; u8 addr_bits[128] = {0, }; @@ -605,6 +605,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) /* * IP address takeover related functions */ + +/** + * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. + * + * Caller must hold ip_lock. 
+ */ +void qeth_l3_update_ipato(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + unsigned int i; + + hash_for_each(card->ip_htable, i, addr, hnode) { + if (addr->type != QETH_IP_TYPE_NORMAL) + continue; + if (qeth_l3_is_addr_covered_by_ipato(card, addr)) + addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; + else + addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG; + } +} + static void qeth_l3_clear_ipato_list(struct qeth_card *card) { struct qeth_ipato_entry *ipatoe, *tmp; @@ -616,6 +637,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card) kfree(ipatoe); } + qeth_l3_update_ipato(card); spin_unlock_bh(&card->ip_lock); } @@ -640,8 +662,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, } } - if (!rc) + if (!rc) { list_add_tail(&new->entry, &card->ipato.entries); + qeth_l3_update_ipato(card); + } spin_unlock_bh(&card->ip_lock); @@ -664,6 +688,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, (proto == QETH_PROT_IPV4)? 4:16) && (ipatoe->mask_bits == mask_bits)) { list_del(&ipatoe->entry); + qeth_l3_update_ipato(card); kfree(ipatoe); } } diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index a2634836fd1e5c..1295dd8ec849db 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -370,9 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); - struct qeth_ipaddr *addr; - int i, rc = 0; bool enable; + int rc = 0; if (!card) return -EINVAL; @@ -391,20 +390,12 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, goto out; } - if (card->ipato.enabled == enable) - goto out; - card->ipato.enabled = enable; - - spin_lock_bh(&card->ip_lock); - hash_for_each(card->ip_htable, i, addr, hnode) { - if (addr->type != QETH_IP_TYPE_NORMAL) - continue; - if (!enable) - addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG; - else if (qeth_l3_is_addr_covered_by_ipato(card, addr)) - addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; + if (card->ipato.enabled != enable) { + card->ipato.enabled = enable; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); } - spin_unlock_bh(&card->ip_lock); out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; @@ -430,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + bool invert; int rc = 0; if (!card) return -EINVAL; mutex_lock(&card->conf_mutex); - if (sysfs_streq(buf, "toggle")) - card->ipato.invert4 = (card->ipato.invert4)? 0 : 1; - else if (sysfs_streq(buf, "1")) - card->ipato.invert4 = 1; - else if (sysfs_streq(buf, "0")) - card->ipato.invert4 = 0; - else + if (sysfs_streq(buf, "toggle")) { + invert = !card->ipato.invert4; + } else if (kstrtobool(buf, &invert)) { rc = -EINVAL; + goto out; + } + + if (card->ipato.invert4 != invert) { + card->ipato.invert4 = invert; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } +out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; } @@ -609,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + bool invert; int rc = 0; if (!card) return -EINVAL; mutex_lock(&card->conf_mutex); - if (sysfs_streq(buf, "toggle")) - card->ipato.invert6 = (card->ipato.invert6)? 
0 : 1; - else if (sysfs_streq(buf, "1")) - card->ipato.invert6 = 1; - else if (sysfs_streq(buf, "0")) - card->ipato.invert6 = 0; - else + if (sysfs_streq(buf, "toggle")) { + invert = !card->ipato.invert6; + } else if (kstrtobool(buf, &invert)) { rc = -EINVAL; + goto out; + } + + if (card->ipato.invert6 != invert) { + card->ipato.invert6 = invert; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } +out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; } From 3bc400bad0e003d40a0a2412411aed7cbae16f96 Mon Sep 17 00:00:00 2001 From: Mohamed Ghannam Date: Sun, 10 Dec 2017 03:50:58 +0000 Subject: [PATCH 1131/3326] net: ipv4: fix for a race condition in raw_sendmsg [ Upstream commit 8f659a03a0ba9289b9aeb9b4470e6fb263d6f483 ] inet->hdrincl is racy, and could lead to uninitialized stack pointer usage, so its value should be read only once. Fixes: c008ba5bdc9f ("ipv4: Avoid reading user iov twice after raw_probe_proto_opt") Signed-off-by: Mohamed Ghannam Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/raw.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 33b70bfd1122f0..125c1eab3eaa6d 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -513,11 +513,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int err; struct ip_options_data opt_copy; struct raw_frag_vec rfv; + int hdrincl; err = -EMSGSIZE; if (len > 0xFFFF) goto out; + /* hdrincl should be READ_ONCE(inet->hdrincl) + * but READ_ONCE() doesn't work with bit fields + */ + hdrincl = inet->hdrincl; /* * Check the flags. */ @@ -593,7 +598,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) /* Linux does not mangle headers on raw sockets, * so that IP options + IP_HDRINCL is non-sense. */ - if (inet->hdrincl) + if (hdrincl) goto done; if (ipc.opt->opt.srr) { if (!daddr) @@ -615,12 +620,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, - inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, + hdrincl ? IPPROTO_RAW : sk->sk_protocol, inet_sk_flowi_flags(sk) | - (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), + (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), daddr, saddr, 0, 0, sk->sk_uid); - if (!inet->hdrincl) { + if (!hdrincl) { rfv.msg = msg; rfv.hlen = 0; @@ -645,7 +650,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto do_confirm; back_from_confirm: - if (inet->hdrincl) + if (hdrincl) err = raw_send_hdrinc(sk, &fl4, msg, len, &rt, msg->msg_flags, &ipc.sockc); From 9f49cbc7cd207f27ead81246d9553515aea68131 Mon Sep 17 00:00:00 2001 From: Tobias Jordan Date: Wed, 6 Dec 2017 15:23:23 +0100 Subject: [PATCH 1132/3326] net: mvmdio: disable/unprepare clocks in EPROBE_DEFER case [ Upstream commit 589bf32f09852041fbd3b7ce1a9e703f95c230ba ] add appropriate calls to clk_disable_unprepare() by jumping to out_mdio in case orion_mdio_probe() returns -EPROBE_DEFER. Found by Linux Driver Verification project (linuxtesting.org). Fixes: 3d604da1e954 ("net: mvmdio: get and enable optional clock") Signed-off-by: Tobias Jordan Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/marvell/mvmdio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index c9798210fa0f6c..0495487f7b42e7 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -344,7 +344,8 @@ static int orion_mdio_probe(struct platform_device *pdev) dev->regs + MVMDIO_ERR_INT_MASK); } else if (dev->err_interrupt == -EPROBE_DEFER) { - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto out_mdio; } if (pdev->dev.of_node) From a3927015a4bbcb73d35cdbed24f81efeba1d7a6a Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Fri, 22 Dec 2017 10:15:20 -0800 Subject: [PATCH 1133/3326] sctp: Replace use of sockets_allocated with specified macro. [ Upstream commit 8cb38a602478e9f806571f6920b0a3298aabf042 ] The patch(180d8cd942ce) replaces all uses of struct sock fields' memory_pressure, memory_allocated, sockets_allocated, and sysctl_mem to accessor macros. But the sockets_allocated field of sctp sock is not replaced at all. Then replace it now for unifying the code. Fixes: 180d8cd942ce ("foundations of per-cgroup memory pressure controlling.") Cc: Glauber Costa Signed-off-by: Tonghao Zhang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d6163f7aefb141..1977238dc02376 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4413,7 +4413,7 @@ static int sctp_init_sock(struct sock *sk) SCTP_DBG_OBJCNT_INC(sock); local_bh_disable(); - percpu_counter_inc(&sctp_sockets_allocated); + sk_sockets_allocated_inc(sk); sock_prot_inuse_add(net, sk->sk_prot, 1); /* Nothing can fail after this block, otherwise @@ -4457,7 +4457,7 @@ static void sctp_destroy_sock(struct sock *sk) } sctp_endpoint_free(sp->ep); local_bh_disable(); - percpu_counter_dec(&sctp_sockets_allocated); + sk_sockets_allocated_dec(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); } From 6d1c489810bcde9266ccdbedc0861a5dc9778f60 Mon Sep 17 00:00:00 2001 From: "Nikita V. Shirokov" Date: Wed, 6 Dec 2017 17:15:43 -0800 Subject: [PATCH 1134/3326] adding missing rcu_read_unlock in ipxip6_rcv [ Upstream commit 74c4b656c3d92ec4c824ea1a4afd726b7b6568c8 ] commit 8d79266bc48c ("ip6_tunnel: add collect_md mode to IPv6 tunnels") introduced new exit point in ipxip6_rcv. however rcu_read_unlock is missing there. this diff is fixing this v1->v2: instead of doing rcu_read_unlock in place, we are going to "drop" section (to prevent skb leakage) Fixes: 8d79266bc48c ("ip6_tunnel: add collect_md mode to IPv6 tunnels") Signed-off-by: Nikita V. Shirokov Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_tunnel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index a1c24443cd9e01..ef958d50746b5e 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -912,7 +912,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto, if (t->parms.collect_md) { tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0); if (!tun_dst) - return 0; + goto drop; } ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate, log_ecn_error); From 44319591ffa28770f12f33b70e4269d8cda8cb5f Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Wed, 20 Dec 2017 19:36:03 +0300 Subject: [PATCH 1135/3326] ip6_gre: fix device features for ioctl setup [ Upstream commit e5a9336adb317db55eb3fe8200856096f3c71109 ] When ip6gre is created using ioctl, its features, such as scatter-gather, GSO and tx-checksumming will be turned off: # ip -f inet6 tunnel add gre6 mode ip6gre remote fd00::1 # ethtool -k gre6 (truncated output) tx-checksumming: off scatter-gather: off tcp-segmentation-offload: off generic-segmentation-offload: off [requested on] But when netlink is used, they will be enabled: # ip link add gre6 type ip6gre remote fd00::1 # ethtool -k gre6 (truncated output) tx-checksumming: on scatter-gather: on tcp-segmentation-offload: on generic-segmentation-offload: on This results in a loss of performance when gre6 is created via ioctl. The issue was found with LTP/gre tests. Fix it by moving the setup of device features to a separate function and invoke it with ndo_init callback because both netlink and ioctl will eventually call it via register_netdevice(): register_netdevice() - ndo_init() callback -> ip6gre_tunnel_init() or ip6gre_tap_init() - ip6gre_tunnel_init_common() - ip6gre_tnl_init_features() The moved code also contains two minor style fixes: * removed needless tab from GRE6_FEATURES on NETIF_F_HIGHDMA line. * fixed the issue reported by checkpatch: "Unnecessary parentheses around 'nt->encap.type == TUNNEL_ENCAP_NONE'" Fixes: ac4eb009e477 ("ip6gre: Add support for basic offloads offloads excluding GSO") Signed-off-by: Alexey Kodanev Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_gre.c | 57 ++++++++++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 25 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 5d6bee070871f1..7a2df6646486ae 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1020,6 +1020,36 @@ static void ip6gre_tunnel_setup(struct net_device *dev) eth_random_addr(dev->perm_addr); } +#define GRE6_FEATURES (NETIF_F_SG | \ + NETIF_F_FRAGLIST | \ + NETIF_F_HIGHDMA | \ + NETIF_F_HW_CSUM) + +static void ip6gre_tnl_init_features(struct net_device *dev) +{ + struct ip6_tnl *nt = netdev_priv(dev); + + dev->features |= GRE6_FEATURES; + dev->hw_features |= GRE6_FEATURES; + + if (!(nt->parms.o_flags & TUNNEL_SEQ)) { + /* TCP offload with GRE SEQ is not supported, nor + * can we support 2 levels of outer headers requiring + * an update. 
+ */ + if (!(nt->parms.o_flags & TUNNEL_CSUM) || + nt->encap.type == TUNNEL_ENCAP_NONE) { + dev->features |= NETIF_F_GSO_SOFTWARE; + dev->hw_features |= NETIF_F_GSO_SOFTWARE; + } + + /* Can use a lockless transmit, unless we generate + * output sequences + */ + dev->features |= NETIF_F_LLTX; + } +} + static int ip6gre_tunnel_init_common(struct net_device *dev) { struct ip6_tnl *tunnel; @@ -1054,6 +1084,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) dev->mtu -= 8; + ip6gre_tnl_init_features(dev); + return 0; } @@ -1302,11 +1334,6 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = { .ndo_get_iflink = ip6_tnl_get_iflink, }; -#define GRE6_FEATURES (NETIF_F_SG | \ - NETIF_F_FRAGLIST | \ - NETIF_F_HIGHDMA | \ - NETIF_F_HW_CSUM) - static void ip6gre_tap_setup(struct net_device *dev) { @@ -1386,26 +1413,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev, nt->net = dev_net(dev); ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); - dev->features |= GRE6_FEATURES; - dev->hw_features |= GRE6_FEATURES; - - if (!(nt->parms.o_flags & TUNNEL_SEQ)) { - /* TCP offload with GRE SEQ is not supported, nor - * can we support 2 levels of outer headers requiring - * an update. - */ - if (!(nt->parms.o_flags & TUNNEL_CSUM) || - (nt->encap.type == TUNNEL_ENCAP_NONE)) { - dev->features |= NETIF_F_GSO_SOFTWARE; - dev->hw_features |= NETIF_F_GSO_SOFTWARE; - } - - /* Can use a lockless transmit, unless we generate - * output sequences - */ - dev->features |= NETIF_F_LLTX; - } - err = register_netdevice(dev); if (err) goto out; From 27ccace9b982ea796e15ba9dc6af5941539c6be8 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 20 Dec 2017 19:34:19 +0200 Subject: [PATCH 1136/3326] ipv4: Fix use-after-free when flushing FIB tables [ Upstream commit b4681c2829e24943aadd1a7bb3a30d41d0a20050 ] Since commit 0ddcf43d5d4a ("ipv4: FIB Local/MAIN table collapse") the local table uses the same trie allocated for the main table when custom rules are not in use. When a net namespace is dismantled, the main table is flushed and freed (via an RCU callback) before the local table. In case the callback is invoked before the local table is iterated, a use-after-free can occur. Fix this by iterating over the FIB tables in reverse order, so that the main table is always freed after the local table. v3: Reworded comment according to Alex's suggestion. v2: Add a comment to make the fix more explicit per Dave's and Alex's feedback. Fixes: 0ddcf43d5d4a ("ipv4: FIB Local/MAIN table collapse") Signed-off-by: Ido Schimmel Reported-by: Fengguang Wu Acked-by: Alexander Duyck Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/fib_frontend.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 37819ab4cc7493..d72874150905b6 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1274,14 +1274,19 @@ static int __net_init ip_fib_net_init(struct net *net) static void ip_fib_net_exit(struct net *net) { - unsigned int i; + int i; rtnl_lock(); #ifdef CONFIG_IP_MULTIPLE_TABLES RCU_INIT_POINTER(net->ipv4.fib_main, NULL); RCU_INIT_POINTER(net->ipv4.fib_default, NULL); #endif - for (i = 0; i < FIB_TABLE_HASHSZ; i++) { + /* Destroy the tables in reverse order to guarantee that the + * local table, ID 255, is destroyed before the main table, ID + * 254. 
This is necessary as the local table may contain + * references to data contained in the main table. + */ + for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) { struct hlist_head *head = &net->ipv4.fib_table_hash[i]; struct hlist_node *tmp; struct fib_table *tb; From 126f42ecfcb48ec50b289124f23dafa499012650 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Mon, 18 Dec 2017 17:35:09 +0200 Subject: [PATCH 1137/3326] net: bridge: fix early call to br_stp_change_bridge_id and plug newlink leaks [ Upstream commit 84aeb437ab98a2bce3d4b2111c79723aedfceb33 ] The early call to br_stp_change_bridge_id in bridge's newlink can cause a memory leak if an error occurs during the newlink because the fdb entries are not cleaned up if a different lladdr was specified, also another minor issue is that it generates fdb notifications with ifindex = 0. Another unrelated memory leak is the bridge sysfs entries which get added on NETDEV_REGISTER event, but are not cleaned up in the newlink error path. To remove this special case the call to br_stp_change_bridge_id is done after netdev register and we cleanup the bridge on changelink error via br_dev_delete to plug all leaks. This patch makes netlink bridge destruction on newlink error the same as dellink and ioctl del which is necessary since at that point we have a fully initialized bridge device. To reproduce the issue: $ ip l add br0 address 00:11:22:33:44:55 type bridge group_fwd_mask 1 RTNETLINK answers: Invalid argument $ rmmod bridge [ 1822.142525] ============================================================================= [ 1822.143640] BUG bridge_fdb_cache (Tainted: G O ): Objects remaining in bridge_fdb_cache on __kmem_cache_shutdown() [ 1822.144821] ----------------------------------------------------------------------------- [ 1822.145990] Disabling lock debugging due to kernel taint [ 1822.146732] INFO: Slab 0x0000000092a844b2 objects=32 used=2 fp=0x00000000fef011b0 flags=0x1ffff8000000100 [ 1822.147700] CPU: 2 PID: 13584 Comm: rmmod Tainted: G B O 4.15.0-rc2+ #87 [ 1822.148578] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.7.5-20140531_083030-gandalf 04/01/2014 [ 1822.150008] Call Trace: [ 1822.150510] dump_stack+0x78/0xa9 [ 1822.151156] slab_err+0xb1/0xd3 [ 1822.151834] ? __kmalloc+0x1bb/0x1ce [ 1822.152546] __kmem_cache_shutdown+0x151/0x28b [ 1822.153395] shutdown_cache+0x13/0x144 [ 1822.154126] kmem_cache_destroy+0x1c0/0x1fb [ 1822.154669] SyS_delete_module+0x194/0x244 [ 1822.155199] ? trace_hardirqs_on_thunk+0x1a/0x1c [ 1822.155773] entry_SYSCALL_64_fastpath+0x23/0x9a [ 1822.156343] RIP: 0033:0x7f929bd38b17 [ 1822.156859] RSP: 002b:00007ffd160e9a98 EFLAGS: 00000202 ORIG_RAX: 00000000000000b0 [ 1822.157728] RAX: ffffffffffffffda RBX: 00005578316ba090 RCX: 00007f929bd38b17 [ 1822.158422] RDX: 00007f929bd9ec60 RSI: 0000000000000800 RDI: 00005578316ba0f0 [ 1822.159114] RBP: 0000000000000003 R08: 00007f929bff5f20 R09: 00007ffd160e8a11 [ 1822.159808] R10: 00007ffd160e9860 R11: 0000000000000202 R12: 00007ffd160e8a80 [ 1822.160513] R13: 0000000000000000 R14: 0000000000000000 R15: 00005578316ba090 [ 1822.161278] INFO: Object 0x000000007645de29 @offset=0 [ 1822.161666] INFO: Object 0x00000000d5df2ab5 @offset=128 Fixes: 30313a3d5794 ("bridge: Handle IFLA_ADDRESS correctly when creating bridge device") Fixes: 5b8d5429daa0 ("bridge: netlink: register netdevice before executing changelink") Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/bridge/br_netlink.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index de215273080932..08190db0a2dca5 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1223,19 +1223,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev, struct net_bridge *br = netdev_priv(dev); int err; + err = register_netdevice(dev); + if (err) + return err; + if (tb[IFLA_ADDRESS]) { spin_lock_bh(&br->lock); br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS])); spin_unlock_bh(&br->lock); } - err = register_netdevice(dev); - if (err) - return err; - err = br_changelink(dev, tb, data, extack); if (err) - unregister_netdevice(dev); + br_dev_delete(dev, NULL); + return err; } From dd9a2648b3e35c2369f580215d916baf7e23253a Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 19 Dec 2017 11:27:56 -0600 Subject: [PATCH 1138/3326] net: Fix double free and memory corruption in get_net_ns_by_id() [ Upstream commit 21b5944350052d2583e82dd59b19a9ba94a007f0 ] (I can trivially verify that that idr_remove in cleanup_net happens after the network namespace count has dropped to zero --EWB) Function get_net_ns_by_id() does not check for net::count after it has found a peer in netns_ids idr. It may dereference a peer, after its count has already been finaly decremented. This leads to double free and memory corruption: put_net(peer) rtnl_lock() atomic_dec_and_test(&peer->count) [count=0] ... __put_net(peer) get_net_ns_by_id(net, id) spin_lock(&cleanup_list_lock) list_add(&net->cleanup_list, &cleanup_list) spin_unlock(&cleanup_list_lock) queue_work() peer = idr_find(&net->netns_ids, id) | get_net(peer) [count=1] | ... | (use after final put) v ... cleanup_net() ... spin_lock(&cleanup_list_lock) ... list_replace_init(&cleanup_list, ..) ... spin_unlock(&cleanup_list_lock) ... ... ... ... put_net(peer) ... atomic_dec_and_test(&peer->count) [count=0] ... spin_lock(&cleanup_list_lock) ... list_add(&net->cleanup_list, &cleanup_list) ... spin_unlock(&cleanup_list_lock) ... queue_work() ... rtnl_unlock() rtnl_lock() ... for_each_net(tmp) { ... id = __peernet2id(tmp, peer) ... spin_lock_irq(&tmp->nsid_lock) ... idr_remove(&tmp->netns_ids, id) ... ... ... net_drop_ns() ... net_free(peer) ... } ... | v cleanup_net() ... (Second free of peer) Also, put_net() on the right cpu may reorder with left's cpu list_replace_init(&cleanup_list, ..), and then cleanup_list will be corrupted. Since cleanup_net() is executed in worker thread, while put_net(peer) can happen everywhere, there should be enough time for concurrent get_net_ns_by_id() to pick the peer up, and the race does not seem to be unlikely. The patch fixes the problem in standard way. (Also, there is possible problem in peernet2id_alloc(), which requires check for net::count under nsid_lock and maybe_get_net(peer), but in current stable kernel it's used under rtnl_lock() and it has to be safe. Openswitch begun to use peernet2id_alloc(), and possibly it should be fixed too. While this is not in stable kernel yet, so I'll send a separate message to netdev@ later). Cc: Nicolas Dichtel Signed-off-by: Kirill Tkhai Fixes: 0c7aecd4bde4 "netns: add rtnl cmd to add and get peer netns ids" Reviewed-by: Andrey Ryabinin Reviewed-by: "Eric W. Biederman" Signed-off-by: Eric W. Biederman Reviewed-by: Eric Dumazet Acked-by: Nicolas Dichtel Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/core/net_namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 6cfdc7c84c480f..0dd6359e592428 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -266,7 +266,7 @@ struct net *get_net_ns_by_id(struct net *net, int id) spin_lock_bh(&net->nsid_lock); peer = idr_find(&net->netns_ids, id); if (peer) - get_net(peer); + peer = maybe_get_net(peer); spin_unlock_bh(&net->nsid_lock); rcu_read_unlock(); From 003514ffb447120d1a5997827ad5226d7b5a82e1 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Wed, 20 Dec 2017 18:45:10 -0600 Subject: [PATCH 1139/3326] net: phy: micrel: ksz9031: reconfigure autoneg after phy autoneg workaround [ Upstream commit c1a8d0a3accf64a014d605e6806ce05d1c17adf1 ] Under some circumstances driver will perform PHY reset in ksz9031_read_status() to fix autoneg failure case (idle error count = 0xFF). When this happens ksz9031 will not detect link status change any more when connecting to Netgear 1G switch (link can be recovered sometimes by restarting netdevice "ifconfig down up"). Reproduced with TI am572x board equipped with ksz9031 PHY while connecting to Netgear 1G switch. Fix the issue by reconfiguring autonegotiation after PHY reset in ksz9031_read_status(). Fixes: d2fd719bcb0e ("net/phy: micrel: Add workaround for bad autoneg") Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/micrel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fdb43dd9b5cd42..6c45ff650ec78a 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -622,6 +622,7 @@ static int ksz9031_read_status(struct phy_device *phydev) phydev->link = 0; if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) phydev->drv->config_intr(phydev); + return genphy_config_aneg(phydev); } return 0; From 265ba7a046c05f67f80c2540ec3212dafe5b3673 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Wed, 13 Dec 2017 14:41:06 -0500 Subject: [PATCH 1140/3326] sock: free skb in skb_complete_tx_timestamp on error [ Upstream commit 35b99dffc3f710cafceee6c8c6ac6a98eb2cb4bf ] skb_complete_tx_timestamp must ingest the skb it is passed. Call kfree_skb if the skb cannot be enqueued. Fixes: b245be1f4db1 ("net-timestamp: no-payload only sysctl") Fixes: 9ac25fc06375 ("net: fix socket refcounting in skb_complete_tx_timestamp()") Reported-by: Richard Cochran Signed-off-by: Willem de Bruijn Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/skbuff.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index e140ba49b30a49..623143d7844d84 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4296,7 +4296,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, struct sock *sk = skb->sk; if (!skb_may_tx_timestamp(sk, false)) - return; + goto err; /* Take a reference to prevent skb_orphan() from freeing the socket, * but only if the socket refcount is not zero. 
@@ -4305,7 +4305,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, *skb_hwtstamps(skb) = *hwtstamps; __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); sock_put(sk); + return; } + +err: + kfree_skb(skb); } EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); From f35318b289446015ce2d2cbfd240ba47bda6f187 Mon Sep 17 00:00:00 2001 From: Yousuk Seung Date: Thu, 7 Dec 2017 13:41:34 -0800 Subject: [PATCH 1141/3326] tcp: invalidate rate samples during SACK reneging [ Upstream commit d4761754b4fb2ef8d9a1e9d121c4bec84e1fe292 ] Mark tcp_sock during a SACK reneging event and invalidate rate samples while marked. Such rate samples may overestimate bw by including packets that were SACKed before reneging. < ack 6001 win 10000 sack 7001:38001 < ack 7001 win 0 sack 8001:38001 // Reneg detected > seq 7001:8001 // RTO, SACK cleared. < ack 38001 win 10000 In above example the rate sample taken after the last ack will count 7001-38001 as delivered while the actual delivery rate likely could be much lower i.e. 7001-8001. This patch adds a new field tcp_sock.sack_reneg and marks it when we declare SACK reneging and entering TCP_CA_Loss, and unmarks it after the last rate sample was taken before moving back to TCP_CA_Open. This patch also invalidates rate samples taken while tcp_sock.is_sack_reneg is set. Fixes: b9f64820fb22 ("tcp: track data delivery rate for a TCP connection") Signed-off-by: Yousuk Seung Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Acked-by: Soheil Hassas Yeganeh Acked-by: Eric Dumazet Acked-by: Priyaranjan Jha Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/linux/tcp.h | 3 ++- include/net/tcp.h | 2 +- net/ipv4/tcp.c | 1 + net/ipv4/tcp_input.c | 10 ++++++++-- net/ipv4/tcp_rate.c | 10 +++++++--- 5 files changed, 19 insertions(+), 7 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 4aa40ef02d32cf..e8418fc77a43f9 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -214,7 +214,8 @@ struct tcp_sock { u8 chrono_type:2, /* current chronograph type */ rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ - unused:4; + is_sack_reneg:1, /* in recovery from loss with SACK reneg? */ + unused:3; u8 nonagle : 4,/* Disable Nagle algorithm? 
*/ thin_lto : 1,/* Use linear timeouts for thin streams */ unused1 : 1, diff --git a/include/net/tcp.h b/include/net/tcp.h index 6ced69940f5c83..0a13574134b8b3 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1085,7 +1085,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb); void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, struct rate_sample *rs); void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, - struct rate_sample *rs); + bool is_sack_reneg, struct rate_sample *rs); void tcp_rate_check_app_limited(struct sock *sk); /* These functions determine how the current flow behaves in respect of SACK diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5091402720abca..a0c72b09cefcdd 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2356,6 +2356,7 @@ int tcp_disconnect(struct sock *sk, int flags) tp->snd_cwnd_cnt = 0; tp->window_clamp = 0; tcp_set_ca_state(sk, TCP_CA_Open); + tp->is_sack_reneg = 0; tcp_clear_retrans(tp); inet_csk_delack_init(sk); /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c5447b9f85175c..a965a38211ba4b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1975,6 +1975,8 @@ void tcp_enter_loss(struct sock *sk) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); tp->sacked_out = 0; tp->fackets_out = 0; + /* Mark SACK reneging until we recover from this loss event. */ + tp->is_sack_reneg = 1; } tcp_clear_all_retrans_hints(tp); @@ -2428,6 +2430,7 @@ static bool tcp_try_undo_recovery(struct sock *sk) return true; } tcp_set_ca_state(sk, TCP_CA_Open); + tp->is_sack_reneg = 0; return false; } @@ -2459,8 +2462,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; - if (frto_undo || tcp_is_sack(tp)) + if (frto_undo || tcp_is_sack(tp)) { tcp_set_ca_state(sk, TCP_CA_Open); + tp->is_sack_reneg = 0; + } return true; } return false; @@ -3551,6 +3556,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) struct tcp_sacktag_state sack_state; struct rate_sample rs = { .prior_delivered = 0 }; u32 prior_snd_una = tp->snd_una; + bool is_sack_reneg = tp->is_sack_reneg; u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; bool is_dupack = false; @@ -3666,7 +3672,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ lost = tp->lost - lost; /* freshly marked lost */ - tcp_rate_gen(sk, delivered, lost, sack_state.rate); + tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate); tcp_cong_control(sk, ack, delivered, flag, sack_state.rate); tcp_xmit_recovery(sk, rexmit); return 1; diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c index 3330a370d3061e..c61240e43923d6 100644 --- a/net/ipv4/tcp_rate.c +++ b/net/ipv4/tcp_rate.c @@ -106,7 +106,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, /* Update the connection delivery information and generate a rate sample. 
*/ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, - struct rate_sample *rs) + bool is_sack_reneg, struct rate_sample *rs) { struct tcp_sock *tp = tcp_sk(sk); u32 snd_us, ack_us; @@ -124,8 +124,12 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, rs->acked_sacked = delivered; /* freshly ACKed or SACKed */ rs->losses = lost; /* freshly marked lost */ - /* Return an invalid sample if no timing information is available. */ - if (!rs->prior_mstamp) { + /* Return an invalid sample if no timing information is available or + * in recovery from loss with SACK reneging. Rate samples taken during + * a SACK reneging event may overestimate bw by including packets that + * were SACKed before the reneg. + */ + if (!rs->prior_mstamp || is_sack_reneg) { rs->delivered = -1; rs->interval_us = -1; return; From 3ddcb727c71779c23097d440355c4abe27379558 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Mon, 13 Nov 2017 10:11:27 +0200 Subject: [PATCH 1142/3326] net/mlx5: Fix rate limit packet pacing naming and struct [ Upstream commit 37e92a9d4fe38dc3e7308913575983a6a088c8d4 ] In mlx5_ifc, struct size was not complete, and thus driver was sending garbage after the last defined field. Fixed it by adding reserved field to complete the struct size. In addition, rename all set_rate_limit to set_pp_rate_limit to be compliant with the Firmware <-> Driver definition. Fixes: 7486216b3a0b ("{net,IB}/mlx5: mlx5_ifc updates") Fixes: 1466cc5b23d1 ("net/mlx5: Rate limit tables support") Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/rl.c | 22 +++++++++---------- include/linux/mlx5/mlx5_ifc.h | 8 ++++--- 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 1fffdebbc9e899..e9a1fbcc4adfa6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_VPORT_COUNTER: case MLX5_CMD_OP_ALLOC_Q_COUNTER: case MLX5_CMD_OP_QUERY_Q_COUNTER: - case MLX5_CMD_OP_SET_RATE_LIMIT: + case MLX5_CMD_OP_SET_PP_RATE_LIMIT: case MLX5_CMD_OP_QUERY_RATE_LIMIT: case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: @@ -505,7 +505,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); - MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT); + MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT); MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index e651e4c0286774..d3c33e9eea7292 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, return ret_entry; } -static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, +static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev, u32 rate, u16 index) { - u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0}; 
+ u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0}; - MLX5_SET(set_rate_limit_in, in, opcode, - MLX5_CMD_OP_SET_RATE_LIMIT); - MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); - MLX5_SET(set_rate_limit_in, in, rate_limit, rate); + MLX5_SET(set_pp_rate_limit_in, in, opcode, + MLX5_CMD_OP_SET_PP_RATE_LIMIT); + MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index); + MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } @@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index) entry->refcount++; } else { /* new rate limit */ - err = mlx5_set_rate_limit_cmd(dev, rate, entry->index); + err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index); if (err) { mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n", rate, err); @@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate) entry->refcount--; if (!entry->refcount) { /* need to remove rate */ - mlx5_set_rate_limit_cmd(dev, 0, entry->index); + mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index); entry->rate = 0; } @@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev) /* Clear all configured rates */ for (i = 0; i < table->max_size; i++) if (table->rl_entry[i].rate) - mlx5_set_rate_limit_cmd(dev, 0, - table->rl_entry[i].index); + mlx5_set_pp_rate_limit_cmd(dev, 0, + table->rl_entry[i].index); kfree(dev->priv.rl_table.rl_entry); } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 69772347f86668..c8091f06eaa4c1 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -147,7 +147,7 @@ enum { MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, - MLX5_CMD_OP_SET_RATE_LIMIT = 0x780, + MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780, MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, @@ -7233,7 +7233,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits { u8 vxlan_udp_port[0x10]; }; -struct mlx5_ifc_set_rate_limit_out_bits { +struct mlx5_ifc_set_pp_rate_limit_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -7242,7 +7242,7 @@ struct mlx5_ifc_set_rate_limit_out_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_set_rate_limit_in_bits { +struct mlx5_ifc_set_pp_rate_limit_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -7255,6 +7255,8 @@ struct mlx5_ifc_set_rate_limit_in_bits { u8 reserved_at_60[0x20]; u8 rate_limit[0x20]; + + u8 reserved_at_a0[0x160]; }; struct mlx5_ifc_access_register_out_bits { From 2dc5654e6fbc46161c56058081258bd3ad8d1ab0 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 23 Nov 2017 13:52:28 +0200 Subject: [PATCH 1143/3326] net/mlx5e: Fix possible deadlock of VXLAN lock [ Upstream commit 6323514116404cc651df1b7fffa1311ddf8ce647 ] mlx5e_vxlan_lookup_port is called both from mlx5e_add_vxlan_port (user context) and mlx5e_features_check (softirq), but the lock acquired does not disable bottom half and might result in deadlock. Fix it by simply replacing spin_lock() with spin_lock_bh(). While at it, replace all unnecessary spin_lock_irq() to spin_lock_bh(). lockdep's WARNING: inconsistent lock state [ 654.028136] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage. 
[ 654.028229] swapper/5/0 [HC0[0]:SC1[9]:HE1:SE0] takes: [ 654.028321] (&(&vxlan_db->lock)->rlock){+.?.}, at: [] mlx5e_vxlan_lookup_port+0x1e/0x50 [mlx5_core] [ 654.028528] {SOFTIRQ-ON-W} state was registered at: [ 654.028607] _raw_spin_lock+0x3c/0x70 [ 654.028689] mlx5e_vxlan_lookup_port+0x1e/0x50 [mlx5_core] [ 654.028794] mlx5e_vxlan_add_port+0x2e/0x120 [mlx5_core] [ 654.028878] process_one_work+0x1e9/0x640 [ 654.028942] worker_thread+0x4a/0x3f0 [ 654.029002] kthread+0x141/0x180 [ 654.029056] ret_from_fork+0x24/0x30 [ 654.029114] irq event stamp: 579088 [ 654.029174] hardirqs last enabled at (579088): [] ip6_finish_output2+0x49a/0x8c0 [ 654.029309] hardirqs last disabled at (579087): [] ip6_finish_output2+0x44e/0x8c0 [ 654.029446] softirqs last enabled at (579030): [] irq_enter+0x6d/0x80 [ 654.029567] softirqs last disabled at (579031): [] irq_exit+0xb5/0xc0 [ 654.029684] other info that might help us debug this: [ 654.029781] Possible unsafe locking scenario: [ 654.029868] CPU0 [ 654.029908] ---- [ 654.029947] lock(&(&vxlan_db->lock)->rlock); [ 654.030045] [ 654.030090] lock(&(&vxlan_db->lock)->rlock); [ 654.030162] *** DEADLOCK *** Fixes: b3f63c3d5e2c ("net/mlx5e: Add netdev support for VXLAN tunneling") Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- .../net/ethernet/mellanox/mlx5/core/vxlan.c | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 07a9ba6cfc70a1..f8238275759f79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5e_vxlan *vxlan; - spin_lock(&vxlan_db->lock); + spin_lock_bh(&vxlan_db->lock); vxlan = radix_tree_lookup(&vxlan_db->tree, port); - spin_unlock(&vxlan_db->lock); + spin_unlock_bh(&vxlan_db->lock); return vxlan; } @@ -100,9 +100,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) vxlan->udp_port = port; - spin_lock_irq(&vxlan_db->lock); + spin_lock_bh(&vxlan_db->lock); err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); - spin_unlock_irq(&vxlan_db->lock); + spin_unlock_bh(&vxlan_db->lock); if (err) goto err_free; @@ -121,9 +121,9 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5e_vxlan *vxlan; - spin_lock_irq(&vxlan_db->lock); + spin_lock_bh(&vxlan_db->lock); vxlan = radix_tree_delete(&vxlan_db->tree, port); - spin_unlock_irq(&vxlan_db->lock); + spin_unlock_bh(&vxlan_db->lock); if (!vxlan) return; @@ -171,12 +171,12 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) struct mlx5e_vxlan *vxlan; unsigned int port = 0; - spin_lock_irq(&vxlan_db->lock); + spin_lock_bh(&vxlan_db->lock); while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) { port = vxlan->udp_port; - spin_unlock_irq(&vxlan_db->lock); + spin_unlock_bh(&vxlan_db->lock); __mlx5e_vxlan_core_del_port(priv, (u16)port); - spin_lock_irq(&vxlan_db->lock); + spin_lock_bh(&vxlan_db->lock); } - spin_unlock_irq(&vxlan_db->lock); + spin_unlock_bh(&vxlan_db->lock); } From 597181622e649b30a4c4d6bec4816d2f8987d25a Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 21 Nov 2017 17:49:36 +0200 Subject: [PATCH 1144/3326] net/mlx5e: Fix features check of IPv6 traffic [ Upstream commit 
2989ad1ec03021ee6d2193c35414f1d970a243de ] The assumption that the next header field contains the transport protocol is wrong for IPv6 packets with extension headers. Instead, we should look the inner-most next header field in the buffer. This will fix TSO offload for tunnels over IPv6 with extension headers. Performance testing: 19.25x improvement, cool! Measuring bandwidth of 16 threads TCP traffic over IPv6 GRE tap. CPU: Intel(R) Xeon(R) CPU E5-2660 v2 @ 2.20GHz NIC: Mellanox Technologies MT28800 Family [ConnectX-5 Ex] TSO: Enabled Before: 4,926.24 Mbps Now : 94,827.91 Mbps Fixes: b3f63c3d5e2c ("net/mlx5e: Add netdev support for VXLAN tunneling") Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index cc11bbbd0309d4..517727f277c5fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3554,6 +3554,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, struct sk_buff *skb, netdev_features_t features) { + unsigned int offset = 0; struct udphdr *udph; u8 proto; u16 port; @@ -3563,7 +3564,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, proto = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): - proto = ipv6_hdr(skb)->nexthdr; + proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL); break; default: goto out; From c4d0e614c151f102967ff38bdf7c4372c146a5a6 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 3 Dec 2017 13:58:50 +0200 Subject: [PATCH 1145/3326] net/mlx5e: Add refcount to VXLAN structure [ Upstream commit 23f4cc2cd9ed92570647220aca60d0197d8c1fa9 ] A refcount mechanism must be implemented in order to prevent unwanted scenarios such as: - Open an IPv4 VXLAN interface - Open an IPv6 VXLAN interface (different socket) - Remove one of the interfaces With current implementation, the UDP port will be removed from our VXLAN database and turn off the offloads for the other interface, which is still active. The reference count mechanism will only allow UDP port removals once all consumers are gone. 
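To make the mechanism concrete, here is a minimal userspace C analogue of the scheme described above. It is a sketch only, not the driver code: the fixed-size table, helper names and port value are illustrative stand-ins for the driver's radix tree and its per-port entries. Adding a port that is already known only takes an extra reference, and teardown happens only when the last consumer is gone.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct port_entry {                    /* illustrative stand-in for the driver entry */
	unsigned short udp_port;
	atomic_int refcount;
};

static struct port_entry *table[8];    /* toy table instead of a radix tree */

static struct port_entry *lookup(unsigned short port)
{
	for (int i = 0; i < 8; i++)
		if (table[i] && table[i]->udp_port == port)
			return table[i];
	return NULL;
}

static void add_port(unsigned short port)
{
	struct port_entry *p = lookup(port);

	if (p) {                           /* another consumer: just take a reference */
		atomic_fetch_add(&p->refcount, 1);
		return;
	}
	p = calloc(1, sizeof(*p));
	p->udp_port = port;
	atomic_init(&p->refcount, 1);
	for (int i = 0; i < 8; i++)
		if (!table[i]) {
			table[i] = p;
			break;
		}
	printf("offload enabled for UDP port %d\n", port);
}

static void del_port(unsigned short port)
{
	struct port_entry *p = lookup(port);

	if (!p)
		return;
	if (atomic_fetch_sub(&p->refcount, 1) == 1) {   /* last consumer gone */
		for (int i = 0; i < 8; i++)
			if (table[i] == p)
				table[i] = NULL;
		printf("offload disabled for UDP port %d\n", port);
		free(p);
	}
}

int main(void)
{
	add_port(4789);    /* IPv4 VXLAN interface */
	add_port(4789);    /* IPv6 VXLAN interface, same UDP port */
	del_port(4789);    /* one interface removed: offload must stay on */
	del_port(4789);    /* last consumer removed: now tear it down */
	return 0;
}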
Fixes: b3f63c3d5e2c ("net/mlx5e: Add netdev support for VXLAN tunneling") Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- .../net/ethernet/mellanox/mlx5/core/vxlan.c | 50 ++++++++++--------- .../net/ethernet/mellanox/mlx5/core/vxlan.h | 1 + 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index f8238275759f79..25f7823446678d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -88,8 +88,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) struct mlx5e_vxlan *vxlan; int err; - if (mlx5e_vxlan_lookup_port(priv, port)) + vxlan = mlx5e_vxlan_lookup_port(priv, port); + if (vxlan) { + atomic_inc(&vxlan->refcount); goto free_work; + } if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) goto free_work; @@ -99,6 +102,7 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) goto err_delete_port; vxlan->udp_port = port; + atomic_set(&vxlan->refcount, 1); spin_lock_bh(&vxlan_db->lock); err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); @@ -116,32 +120,33 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) kfree(vxlan_work); } -static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) +static void mlx5e_vxlan_del_port(struct work_struct *work) { + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + u16 port = vxlan_work->port; struct mlx5e_vxlan *vxlan; + bool remove = false; spin_lock_bh(&vxlan_db->lock); - vxlan = radix_tree_delete(&vxlan_db->tree, port); - spin_unlock_bh(&vxlan_db->lock); - + vxlan = radix_tree_lookup(&vxlan_db->tree, port); if (!vxlan) - return; - - mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port); - - kfree(vxlan); -} + goto out_unlock; -static void mlx5e_vxlan_del_port(struct work_struct *work) -{ - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; - u16 port = vxlan_work->port; + if (atomic_dec_and_test(&vxlan->refcount)) { + radix_tree_delete(&vxlan_db->tree, port); + remove = true; + } - __mlx5e_vxlan_core_del_port(priv, port); +out_unlock: + spin_unlock_bh(&vxlan_db->lock); + if (remove) { + mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + kfree(vxlan); + } kfree(vxlan_work); } @@ -171,12 +176,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) struct mlx5e_vxlan *vxlan; unsigned int port = 0; - spin_lock_bh(&vxlan_db->lock); + /* Lockless since we are the only radix-tree consumers, wq is disabled */ while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) { port = vxlan->udp_port; - spin_unlock_bh(&vxlan_db->lock); - __mlx5e_vxlan_core_del_port(priv, (u16)port); - spin_lock_bh(&vxlan_db->lock); + radix_tree_delete(&vxlan_db->tree, port); + mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + kfree(vxlan); } - spin_unlock_bh(&vxlan_db->lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 5def12c048e389..5ef6ae7d568abc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -36,6 +36,7 @@ #include "en.h" struct mlx5e_vxlan { + atomic_t refcount; u16 udp_port; }; From 999755ec40a6206d0c9f3edfffe2b00a7a66a6e9 Mon 
Sep 17 00:00:00 2001 From: Gal Pressman Date: Mon, 4 Dec 2017 09:57:43 +0200 Subject: [PATCH 1146/3326] net/mlx5e: Prevent possible races in VXLAN control flow [ Upstream commit 0c1cc8b2215f5122ca614b5adca60346018758c3 ] When calling add/remove VXLAN port, a lock must be held in order to prevent race scenarios when more than one add/remove happens at the same time. Fix by holding our state_lock (mutex) as done by all other parts of the driver. Note that the spinlock protecting the radix-tree is still needed in order to synchronize radix-tree access from softirq context. Fixes: b3f63c3d5e2c ("net/mlx5e: Add netdev support for VXLAN tunneling") Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 25f7823446678d..2f74953e456151 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -88,6 +88,7 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) struct mlx5e_vxlan *vxlan; int err; + mutex_lock(&priv->state_lock); vxlan = mlx5e_vxlan_lookup_port(priv, port); if (vxlan) { atomic_inc(&vxlan->refcount); @@ -117,6 +118,7 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) err_delete_port: mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); free_work: + mutex_unlock(&priv->state_lock); kfree(vxlan_work); } @@ -130,6 +132,7 @@ static void mlx5e_vxlan_del_port(struct work_struct *work) struct mlx5e_vxlan *vxlan; bool remove = false; + mutex_lock(&priv->state_lock); spin_lock_bh(&vxlan_db->lock); vxlan = radix_tree_lookup(&vxlan_db->tree, port); if (!vxlan) @@ -147,6 +150,7 @@ static void mlx5e_vxlan_del_port(struct work_struct *work) mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); kfree(vxlan); } + mutex_unlock(&priv->state_lock); kfree(vxlan_work); } From bf070305213031e1300070d10334ac012116d470 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Mon, 4 Dec 2017 08:59:25 +0200 Subject: [PATCH 1147/3326] net/mlx5: Fix error flow in CREATE_QP command [ Upstream commit dbff26e44dc3ec4de6578733b054a0114652a764 ] In error flow, when DESTROY_QP command should be executed, the wrong mailbox was set with data, not the one that is written to hardware, Fix that. 
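The underlying bug class is a slip on the error path: the cleanup command is prepared in a second mailbox, but the fields were written into the original request buffer, so the mailbox actually sent to hardware stayed zeroed. A self-contained C illustration of the corrected pattern follows; the struct, opcodes and helper are invented for the sketch and are not the mlx5 command interface.

#include <stdint.h>
#include <stdio.h>

struct mbox {                          /* toy stand-in for a command mailbox */
	uint16_t opcode;
	uint32_t qpn;
};

static void cmd_exec(const struct mbox *m)
{
	printf("exec opcode %#x, qpn %lu\n",
	       (unsigned int)m->opcode, (unsigned long)m->qpn);
}

static int create_qp(uint32_t qpn, int later_step_fails)
{
	struct mbox in  = { .opcode = 0x500 /* "create" */, .qpn = qpn };
	struct mbox din = { 0 };

	cmd_exec(&in);
	if (!later_step_fails)
		return 0;

	/* Error flow: compose the destroy request in 'din', the buffer
	 * that is actually executed below.  The buggy version filled in
	 * these fields on 'in' instead, so the hardware saw a zeroed
	 * destroy command. */
	din.opcode = 0x501;                /* "destroy" */
	din.qpn = qpn;
	cmd_exec(&din);
	return -1;
}

int main(void)
{
	(void)create_qp(7, 1);             /* force the error flow for the demo */
	return 0;
}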
Fixes: 09a7d9eca1a6 '{net,IB}/mlx5: QP/XRCD commands via mlx5 ifc' Signed-off-by: Moni Shoua Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index db9e665ab10474..889130edb71525 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, err_cmd: memset(din, 0, sizeof(din)); memset(dout, 0, sizeof(dout)); - MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); - MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); + MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, din, qpn, qp->qpn); mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); return err; } From a6cc63e125ffb3ae9f6b4e2b4642ddea9a932b46 Mon Sep 17 00:00:00 2001 From: Eric Garver Date: Wed, 20 Dec 2017 15:09:22 -0500 Subject: [PATCH 1148/3326] openvswitch: Fix pop_vlan action for double tagged frames [ Upstream commit c48e74736fccf25fb32bb015426359e1c2016e3b ] skb_vlan_pop() expects skb->protocol to be a valid TPID for double tagged frames. So set skb->protocol to the TPID and let skb_vlan_pop() shift the true ethertype into position for us. Fixes: 5108bbaddc37 ("openvswitch: add processing of L3 packets") Signed-off-by: Eric Garver Reviewed-by: Jiri Benc Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/openvswitch/flow.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index cfb652a4e00773..dbe1079a1651eb 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -532,6 +532,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) return -EINVAL; skb_reset_network_header(skb); + key->eth.type = skb->protocol; } else { eth = eth_hdr(skb); ether_addr_copy(key->eth.src, eth->h_source); @@ -545,15 +546,23 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) if (unlikely(parse_vlan(skb, key))) return -ENOMEM; - skb->protocol = parse_ethertype(skb); - if (unlikely(skb->protocol == htons(0))) + key->eth.type = parse_ethertype(skb); + if (unlikely(key->eth.type == htons(0))) return -ENOMEM; + /* Multiple tagged packets need to retain TPID to satisfy + * skb_vlan_pop(), which will later shift the ethertype into + * skb->protocol. + */ + if (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT)) + skb->protocol = key->eth.cvlan.tpid; + else + skb->protocol = key->eth.type; + skb_reset_network_header(skb); __skb_push(skb, skb->data - skb_mac_header(skb)); } skb_reset_mac_len(skb); - key->eth.type = skb->protocol; /* Network layer. */ if (key->eth.type == htons(ETH_P_IP)) { From 701768dc9a1030d3f02b9b3e9b4690d15f637dad Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Thu, 7 Dec 2017 17:18:58 +0000 Subject: [PATCH 1149/3326] sfc: pass valid pointers from efx_enqueue_unwind [ Upstream commit d4a7a8893d4cdbc89d79ac4aa704bf8d4b67b368 ] The bytes_compl and pkts_compl pointers passed to efx_dequeue_buffers cannot be NULL. Add a paranoid warning to check this condition and fix the one case where they were NULL. efx_enqueue_unwind() is called very rarely, during error handling. Without this fix it would fail with a NULL pointer dereference in efx_dequeue_buffer, with efx_enqueue_skb in the call stack. 
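The contract being enforced is that the completion counters are mandatory out-parameters: the dequeue path increments them unconditionally, so an unwind caller has to hand in real storage rather than NULL. A small hedged C sketch of that shape (simplified, with invented names, not the sfc driver code):

#include <assert.h>
#include <stdio.h>

/* The callee assumes both counters point at valid storage. */
static void dequeue_buffer(unsigned int len,
			   unsigned int *pkts_compl, unsigned int *bytes_compl)
{
	assert(pkts_compl && bytes_compl);   /* the "paranoid" check */
	(*pkts_compl)++;
	*bytes_compl += len;
}

/* Error-path unwind: pass throwaway locals instead of NULL. */
static void enqueue_unwind(const unsigned int *lens, int n)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	while (n--)
		dequeue_buffer(lens[n], &pkts_compl, &bytes_compl);
}

int main(void)
{
	unsigned int lens[] = { 60, 1514, 128 };

	enqueue_unwind(lens, 3);
	puts("unwound without dereferencing NULL");
	return 0;
}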
Fixes: e9117e5099ea ("sfc: Firmware-Assisted TSO version 2") Reported-by: Jarod Wilson Signed-off-by: Bert Kenward Tested-by: Jarod Wilson Acked-by: Jarod Wilson Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/sfc/tx.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 32bf1fecf86406..9b85cbd5a231b6 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -77,6 +77,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, } if (buffer->flags & EFX_TX_BUF_SKB) { + EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl); (*pkts_compl)++; (*bytes_compl) += buffer->skb->len; dev_consume_skb_any((struct sk_buff *)buffer->skb); @@ -426,12 +427,14 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; buffer = __efx_tx_queue_get_insert_buffer(tx_queue); - efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); } } From ff1ff3815c2483ac19bd6f926a535dd4cea27e2e Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 21 Nov 2017 17:37:46 -0800 Subject: [PATCH 1150/3326] net: dsa: bcm_sf2: Clear IDDQ_GLOBAL_PWR bit for PHY [ Upstream commit 4b52d010113e11006a389f2a8315167ede9e0b10 ] The PHY on BCM7278 has an additional bit that needs to be cleared: IDDQ_GLOBAL_PWR, without doing this, the PHY remains stuck in reset out of suspend/resume cycles. Fixes: 0fe9933804eb ("net: dsa: bcm_sf2: Add support for BCM7278 integrated switch") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/dsa/bcm_sf2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index d7b53d53c116e9..72d6ffbfd6387f 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -167,7 +167,7 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) reg = reg_readl(priv, REG_SPHY_CNTRL); if (enable) { reg |= PHY_RESET; - reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS); + reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS); reg_writel(priv, reg, REG_SPHY_CNTRL); udelay(21); reg = reg_readl(priv, REG_SPHY_CNTRL); From f38ffe325b209f367c003bab291cf7e96cd1a6d9 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 20 Dec 2017 18:07:18 +0100 Subject: [PATCH 1151/3326] s390/qeth: fix error handling in checksum cmd callback [ Upstream commit ad3cbf61332914711e5f506972b1dc9af8d62146 ] Make sure to check both return code fields before processing the response. Otherwise we risk operating on invalid data. Fixes: c9475369bd2b ("s390/qeth: rework RX/TX checksum offload") Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_core_main.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 291eb89ecb06b3..7c7a244b6684e4 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5445,6 +5445,13 @@ int qeth_poll(struct napi_struct *napi, int budget) } EXPORT_SYMBOL_GPL(qeth_poll); +static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) +{ + if (!cmd->hdr.return_code) + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + return cmd->hdr.return_code; +} + int qeth_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -6304,7 +6311,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card, (struct qeth_checksum_cmd *)reply->param; QETH_CARD_TEXT(card, 4, "chkdoccb"); - if (cmd->hdr.return_code) + if (qeth_setassparms_inspect_rc(cmd)) return 0; memset(chksum_cb, 0, sizeof(*chksum_cb)); From 201c59bb7ba69fd6a41ac6606d8049b94fb26232 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 10 Dec 2017 15:40:51 +0800 Subject: [PATCH 1152/3326] sctp: make sure stream nums can match optlen in sctp_setsockopt_reset_streams [ Upstream commit 2342b8d95bcae5946e1b9b8d58645f37500ef2e7 ] Now in sctp_setsockopt_reset_streams, it only does the check optlen < sizeof(*params) for optlen. But it's not enough, as params->srs_number_streams should also match optlen. If the streams in params->srs_stream_list are less than stream nums in params->srs_number_streams, later when dereferencing the stream list, it could cause a slab-out-of-bounds crash, as reported by syzbot. This patch is to fix it by also checking the stream numbers in sctp_setsockopt_reset_streams to make sure at least it's not greater than the streams in the list. Fixes: 7f9d68ac944e ("sctp: implement sender-side procedures for SSN Reset Request Parameter") Reported-by: Dmitry Vyukov Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 1977238dc02376..df806b8819aa4c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3874,13 +3874,17 @@ static int sctp_setsockopt_reset_streams(struct sock *sk, struct sctp_association *asoc; int retval = -EINVAL; - if (optlen < sizeof(struct sctp_reset_streams)) + if (optlen < sizeof(*params)) return -EINVAL; params = memdup_user(optval, optlen); if (IS_ERR(params)) return PTR_ERR(params); + if (params->srs_number_streams * sizeof(__u16) > + optlen - sizeof(*params)) + goto out; + asoc = sctp_id2assoc(sk, params->srs_assoc_id); if (!asoc) goto out; From 92ae8233467b3a19a50fb02a7ebe065c6de3df17 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Thu, 28 Dec 2017 12:03:06 +0100 Subject: [PATCH 1153/3326] tipc: fix hanging poll() for stream sockets [ Upstream commit 517d7c79bdb39864e617960504bdc1aa560c75c6 ] In commit 42b531de17d2f6 ("tipc: Fix missing connection request handling"), we replaced unconditional wakeup() with condtional wakeup for clients with flags POLLIN | POLLRDNORM | POLLRDBAND. This breaks the applications which do a connect followed by poll with POLLOUT flag. These applications are not woken when the connection is ESTABLISHED and hence sleep forever. 
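The affected application pattern is a non-blocking connect followed by a poll() that waits only for writability. A minimal, self-contained C illustration of that pattern is below; it uses an ordinary TCP socket and a loopback address purely to keep the sketch short, while the report above concerns TIPC stream sockets.

#include <arpa/inet.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in sa = { .sin_family = AF_INET,
				  .sin_port = htons(7777) };

	inet_pton(AF_INET, "127.0.0.1", &sa.sin_addr);
	fcntl(fd, F_SETFL, O_NONBLOCK);
	connect(fd, (struct sockaddr *)&sa, sizeof(sa)); /* usually EINPROGRESS */

	/* Wait until the connection is established and writable.  A socket
	 * stuck in the CONNECTING state that never reports POLLOUT makes
	 * this call sleep until the timeout (or forever with -1). */
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	int n = poll(&pfd, 1, 5000);

	printf(n > 0 ? "connection writable\n" : "poll timed out\n");
	close(fd);
	return 0;
}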
In this commit, we fix it by including the POLLOUT event for sockets in TIPC_CONNECTING state. Fixes: 42b531de17d2f6 ("tipc: Fix missing connection request handling") Acked-by: Jon Maloy Signed-off-by: Parthasarathy Bhuvaragan Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/tipc/socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index d50edd6e00196a..98a44ecb11e7be 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -709,11 +709,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock, switch (sk->sk_state) { case TIPC_ESTABLISHED: + case TIPC_CONNECTING: if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) mask |= POLLOUT; /* fall thru' */ case TIPC_LISTEN: - case TIPC_CONNECTING: if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= (POLLIN | POLLRDNORM); break; From bcc029ff5dafc68d00a5fceadd93763d2b43e0e3 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Fri, 15 Dec 2017 08:44:21 +0100 Subject: [PATCH 1154/3326] mlxsw: spectrum: Disable MAC learning for ovs port [ Upstream commit fccff0862838908d21eaf956d57e09c6c189f7c5 ] Learning is currently enabled for ports which are OVS slaves - even though OVS doesn't need this indication. Since we're not associating a fid with the port, HW would continuously notify driver of learned [& aged] MACs which would be logged as errors. Fixes: 2b94e58df58c ("mlxsw: spectrum: Allow ports to work under OVS master") Signed-off-by: Yuval Mintz Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index db38880f54b4c6..3ead7439821c9e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4164,6 +4164,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) { + u16 vid = 1; int err; err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); @@ -4176,8 +4177,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) true, false); if (err) goto err_port_vlan_set; + + for (; vid <= VLAN_N_VID - 1; vid++) { + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, + vid, false); + if (err) + goto err_vid_learning_set; + } + return 0; +err_vid_learning_set: + for (vid--; vid >= 1; vid--) + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); err_port_vlan_set: mlxsw_sp_port_stp_set(mlxsw_sp_port, false); err_port_stp_set: @@ -4187,6 +4199,12 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) { + u16 vid; + + for (vid = VLAN_N_VID - 1; vid >= 1; vid--) + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, + vid, true); + mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, false, false); mlxsw_sp_port_stp_set(mlxsw_sp_port, false); From 583395a81f00a42f178c1d8ba3c4858d76c9f5b4 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Tue, 12 Dec 2017 16:28:58 -0800 Subject: [PATCH 1155/3326] tcp: fix potential underestimation on rcv_rtt [ Upstream commit 9ee11bd03cb1a5c3ca33c2bb70e7ed325f68890f ] When ms timestamp is used, current logic uses 1us in tcp_rcv_rtt_update() when the real rcv_rtt is within 1 - 999us. 
This could cause rcv_rtt underestimation. Fix it by always using a min value of 1ms if ms timestamp is used. Fixes: 645f4c6f2ebd ("tcp: switch rcv_rtt_est and rcvq_space to high resolution timestamps") Signed-off-by: Wei Wang Signed-off-by: Eric Dumazet Acked-by: Neal Cardwell Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_input.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a965a38211ba4b..ff48ac654e5ae1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -521,9 +521,6 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) u32 new_sample = tp->rcv_rtt_est.rtt_us; long m = sample; - if (m == 0) - m = 1; - if (new_sample != 0) { /* If we sample in larger samples in the non-timestamp * case, we could grossly overestimate the RTT especially @@ -560,6 +557,8 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); + if (!delta_us) + delta_us = 1; tcp_rcv_rtt_update(tp, delta_us, 1); new_measure: @@ -576,8 +575,11 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, (TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; - u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); + u32 delta_us; + if (!delta) + delta = 1; + delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); tcp_rcv_rtt_update(tp, delta_us, 0); } } From 5e255d684d0554893768b4452de404f293c71696 Mon Sep 17 00:00:00 2001 From: Zhao Qiang Date: Mon, 18 Dec 2017 10:26:43 +0800 Subject: [PATCH 1156/3326] net: phy: marvell: Limit 88m1101 autoneg errata to 88E1145 as well. [ Upstream commit c505873eaece2b4aefd07d339dc7e1400e0235ac ] 88E1145 also need this autoneg errata. Fixes: f2899788353c ("net: phy: marvell: Limit errata to 88m1101") Signed-off-by: Zhao Qiang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/marvell.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 4d02b27df0445e..a3f456b91c99de 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2069,7 +2069,7 @@ static struct phy_driver marvell_drivers[] = { .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1145_config_init, - .config_aneg = &marvell_config_aneg, + .config_aneg = &m88e1101_config_aneg, .read_status = &genphy_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, From 333921964046fe8453a90b60aa55dc4725955073 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 20 Dec 2017 12:28:25 +0200 Subject: [PATCH 1157/3326] ipv6: Honor specified parameters in fibmatch lookup [ Upstream commit 58acfd714e6b02e8617448b431c2b64a2f1f0792 ] Currently, parameters such as oif and source address are not taken into account during fibmatch lookup. 
Example (IPv4 for reference) before patch: $ ip -4 route show 192.0.2.0/24 dev dummy0 proto kernel scope link src 192.0.2.1 198.51.100.0/24 dev dummy1 proto kernel scope link src 198.51.100.1 $ ip -6 route show 2001:db8:1::/64 dev dummy0 proto kernel metric 256 pref medium 2001:db8:2::/64 dev dummy1 proto kernel metric 256 pref medium fe80::/64 dev dummy0 proto kernel metric 256 pref medium fe80::/64 dev dummy1 proto kernel metric 256 pref medium $ ip -4 route get fibmatch 192.0.2.2 oif dummy0 192.0.2.0/24 dev dummy0 proto kernel scope link src 192.0.2.1 $ ip -4 route get fibmatch 192.0.2.2 oif dummy1 RTNETLINK answers: No route to host $ ip -6 route get fibmatch 2001:db8:1::2 oif dummy0 2001:db8:1::/64 dev dummy0 proto kernel metric 256 pref medium $ ip -6 route get fibmatch 2001:db8:1::2 oif dummy1 2001:db8:1::/64 dev dummy0 proto kernel metric 256 pref medium After: $ ip -6 route get fibmatch 2001:db8:1::2 oif dummy0 2001:db8:1::/64 dev dummy0 proto kernel metric 256 pref medium $ ip -6 route get fibmatch 2001:db8:1::2 oif dummy1 RTNETLINK answers: Network is unreachable The problem stems from the fact that the necessary route lookup flags are not set based on these parameters. Instead of duplicating the same logic for fibmatch, we can simply resolve the original route from its copy and dump it instead. Fixes: 18c3a61c4264 ("net: ipv6: RTM_GETROUTE: return matched fib result when requested") Signed-off-by: Ido Schimmel Acked-by: David Ahern Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/route.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 598efa8cfe250c..ca8d3266e92e0d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3700,19 +3700,13 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, if (!ipv6_addr_any(&fl6.saddr)) flags |= RT6_LOOKUP_F_HAS_SADDR; - if (!fibmatch) - dst = ip6_route_input_lookup(net, dev, &fl6, flags); - else - dst = ip6_route_lookup(net, &fl6, 0); + dst = ip6_route_input_lookup(net, dev, &fl6, flags); rcu_read_unlock(); } else { fl6.flowi6_oif = oif; - if (!fibmatch) - dst = ip6_route_output(net, NULL, &fl6); - else - dst = ip6_route_lookup(net, &fl6, 0); + dst = ip6_route_output(net, NULL, &fl6); } @@ -3729,6 +3723,15 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, goto errout; } + if (fibmatch && rt->dst.from) { + struct rt6_info *ort = container_of(rt->dst.from, + struct rt6_info, dst); + + dst_hold(&ort->dst); + ip6_rt_put(rt); + rt = ort; + } + skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { ip6_rt_put(rt); From 5504319c6993d51565b582c685c3e09c0e07fded Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 12 Dec 2017 18:22:52 -0800 Subject: [PATCH 1158/3326] tcp: refresh tcp_mstamp from timers callbacks [ Upstream commit 4688eb7cf3ae2c2721d1dacff5c1384cba47d176 ] Only the retransmit timer currently refreshes tcp_mstamp We should do the same for delayed acks and keepalives. Even if RFC 7323 does not request it, this is consistent to what linux did in the past, when TS values were based on jiffies. Fixes: 385e20706fac ("tcp: use tp->tcp_mstamp in output path") Signed-off-by: Eric Dumazet Cc: Soheil Hassas Yeganeh Cc: Mike Maloney Cc: Neal Cardwell Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Acked-by: Mike Maloney Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_timer.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 655dd8d7f064f9..e9af1879cd5360 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -264,6 +264,7 @@ void tcp_delack_timer_handler(struct sock *sk) icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } + tcp_mstamp_refresh(tcp_sk(sk)); tcp_send_ack(sk); __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); } @@ -627,6 +628,7 @@ static void tcp_keepalive_timer (unsigned long data) goto out; } + tcp_mstamp_refresh(tp); if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { if (tp->linger2 >= 0) { const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; From cea58617977baafe4a1be8f55c8e55d21c6cfb20 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Sun, 29 Oct 2017 04:03:37 +0200 Subject: [PATCH 1159/3326] net/mlx5: FPGA, return -EINVAL if size is zero [ Upstream commit bae115a2bb479142605726e6aa130f43f50e801a ] Currently, if a size of zero is passed to mlx5_fpga_mem_{read|write}_i2c() the "err" return value will not be initialized, which triggers gcc warnings: [..]/mlx5/core/fpga/sdk.c:87 mlx5_fpga_mem_read_i2c() error: uninitialized symbol 'err'. [..]/mlx5/core/fpga/sdk.c:115 mlx5_fpga_mem_write_i2c() error: uninitialized symbol 'err'. fix that. Fixes: a9956d35d199 ('net/mlx5: FPGA, Add SBU infrastructure') Signed-off-by: Kamal Heib Reviewed-by: Yevgeny Kliteynik Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c index 3c11d6e2160abe..14962969c5ba8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c @@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size, u8 actual_size; int err; + if (!size) + return -EINVAL; + if (!fdev->mdev) return -ENOTCONN; @@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size, u8 actual_size; int err; + if (!size) + return -EINVAL; + if (!fdev->mdev) return -ENOTCONN; From 215b69e208089f42c29f9343ef11f4adc6ea6f6b Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Thu, 14 Dec 2017 20:20:00 +0300 Subject: [PATCH 1160/3326] vxlan: restore dev->mtu setting based on lower device [ Upstream commit f870c1ff65a6d1f3a083f277280802ee09a5b44d ] Stefano Brivio says: Commit a985343ba906 ("vxlan: refactor verification and application of configuration") introduced a change in the behaviour of initial MTU setting: earlier, the MTU for a link created on top of a given lower device, without an initial MTU specification, was set to the MTU of the lower device minus headroom as a result of this path in vxlan_dev_configure(): if (!conf->mtu) dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); which is now gone. Now, the initial MTU, in absence of a configured value, is simply set by ether_setup() to ETH_DATA_LEN (1500 bytes). This breaks userspace expectations in case the MTU of the lower device is higher than 1500 bytes minus headroom. This patch restores the previous behaviour on newlink operation. Since max_mtu can be negative and we update dev->mtu directly, also check it for valid minimum. 
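The restored sizing rule can be sketched as plain arithmetic: derive the upper bound from the lower device's MTU minus tunnel headroom, clamp it to the Ethernet minimum, and use it as the initial MTU only when the user configured none. The helper below is an illustration with assumed constants (68 for the Ethernet minimum, roughly 70 bytes of IPv6 VXLAN headroom), not the driver code itself.

#include <stdbool.h>
#include <stdio.h>

#define ETH_MIN_MTU_SKETCH 68          /* assumed minimum MTU, sketch only */

static int vxlan_initial_mtu(int lowerdev_mtu, int headroom,
			     int configured_mtu, bool changelink)
{
	int max_mtu = lowerdev_mtu - headroom;

	if (max_mtu < ETH_MIN_MTU_SKETCH)
		max_mtu = ETH_MIN_MTU_SKETCH;

	/* newlink with no explicit MTU: inherit from the lower device */
	if (!changelink && !configured_mtu)
		return max_mtu;

	/* otherwise keep the configured value, capped at the upper bound */
	return configured_mtu > max_mtu ? max_mtu : configured_mtu;
}

int main(void)
{
	/* e.g. a 9000-byte lower device carrying an IPv6 VXLAN tunnel */
	printf("initial mtu: %d\n", vxlan_initial_mtu(9000, 70, 0, false));
	return 0;
}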
Reported-by: Junhan Yan Fixes: a985343ba906 ("vxlan: refactor verification and application of configuration") Signed-off-by: Alexey Kodanev Acked-by: Stefano Brivio Signed-off-by: Stefano Brivio Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/vxlan.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index a2f4e52fadb561..9e9202b50e7328 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3105,6 +3105,11 @@ static void vxlan_config_apply(struct net_device *dev, max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); + if (max_mtu < ETH_MIN_MTU) + max_mtu = ETH_MIN_MTU; + + if (!changelink && !conf->mtu) + dev->mtu = max_mtu; } if (dev->mtu > max_mtu) From 11295730446fd654aeef28a575d801f3679424fa Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 15 Dec 2017 12:40:13 +0100 Subject: [PATCH 1161/3326] net: sched: fix static key imbalance in case of ingress/clsact_init error [ Upstream commit b59e6979a86384e68b0ab6ffeab11f0034fba82d ] Move static key increments to the beginning of the init function so they pair 1:1 with decrements in ingress/clsact_destroy, which is called in case ingress/clsact_init fails. Fixes: 6529eaba33f0 ("net: sched: introduce tcf block infractructure") Signed-off-by: Jiri Pirko Acked-by: Cong Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sched/sch_ingress.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 44de4ee51ce9fe..a08a32fa094930 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -59,11 +59,12 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt) struct net_device *dev = qdisc_dev(sch); int err; + net_inc_ingress_queue(); + err = tcf_block_get(&q->block, &dev->ingress_cl_list); if (err) return err; - net_inc_ingress_queue(); sch->flags |= TCQ_F_CPUSTATS; return 0; @@ -153,6 +154,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt) struct net_device *dev = qdisc_dev(sch); int err; + net_inc_ingress_queue(); + net_inc_egress_queue(); + err = tcf_block_get(&q->ingress_block, &dev->ingress_cl_list); if (err) return err; @@ -161,9 +165,6 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt) if (err) return err; - net_inc_ingress_queue(); - net_inc_egress_queue(); - sch->flags |= TCQ_F_CPUSTATS; return 0; From 7f6dcb82d04045f143758f56caf031376f9bf16c Mon Sep 17 00:00:00 2001 From: Calvin Owens Date: Fri, 8 Dec 2017 09:05:26 -0800 Subject: [PATCH 1162/3326] bnxt_en: Fix sources of spurious netpoll warnings [ Upstream commit 2edbdb3159d6f6bd3a9b6e7f789f2b879699a519 ] After applying 2270bc5da3497945 ("bnxt_en: Fix netpoll handling") and 903649e718f80da2 ("bnxt_en: Improve -ENOMEM logic in NAPI poll loop."), we still see the following WARN fire: ------------[ cut here ]------------ WARNING: CPU: 0 PID: 1875170 at net/core/netpoll.c:165 netpoll_poll_dev+0x15a/0x160 bnxt_poll+0x0/0xd0 exceeded budget in poll Call Trace: [] dump_stack+0x4d/0x70 [] __warn+0xd3/0xf0 [] warn_slowpath_fmt+0x4f/0x60 [] netpoll_poll_dev+0x15a/0x160 [] netpoll_send_skb_on_dev+0x168/0x250 [] netpoll_send_udp+0x2dc/0x440 [] write_ext_msg+0x20e/0x250 [] call_console_drivers.constprop.23+0xa5/0x110 [] console_unlock+0x339/0x5b0 [] vprintk_emit+0x2c8/0x450 [] vprintk_default+0x1f/0x30 [] printk+0x48/0x50 [] edac_raw_mc_handle_error+0x563/0x5c0 [edac_core] [] edac_mc_handle_error+0x42b/0x6e0 [edac_core] [] 
sbridge_mce_output_error+0x410/0x10d0 [sb_edac] [] sbridge_check_error+0xac/0x130 [sb_edac] [] edac_mc_workq_function+0x3c/0x90 [edac_core] [] process_one_work+0x19b/0x480 [] worker_thread+0x6a/0x520 [] kthread+0xe4/0x100 [] ret_from_fork+0x22/0x40 This happens because we increment rx_pkts on -ENOMEM and -EIO, resulting in rx_pkts > 0. Fix this by only bumping rx_pkts if we were actually given a non-zero budget. Signed-off-by: Calvin Owens Acked-by: Michael Chan Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index dc5de275352a7f..aa764c5e3c6b9f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1875,7 +1875,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) * here forever if we consistently cannot allocate * buffers. */ - else if (rc == -ENOMEM) + else if (rc == -ENOMEM && budget) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; @@ -1961,7 +1961,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); - if (likely(rc == -EIO)) + if (likely(rc == -EIO) && budget) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; From 39889c293371e826503b61d98050a49742ccaf9c Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 20 Dec 2017 23:21:28 +0000 Subject: [PATCH 1163/3326] phylink: ensure the PHY interface mode is appropriately set [ Upstream commit 182088aa3c6c7f7c20a2c1dcc9ded4a3fc631f38 ] When setting the ethtool settings, ensure that the validated PHY interface mode is propagated to the current link settings, so that 2500BaseX can be selected. Fixes: 9525ae83959b ("phylink: add phylink infrastructure") Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/phylink.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index bcb4755bcd957d..38d81175bfd3d6 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -948,6 +948,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, mutex_lock(&pl->state_mutex); /* Configure the MAC to match the new settings */ linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); + pl->link_config.interface = config.interface; pl->link_config.speed = our_kset.base.speed; pl->link_config.duplex = our_kset.base.duplex; pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; From 185a3475dee6c14dd67d1a94178b6b5615cf167b Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 20 Dec 2017 23:21:34 +0000 Subject: [PATCH 1164/3326] phylink: ensure AN is enabled [ Upstream commit 74ee0e8c1bf9925c59cc8f1c65c29adf6e4cf603 ] Ensure that we mark AN as enabled at boot time, rather than leaving it disabled. This is noticable if your SFP module is fiber, and it supports faster speeds than 1G with 2.5G support in place. Fixes: 9525ae83959b ("phylink: add phylink infrastructure") Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/phylink.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 38d81175bfd3d6..4b377b978a0be4 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -525,6 +525,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, pl->link_config.pause = MLO_PAUSE_AN; pl->link_config.speed = SPEED_UNKNOWN; pl->link_config.duplex = DUPLEX_UNKNOWN; + pl->link_config.an_enabled = true; pl->ops = ops; __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); From 86deaaa0ca2b0657e1f72e1087153d4ff22a312b Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Tue, 19 Dec 2017 15:17:13 +0100 Subject: [PATCH 1165/3326] ipv4: fib: Fix metrics match when deleting a route [ Upstream commit d03a45572efa068fa64db211d6d45222660e76c5 ] The recently added fib_metrics_match() causes a regression for routes with both RTAX_FEATURES and RTAX_CC_ALGO if the latter has TCP_CONG_NEEDS_ECN flag set: | # ip link add d0 type dummy | # ip link set d0 up | # ip route add 172.29.29.0/24 dev d0 features ecn congctl dctcp | # ip route del 172.29.29.0/24 dev d0 features ecn congctl dctcp | RTNETLINK answers: No such process During route insertion, fib_convert_metrics() detects that the given CC algo requires ECN and hence sets DST_FEATURE_ECN_CA bit in RTAX_FEATURES. During route deletion though, fib_metrics_match() compares stored RTAX_FEATURES value with that from userspace (which obviously has no knowledge about DST_FEATURE_ECN_CA) and fails. Fixes: 5f9ae3d9e7e4a ("ipv4: do metrics match when looking up and deleting a route") Signed-off-by: Phil Sutter Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/fib_semantics.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 01ed22139ac2b1..aff3751df95035 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -706,7 +706,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi) nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { int type = nla_type(nla); - u32 val; + u32 fi_val, val; if (!type) continue; @@ -723,7 +723,11 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi) val = nla_get_u32(nla); } - if (fi->fib_metrics->metrics[type - 1] != val) + fi_val = fi->fib_metrics->metrics[type - 1]; + if (type == RTAX_FEATURES) + fi_val &= ~DST_FEATURE_ECN_CA; + + if (fi_val != val) return false; } From f2272d5dce7905d605f26ba8a5f9961d05263e74 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 14 Nov 2017 14:21:32 +0100 Subject: [PATCH 1166/3326] ipv6: set all.accept_dad to 0 by default [ Upstream commit 094009531612246d9e13f9e0c3ae2205d7f63a0a ] With commits 35e015e1f577 and a2d3f3e33853, the global 'accept_dad' flag is also taken into account (default value is 1). If either global or per-interface flag is non-zero, DAD will be enabled on a given interface. This is not backward compatible: before those patches, the user could disable DAD just by setting the per-interface flag to 0. Now, the user instead needs to set both flags to 0 to actually disable DAD. Restore the previous behaviour by setting the default for the global 'accept_dad' flag to 0. This way, DAD is still enabled by default, as per-interface flags are set to 1 on device creation, but setting them to 0 is enough to disable DAD on a given interface. 
- Before 35e015e1f57a7 and a2d3f3e33853: global per-interface DAD enabled [default] 1 1 yes X 0 no X 1 yes - After 35e015e1f577 and a2d3f3e33853: global per-interface DAD enabled [default] 1 1 yes 0 0 no 0 1 yes 1 0 yes - After this fix: global per-interface DAD enabled 1 1 yes 0 0 no [default] 0 1 yes 1 0 yes Fixes: 35e015e1f577 ("ipv6: fix net.ipv6.conf.all interface DAD handlers") Fixes: a2d3f3e33853 ("ipv6: fix net.ipv6.conf.all.accept_dad behaviour for real") CC: Stefano Brivio CC: Matteo Croce CC: Erik Kline Signed-off-by: Nicolas Dichtel Acked-by: Stefano Brivio Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/addrconf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 2ec39404c44928..c5318f5f6a1441 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -231,7 +231,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .proxy_ndp = 0, .accept_source_route = 0, /* we do not accept RH0 by default. */ .disable_ipv6 = 0, - .accept_dad = 1, + .accept_dad = 0, .suppress_frag_ndisc = 1, .accept_ra_mtu = 1, .stable_secret = { From 8f5bbb29b62c818e16de49c0bbc945b501c157cf Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Fri, 10 Nov 2017 15:59:52 +0900 Subject: [PATCH 1167/3326] Revert "mlx5: move affinity hints assignments to generic code" [ Upstream commit 231243c82793428467524227ae02ca451e6a98e7 ] Before the offending commit, mlx5 core did the IRQ affinity itself, and it seems that the new generic code have some drawbacks and one of them is the lack for user ability to modify irq affinity after the initial affinity values got assigned. The issue is still being discussed and a solution in the new generic code is required, until then we need to revert this patch. This fixes the following issue: echo > /proc/irq//smp_affinity fails with -EIO This reverts commit a435393acafbf0ecff4deb3e3cb554b34f0d0664. Note: kept mlx5_get_vector_affinity in include/linux/mlx5/driver.h since it is used in mlx5_ib driver. 
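The user-visible regression quoted above is that writing a CPU mask to an interrupt's smp_affinity file starts failing with EIO. A small userspace C sketch of that check follows; the IRQ number 42 is a placeholder for the NIC's completion vector (the actual number is elided in the report) and the mask value is arbitrary.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical IRQ number; substitute one belonging to the NIC */
	const char *path = "/proc/irq/42/smp_affinity";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* try to move the IRQ to CPU 1; with the offending commit this
	 * write came back with -EIO instead of taking effect */
	if (write(fd, "2\n", 2) < 0)
		fprintf(stderr, "write %s: %s\n", path, strerror(errno));
	close(fd);
	return 0;
}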
Fixes: a435393acafb ("mlx5: move affinity hints assignments to generic code") Cc: Sagi Grimberg Cc: Thomas Gleixner Cc: Jes Sorensen Reported-by: Jes Sorensen Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + .../net/ethernet/mellanox/mlx5/core/en_main.c | 45 ++++++----- .../net/ethernet/mellanox/mlx5/core/main.c | 75 +++++++++++++++++-- include/linux/mlx5/driver.h | 1 + 4 files changed, 93 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 13b5ef9d8703fd..5fa07162010491 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -590,6 +590,7 @@ struct mlx5e_channel { struct mlx5_core_dev *mdev; struct mlx5e_tstamp *tstamp; int ix; + int cpu; }; struct mlx5e_channels { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 517727f277c5fe..3cdb932cae76ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -71,11 +71,6 @@ struct mlx5e_channel_param { struct mlx5e_cq_param icosq_cq; }; -static int mlx5e_get_node(struct mlx5e_priv *priv, int ix) -{ - return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix); -} - static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { return MLX5_CAP_GEN(mdev, striding_rq) && @@ -452,17 +447,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int wq_sz = mlx5_wq_ll_get_size(&rq->wq); int mtt_sz = mlx5e_get_wqe_mtt_sz(); int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1; - int node = mlx5e_get_node(c->priv, c->ix); int i; rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), - GFP_KERNEL, node); + GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) goto err_out; /* We allocate more than mtt_sz as we will align the pointer */ - rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, - GFP_KERNEL, node); + rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL, + cpu_to_node(c->cpu)); if (unlikely(!rq->mpwqe.mtt_no_align)) goto err_free_wqe_info; @@ -570,7 +564,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, int err; int i; - rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + rqp->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq, &rq->wq_ctrl); @@ -636,8 +630,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, default: /* MLX5_WQ_TYPE_LINKED_LIST */ rq->wqe.frag_info = kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), - GFP_KERNEL, - mlx5e_get_node(c->priv, c->ix)); + GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->wqe.frag_info) { err = -ENOMEM; goto err_rq_wq_destroy; @@ -1007,13 +1000,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; - param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) return err; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix)); + err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; @@ -1060,13 +1053,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; - param->wq.db_numa_node = 
mlx5e_get_node(c->priv, c->ix); + param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) return err; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix)); + err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; @@ -1132,13 +1125,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); - param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) return err; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix)); + err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; @@ -1510,8 +1503,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = c->priv->mdev; int err; - param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix); - param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); + param->wq.buf_numa_node = cpu_to_node(c->cpu); + param->wq.db_numa_node = cpu_to_node(c->cpu); param->eq_ix = c->ix; err = mlx5e_alloc_cq_common(mdev, param, cq); @@ -1610,6 +1603,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq) mlx5e_free_cq(cq); } +static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) +{ + return cpumask_first(priv->mdev->priv.irq_info[ix].mask); +} + static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) @@ -1758,12 +1756,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, { struct mlx5e_cq_moder icocq_moder = {0, 0}; struct net_device *netdev = priv->netdev; + int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; unsigned int irq; int err; int eqn; - c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix)); + c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM; @@ -1771,6 +1770,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->mdev = priv->mdev; c->tstamp = &priv->tstamp; c->ix = ix; + c->cpu = cpu; c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); @@ -1859,8 +1859,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) for (tc = 0; tc < c->num_tc; tc++) mlx5e_activate_txqsq(&c->sq[tc]); mlx5e_activate_rq(&c->rq); - netif_set_xps_queue(c->netdev, - mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix); + netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix); } static void mlx5e_deactivate_channel(struct mlx5e_channel *c) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 06562c9a6b9cb8..8bfc37e4ec873d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -316,9 +316,6 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_eq_table *table = &priv->eq_table; - struct irq_affinity irqdesc = { - .pre_vectors = MLX5_EQ_VEC_COMP_BASE, - }; int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); int nvec; @@ -332,10 +329,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev) if (!priv->irq_info) goto err_free_msix; - nvec = pci_alloc_irq_vectors_affinity(dev->pdev, + nvec = pci_alloc_irq_vectors(dev->pdev, 
MLX5_EQ_VEC_COMP_BASE + 1, nvec, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, - &irqdesc); + PCI_IRQ_MSIX); if (nvec < 0) return nvec; @@ -621,6 +617,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev) return (u64)timer_l | (u64)timer_h1 << 32; } +static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + + if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { + mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); + return -ENOMEM; + } + + cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), + priv->irq_info[i].mask); + + if (IS_ENABLED(CONFIG_SMP) && + irq_set_affinity_hint(irq, priv->irq_info[i].mask)) + mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); + + return 0; +} + +static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + + irq_set_affinity_hint(irq, NULL); + free_cpumask_var(priv->irq_info[i].mask); +} + +static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev) +{ + int err; + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) { + err = mlx5_irq_set_affinity_hint(mdev, i); + if (err) + goto err_out; + } + + return 0; + +err_out: + for (i--; i >= 0; i--) + mlx5_irq_clear_affinity_hint(mdev, i); + + return err; +} + +static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev) +{ + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) + mlx5_irq_clear_affinity_hint(mdev, i); +} + int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn) { @@ -1093,6 +1146,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_stop_eqs; } + err = mlx5_irq_set_affinity_hints(dev); + if (err) { + dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); + goto err_affinity_hints; + } + err = mlx5_init_fs(dev); if (err) { dev_err(&pdev->dev, "Failed to init flow steering\n"); @@ -1150,6 +1209,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_cleanup_fs(dev); err_fs: + mlx5_irq_clear_affinity_hints(dev); + +err_affinity_hints: free_comp_eqs(dev); err_stop_eqs: @@ -1218,6 +1280,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_sriov_detach(dev); mlx5_cleanup_fs(dev); + mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_put_uars_page(dev, priv->uar); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 401c8972cc3a86..8b3d0103c03aae 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -546,6 +546,7 @@ struct mlx5_core_sriov { }; struct mlx5_irq_info { + cpumask_var_t mask; char name[MLX5_MAX_IRQ_NAME]; }; From 17155ea827b2fd81330a442ed56d0edafd9969e1 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Wed, 20 Dec 2017 17:37:49 -0500 Subject: [PATCH 1168/3326] skbuff: orphan frags before zerocopy clone [ Upstream commit 268b790679422a89e9ab0685d9f291edae780c98 ] Call skb_zerocopy_clone after skb_orphan_frags, to avoid duplicate calls to skb_uarg(skb)->callback for the same data. skb_zerocopy_clone associates skb_shinfo(skb)->uarg from frag_skb with each segment. This is only safe for uargs that do refcounting, which is those that pass skb_orphan_frags without dropping their shared frags. 
For others, skb_orphan_frags drops the user frags and sets the uarg to NULL, after which sock_zerocopy_clone has no effect. Qemu hangs were reported due to duplicate vhost_net_zerocopy_callback calls for the same data causing the vhost_net_ubuf_ref_>refcount to drop below zero. Link: http://lkml.kernel.org/r/ Fixes: 1f8b977ab32d ("sock: enable MSG_ZEROCOPY") Reported-by: Andreas Hartmann Reported-by: David Hill Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/skbuff.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 623143d7844d84..3c2e1db62eedd3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3657,8 +3657,6 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags & SKBTX_SHARED_FRAG; - if (skb_zerocopy_clone(nskb, head_skb, GFP_ATOMIC)) - goto err; while (pos < offset + len) { if (i >= nfrags) { @@ -3684,6 +3682,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) goto err; + if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) + goto err; *nskb_frag = *frag; __skb_frag_ref(nskb_frag); From 49cd180d4a104159f240a9a896e5f13844227378 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Wed, 20 Dec 2017 17:37:50 -0500 Subject: [PATCH 1169/3326] skbuff: skb_copy_ubufs must release uarg even without user frags [ Upstream commit b90ddd568792bcb0054eaf0f61785c8f80c3bd1c ] skb_copy_ubufs creates a private copy of frags[] to release its hold on user frags, then calls uarg->callback to notify the owner. Call uarg->callback even when no frags exist. This edge case can happen when zerocopy_sg_from_iter finds enough room in skb_headlen to copy all the data. Fixes: 3ece782693c4 ("sock: skb_copy_ubufs support for compound pages") Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/skbuff.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3c2e1db62eedd3..4a10e96c6efc67 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1182,7 +1182,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) u32 d_off; if (!num_frags) - return 0; + goto release; if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) return -EINVAL; @@ -1242,6 +1242,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); skb_shinfo(skb)->nr_frags = new_frags; +release: skb_zcopy_clear(skb, false); return 0; } From b9edd6bf0ccb2939a7716b80d3e73a24ead9fe62 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 28 Dec 2017 12:38:13 -0500 Subject: [PATCH 1170/3326] skbuff: in skb_copy_ubufs unclone before releasing zerocopy skb_copy_ubufs must unclone before it is safe to modify its skb_shared_info with skb_zcopy_clear. Commit b90ddd568792 ("skbuff: skb_copy_ubufs must release uarg even without user frags") ensures that all skbs release their zerocopy state, even those without frags. But I forgot an edge case where such an skb arrives that is cloned. The stack does not build such packets. Vhost/tun skbs have their frags orphaned before cloning. TCP skbs only attach zerocopy state when a frag is added. But if TCP packets can be trimmed or linearized, this might occur. Tracing the code I found no instance so far (e.g., skb_linearize ends up calling skb_zcopy_clear if !skb->data_len). 
Still, it is non-obvious that no path exists. And it is fragile to rely on this. Fixes: b90ddd568792 ("skbuff: skb_copy_ubufs must release uarg even without user frags") Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/skbuff.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4a10e96c6efc67..15fa5baa8faef7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1181,12 +1181,12 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) int i, new_frags; u32 d_off; - if (!num_frags) - goto release; - if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) return -EINVAL; + if (!num_frags) + goto release; + new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < new_frags; i++) { page = alloc_page(gfp_mask); From 38e8981d5490b2908136baea1f9e3b84a87aec01 Mon Sep 17 00:00:00 2001 From: Jan Engelhardt Date: Mon, 25 Dec 2017 03:43:53 +0100 Subject: [PATCH 1171/3326] sparc64: repair calling incorrect hweight function from stubs [ Upstream commit 59585b4be9ae4dc6506551709bdcd6f5210b8a01 ] Commit v4.12-rc4-1-g9289ea7f952b introduced a mistake that made the 64-bit hweight stub call the 16-bit hweight function. Fixes: 9289ea7f952b ("sparc64: Use indirect calls in hamming weight stubs") Signed-off-by: Jan Engelhardt Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- arch/sparc/lib/hweight.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S index e5547b22cd1832..0ddbbb03182232 100644 --- a/arch/sparc/lib/hweight.S +++ b/arch/sparc/lib/hweight.S @@ -44,8 +44,8 @@ EXPORT_SYMBOL(__arch_hweight32) .previous ENTRY(__arch_hweight64) - sethi %hi(__sw_hweight16), %g1 - jmpl %g1 + %lo(__sw_hweight16), %g0 + sethi %hi(__sw_hweight64), %g1 + jmpl %g1 + %lo(__sw_hweight64), %g0 nop ENDPROC(__arch_hweight64) EXPORT_SYMBOL(__arch_hweight64) From 2ee11dcfc9e1c894b058de64fb8b693cf697fbbe Mon Sep 17 00:00:00 2001 From: Juan Zea Date: Fri, 15 Dec 2017 10:21:20 +0100 Subject: [PATCH 1172/3326] usbip: fix usbip bind writing random string after command in match_busid commit 544c4605acc5ae4afe7dd5914147947db182f2fb upstream. usbip bind writes commands followed by random string when writing to match_busid attribute in sysfs, caused by using full variable size instead of string length. 
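As an illustration of the bug pattern, here is a minimal userspace sketch outside the usbip sources; the helper name, the file descriptor and the bus ID are made up. Writing sizeof(command) bytes pushes whatever stack bytes follow the NUL terminator into the sysfs attribute, while the snprintf() return value bounds the write to exactly the formatted command:

#include <stdio.h>
#include <unistd.h>

/* Hypothetical helper: 'fd' is an already-open match_busid attribute,
 * 'busid' is a bus ID string such as "1-2". */
static int write_add_command(int fd, const char *busid)
{
	char command[32];	/* bytes past the string are uninitialized */
	int len = snprintf(command, sizeof(command), "add %s", busid);

	/* Buggy pattern: write(fd, command, sizeof(command)) would append
	 * up to sizeof(command) - len stray bytes after the command. */
	return write(fd, command, len);	/* write only the formatted length */
}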
Signed-off-by: Juan Zea Acked-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- tools/usb/usbip/src/utils.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c index 2b3d6d2350158b..3d7b42e7729941 100644 --- a/tools/usb/usbip/src/utils.c +++ b/tools/usb/usbip/src/utils.c @@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add) char command[SYSFS_BUS_ID_SIZE + 4]; char match_busid_attr_path[SYSFS_PATH_MAX]; int rc; + int cmd_size; snprintf(match_busid_attr_path, sizeof(match_busid_attr_path), "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME, @@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add) attr_name); if (add) - snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid); + cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", + busid); else - snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid); + cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", + busid); rc = write_sysfs_attribute(match_busid_attr_path, command, - sizeof(command)); + cmd_size); if (rc < 0) { dbg("failed to write match_busid: %s", strerror(errno)); return -1; From 6b8335e48ae7171206eda3ae572f0cd0c75ca8a1 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Fri, 15 Dec 2017 10:50:09 -0700 Subject: [PATCH 1173/3326] usbip: prevent leaking socket pointer address in messages commit 90120d15f4c397272aaf41077960a157fc4212bf upstream. usbip driver is leaking socket pointer address in messages. Remove the messages that aren't useful and print sockfd in the ones that are useful for debugging. Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/stub_dev.c | 3 +-- drivers/usb/usbip/usbip_common.c | 16 +++++----------- drivers/usb/usbip/vhci_hcd.c | 2 +- 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index c653ce533430ac..720408d39f1134 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -163,8 +163,7 @@ static void stub_shutdown_connection(struct usbip_device *ud) * step 1? 
*/ if (ud->tcp_socket) { - dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n", - ud->tcp_socket); + dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd); kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); } diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 2281f3562870a0..17b599b923f367 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -331,26 +331,20 @@ int usbip_recv(struct socket *sock, void *buf, int size) struct msghdr msg = {.msg_flags = MSG_NOSIGNAL}; int total = 0; + if (!sock || !buf || !size) + return -EINVAL; + iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size); usbip_dbg_xmit("enter\n"); - if (!sock || !buf || !size) { - pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf, - size); - return -EINVAL; - } - do { - int sz = msg_data_left(&msg); + msg_data_left(&msg); sock->sk->sk_allocation = GFP_NOIO; result = sock_recvmsg(sock, &msg, MSG_WAITALL); - if (result <= 0) { - pr_debug("receive sock %p buf %p size %u ret %d total %d\n", - sock, buf + total, sz, result, total); + if (result <= 0) goto err; - } total += result; } while (msg_data_left(&msg)); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 1f0cf81cc14576..4eec1d796a2d54 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -989,7 +989,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) /* need this? see stub_dev.c */ if (ud->tcp_socket) { - pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket); + pr_debug("shutdown tcp_socket %d\n", ud->sockfd); kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); } From ed4db9a7f8cb9e83605e63e7656a6efb56846ebb Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Mon, 18 Dec 2017 17:23:37 -0700 Subject: [PATCH 1174/3326] usbip: stub: stop printing kernel pointer addresses in messages commit 248a22044366f588d46754c54dfe29ffe4f8b4df upstream. Remove and/or change debug, info. and error messages to not print kernel pointer addresses. Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/stub_main.c | 5 +++-- drivers/usb/usbip/stub_rx.c | 7 ++----- drivers/usb/usbip/stub_tx.c | 6 +++--- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index 7170404e897983..6968c906fa2916 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -251,11 +251,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev) struct stub_priv *priv; struct urb *urb; - dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev); + dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n"); while ((priv = stub_priv_pop(sdev))) { urb = priv->urb; - dev_dbg(&sdev->udev->dev, "free urb %p\n", urb); + dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n", + priv->seqnum); usb_kill_urb(urb); kmem_cache_free(stub_priv_cache, priv); diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 283a9be77a229e..5b807185f79ec9 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -225,9 +225,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, if (priv->seqnum != pdu->u.cmd_unlink.seqnum) continue; - dev_info(&priv->urb->dev->dev, "unlink urb %p\n", - priv->urb); - /* * This matched urb is not completed yet (i.e., be in * flight in usb hcd hardware/driver). 
Now we are @@ -266,8 +263,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, ret = usb_unlink_urb(priv->urb); if (ret != -EINPROGRESS) dev_err(&priv->urb->dev->dev, - "failed to unlink a urb %p, ret %d\n", - priv->urb, ret); + "failed to unlink a urb # %lu, ret %d\n", + priv->seqnum, ret); return 0; } diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index 87ff94be4235eb..96aa375b80d9cc 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c @@ -102,7 +102,7 @@ void stub_complete(struct urb *urb) /* link a urb to the queue of tx. */ spin_lock_irqsave(&sdev->priv_lock, flags); if (sdev->ud.tcp_socket == NULL) { - usbip_dbg_stub_tx("ignore urb for closed connection %p", urb); + usbip_dbg_stub_tx("ignore urb for closed connection\n"); /* It will be freed in stub_device_cleanup_urbs(). */ } else if (priv->unlinking) { stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status); @@ -204,8 +204,8 @@ static int stub_send_ret_submit(struct stub_device *sdev) /* 1. setup usbip_header */ setup_ret_submit_pdu(&pdu_header, urb); - usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", - pdu_header.base.seqnum, urb); + usbip_dbg_stub_tx("setup txdata seqnum: %d\n", + pdu_header.base.seqnum); usbip_header_correct_endian(&pdu_header, 1); iov[iovnum].iov_base = &pdu_header; From 2d6483bf78f859b827ea97a16b68cf64c3b56e67 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Mon, 18 Dec 2017 17:24:22 -0700 Subject: [PATCH 1175/3326] usbip: vhci: stop printing kernel pointer addresses in messages commit 8272d099d05f7ab2776cf56a2ab9f9443be18907 upstream. Remove and/or change debug, info. and error messages to not print kernel pointer addresses. Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/vhci_hcd.c | 10 ---------- drivers/usb/usbip/vhci_rx.c | 23 +++++++++++------------ drivers/usb/usbip/vhci_tx.c | 3 ++- 3 files changed, 13 insertions(+), 23 deletions(-) diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 4eec1d796a2d54..692cfdef667e76 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -670,9 +670,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct vhci_device *vdev; unsigned long flags; - usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n", - hcd, urb, mem_flags); - if (portnum > VHCI_HC_PORTS) { pr_err("invalid port number %d\n", portnum); return -ENODEV; @@ -836,8 +833,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) struct vhci_device *vdev; unsigned long flags; - pr_info("dequeue a urb %p\n", urb); - spin_lock_irqsave(&vhci->lock, flags); priv = urb->hcpriv; @@ -865,7 +860,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) /* tcp connection is closed */ spin_lock(&vdev->priv_lock); - pr_info("device %p seems to be disconnected\n", vdev); list_del(&priv->list); kfree(priv); urb->hcpriv = NULL; @@ -877,8 +871,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) * vhci_rx will receive RET_UNLINK and give back the URB. * Otherwise, we give back it here. 
*/ - pr_info("gives back urb %p\n", urb); - usb_hcd_unlink_urb_from_ep(hcd, urb); spin_unlock_irqrestore(&vhci->lock, flags); @@ -906,8 +898,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) unlink->unlink_seqnum = priv->seqnum; - pr_info("device %p seems to be still connected\n", vdev); - /* send cmd_unlink and try to cancel the pending URB in the * peer */ list_add_tail(&unlink->list, &vdev->unlink_tx); diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index ef2f2d5ca6b2f9..1343037d00f9a9 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -37,24 +37,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum) urb = priv->urb; status = urb->status; - usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n", - urb, priv, seqnum); + usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum); switch (status) { case -ENOENT: /* fall through */ case -ECONNRESET: - dev_info(&urb->dev->dev, - "urb %p was unlinked %ssynchronuously.\n", urb, - status == -ENOENT ? "" : "a"); + dev_dbg(&urb->dev->dev, + "urb seq# %u was unlinked %ssynchronuously\n", + seqnum, status == -ENOENT ? "" : "a"); break; case -EINPROGRESS: /* no info output */ break; default: - dev_info(&urb->dev->dev, - "urb %p may be in a error, status %d\n", urb, - status); + dev_dbg(&urb->dev->dev, + "urb seq# %u may be in a error, status %d\n", + seqnum, status); } list_del(&priv->list); @@ -81,8 +80,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, spin_unlock_irqrestore(&vdev->priv_lock, flags); if (!urb) { - pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum); - pr_info("max seqnum %d\n", + pr_err("cannot find a urb of seqnum %u max seqnum %d\n", + pdu->base.seqnum, atomic_read(&vhci_hcd->seqnum)); usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return; @@ -105,7 +104,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); - usbip_dbg_vhci_rx("now giveback urb %p\n", urb); + usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum); spin_lock_irqsave(&vhci->lock, flags); usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb); @@ -172,7 +171,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, pr_info("the urb (seqnum %d) was already given back\n", pdu->base.seqnum); } else { - usbip_dbg_vhci_rx("now giveback urb %p\n", urb); + usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum); /* If unlink is successful, status is -ECONNRESET */ urb->status = pdu->u.ret_unlink.status; diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c index 3e7878fe2fd463..a9a663a578b623 100644 --- a/drivers/usb/usbip/vhci_tx.c +++ b/drivers/usb/usbip/vhci_tx.c @@ -83,7 +83,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) memset(&msg, 0, sizeof(msg)); memset(&iov, 0, sizeof(iov)); - usbip_dbg_vhci_tx("setup txdata urb %p\n", urb); + usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n", + priv->seqnum); /* 1. setup usbip_header */ setup_cmd_submit_pdu(&pdu_header, urb); From 1fcd9859a4b3f1df7316ff01a9993695ab929727 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 13 Nov 2017 11:12:58 +0100 Subject: [PATCH 1176/3326] USB: chipidea: msm: fix ulpi-node lookup commit 964728f9f407eca0b417fdf8e784b7a76979490c upstream. Fix child-node lookup during probe, which ended up searching the whole device tree depth-first starting at the parent rather than just matching on its children. 
Note that the original premature free of the parent node has already been fixed separately, but that fix was apparently never backported to stable. Fixes: 47654a162081 ("usb: chipidea: msm: Restore wrapper settings after reset") Fixes: b74c43156c0c ("usb: chipidea: msm: ci_hdrc_msm_probe() missing of_node_get()") Cc: Stephen Boyd Cc: Frank Rowand Signed-off-by: Johan Hovold Signed-off-by: Peter Chen Signed-off-by: Greg Kroah-Hartman --- drivers/usb/chipidea/ci_hdrc_msm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c index bb626120296ff2..53f3bf459dd1b0 100644 --- a/drivers/usb/chipidea/ci_hdrc_msm.c +++ b/drivers/usb/chipidea/ci_hdrc_msm.c @@ -251,7 +251,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev) if (ret) goto err_mux; - ulpi_node = of_find_node_by_name(of_node_get(pdev->dev.of_node), "ulpi"); + ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi"); if (ulpi_node) { phy_node = of_get_next_available_child(ulpi_node, NULL); ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy"); From ad61ff29f1049448fa389ba9ad464e49bdcdd61c Mon Sep 17 00:00:00 2001 From: Max Schulze Date: Wed, 20 Dec 2017 20:47:44 +0100 Subject: [PATCH 1177/3326] USB: serial: ftdi_sio: add id for Airbus DS P8GR commit c6a36ad383559a60a249aa6016cebf3cb8b6c485 upstream. Add AIRBUS_DS_P8GR device IDs to ftdi_sio driver. Signed-off-by: Max Schulze Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/ftdi_sio.c | 1 + drivers/usb/serial/ftdi_sio_ids.h | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 49d1b2d4606d38..d038e543c24629 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1017,6 +1017,7 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, + { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 4faa09fe308ca0..8b4ecd2bd297b8 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -914,6 +914,12 @@ #define ICPDAS_I7561U_PID 0x0104 #define ICPDAS_I7563U_PID 0x0105 +/* + * Airbus Defence and Space + */ +#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */ +#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */ + /* * RT Systems programming cables for various ham radios */ From eb6cc0af22a3b0c982e3a61de63736b2c667a94e Mon Sep 17 00:00:00 2001 From: Reinhard Speyerer Date: Fri, 15 Dec 2017 00:39:27 +0100 Subject: [PATCH 1178/3326] USB: serial: qcserial: add Sierra Wireless EM7565 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 92a18a657fb2e2ffbfa0659af32cc18fd2346516 upstream. Sierra Wireless EM7565 devices use the QCSERIAL_SWI layout for their serial ports T: Bus=01 Lev=03 Prnt=29 Port=01 Cnt=02 Dev#= 31 Spd=480 MxCh= 0 D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1 P: Vendor=1199 ProdID=9091 Rev= 0.06 S: Manufacturer=Sierra Wireless, Incorporated S: Product=Sierra Wireless EM7565 Qualcomm Snapdragon X16 LTE-A S: SerialNumber=xxxxxxxx C:* #Ifs= 4 Cfg#= 1 Atr=a0 MxPwr=500mA I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) 
Sub=ff Prot=ff Driver=qcserial E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=qcserial E: Ad=83(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=qcserial E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 8 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan E: Ad=86(I) Atr=03(Int.) MxPS= 8 Ivl=32ms E: Ad=8e(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=0f(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms but need sendsetup = true for the NMEA port to make it work properly. Simplify the patch compared to v1 as suggested by Bjørn Mork by taking advantage of the fact that existing devices work with sendsetup = true too. Use sendsetup = true for the NMEA interface of QCSERIAL_SWI and add DEVICE_SWI entries for the EM7565 PID 0x9091 and the EM7565 QDL PID 0x9090. Tests with several MC73xx/MC74xx/MC77xx devices have been performed in order to verify backward compatibility. Signed-off-by: Reinhard Speyerer Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/qcserial.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 9f9d3a9044642d..55a8fb25ce2be2 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ + {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ + {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ @@ -346,6 +348,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) break; case 2: dev_dbg(dev, "NMEA GPS interface found\n"); + sendsetup = true; break; case 3: dev_dbg(dev, "Modem port found\n"); From 9fa3c3b5598eec2040fa466941a59bfc15f03f17 Mon Sep 17 00:00:00 2001 From: Daniele Palmas Date: Thu, 14 Dec 2017 16:54:45 +0100 Subject: [PATCH 1179/3326] USB: serial: option: add support for Telit ME910 PID 0x1101 commit 08933099e6404f588f81c2050bfec7313e06eeaf upstream. This patch adds support for PID 0x1101 of Telit ME910. 
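For context, a sketch of the blacklist plumbing these modem entries rely on; the device and its interface numbers below are hypothetical. The 'reserved' bitmap lists composite interfaces that belong to other drivers, and the probe path refuses to bind them:

/* Hypothetical modem whose interfaces 1 and 4 carry ADB and RMNET: */
static const struct option_blacklist_info example_modem_blacklist = {
	.reserved = BIT(1) | BIT(4),
};

/* option_probe() returns -ENODEV for interface numbers set in 'reserved',
 * leaving them free for e.g. qmi_wwan or ADB to claim; the remaining
 * interfaces are exposed as ttyUSB serial ports as usual. */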
Signed-off-by: Daniele Palmas Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 54e316b1892d73..08684e0bc7abd8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -283,6 +283,7 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 #define TELIT_PRODUCT_ME910 0x1100 +#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101 #define TELIT_PRODUCT_LE920 0x1200 #define TELIT_PRODUCT_LE910 0x1201 #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 @@ -648,6 +649,11 @@ static const struct option_blacklist_info telit_me910_blacklist = { .reserved = BIT(1) | BIT(3), }; +static const struct option_blacklist_info telit_me910_dual_modem_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(3), +}; + static const struct option_blacklist_info telit_le910_blacklist = { .sendsetup = BIT(0), .reserved = BIT(1) | BIT(2), @@ -1247,6 +1253,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), + .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), From 57211f0cf174f8fdf3dffcff99fad25f376bb2b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?SZ=20Lin=20=28=E6=9E=97=E4=B8=8A=E6=99=BA=29?= Date: Tue, 19 Dec 2017 17:40:32 +0800 Subject: [PATCH 1180/3326] USB: serial: option: adding support for YUGA CLM920-NC5 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 3920bb713038810f25770e7545b79f204685c8f2 upstream. This patch adds support for YUGA CLM920-NC5 PID 0x9625 USB modem to option driver. 
Interface layout: 0: QCDM/DIAG 1: ADB 2: MODEM 3: AT 4: RMNET Signed-off-by: Taiyi Wu Signed-off-by: SZ Lin (林上智) Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 08684e0bc7abd8..a9400458cceaaf 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Qualcomm's vendor ID */ #define QUECTEL_PRODUCT_UC20 0x9003 #define QUECTEL_PRODUCT_UC15 0x9090 +/* These Yuga products use Qualcomm's vendor ID */ +#define YUGA_PRODUCT_CLM920_NC5 0x9625 #define QUECTEL_VENDOR_ID 0x2c7c /* These Quectel products use Quectel's vendor ID */ @@ -683,6 +685,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = { .reserved = BIT(4) | BIT(5), }; +static const struct option_blacklist_info yuga_clm920_nc5_blacklist = { + .reserved = BIT(1) | BIT(4), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -1187,6 +1193,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + /* Yuga products use Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5), + .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, From b9d02d3c5899b0abcb2df72252d7fc2f57028bfb Mon Sep 17 00:00:00 2001 From: Dmitry Fleytman Dmitry Fleytman Date: Tue, 19 Dec 2017 06:02:04 +0200 Subject: [PATCH 1181/3326] usb: Add device quirk for Logitech HD Pro Webcam C925e commit 7f038d256c723dd390d2fca942919573995f4cfd upstream. Commit e0429362ab15 ("usb: Add device quirk for Logitech HD Pro Webcams C920 and C930e") introduced quirk to workaround an issue with some Logitech webcams. There is one more model that has the same issue - C925e, so applying the same quirk as well. See aforementioned commit message for detailed explanation of the problem. 
Signed-off-by: Dmitry Fleytman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 50010282c010f6..60674a932c77e9 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -57,10 +57,11 @@ static const struct usb_device_id usb_quirk_list[] = { /* Microsoft LifeCam-VX700 v2.0 */ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Logitech HD Pro Webcams C920, C920-C and C930e */ + /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, /* Logitech ConferenceCam CC3000e */ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, From e2f33e5983cb1609805e267a3d2ef46d78257b2c Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Tue, 12 Dec 2017 16:11:30 +0100 Subject: [PATCH 1182/3326] usb: add RESET_RESUME for ELSA MicroLink 56K commit b9096d9f15c142574ebebe8fbb137012bb9d99c2 upstream. This modem needs this quirk to operate. It produces timeouts when resumed without reset. Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 60674a932c77e9..c05c4f87775043 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -155,6 +155,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */ { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM }, + /* ELSA MicroLink 56K */ + { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, From c7c4e00a66081ad6161f009a0c329b9a36085766 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Tue, 19 Dec 2017 11:14:42 +0200 Subject: [PATCH 1183/3326] USB: Fix off by one in type-specific length check of BOS SSP capability commit 07b9f12864d16c3a861aef4817eb1efccbc5d0e6 upstream. USB 3.1 devices are not detected as 3.1 capable since 4.15-rc3 due to a off by one in commit 81cf4a45360f ("USB: core: Add type-specific length check of BOS descriptors") It uses USB_DT_USB_SSP_CAP_SIZE() to get SSP capability size which takes the zero based SSAC as argument, not the actual count of sublink speed attributes. USB3 spec 9.6.2.5 says "The number of Sublink Speed Attributes = SSAC + 1." 
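A worked example, with numbers chosen for illustration: a device reporting SSAC = 3 in bmAttributes carries 3 + 1 = 4 sublink speed attributes, so its SSP capability descriptor is 12 fixed bytes plus 4 * 4 = 28 bytes. USB_DT_USB_SSP_CAP_SIZE(3) matches that 28-byte length, but the pre-fix code passed ssac + 1 = 4 and therefore demanded one attribute's worth (4 bytes) more than the descriptor contains, so the length check failed and dev->bos->ssp_cap was left unset.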
The type-specific length check patch was added to stable and needs to be fixed there as well Fixes: 81cf4a45360f ("USB: core: Add type-specific length check of BOS descriptors") CC: Masakazu Mokuno Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/config.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 843ef46d2537eb..9e3355b9739618 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -1007,7 +1007,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) case USB_SSP_CAP_TYPE: ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; ssac = (le32_to_cpu(ssp_cap->bmAttributes) & - USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1; + USB_SSP_SUBLINK_SPEED_ATTRIBS); if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac)) dev->bos->ssp_cap = ssp_cap; break; From f0f18aa8f70171f3e184b4185e673d7d4694300e Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Thu, 21 Dec 2017 15:06:15 +0200 Subject: [PATCH 1184/3326] usb: xhci: Add XHCI_TRUST_TX_LENGTH for Renesas uPD720201 commit da99706689481717998d1d48edd389f339eea979 upstream. When plugging in a USB webcam I see the following message: xhci_hcd 0000:04:00.0: WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk? handle_tx_event: 913 callbacks suppressed All is quiet again with this patch (and I've done a fair but of soak testing with the camera since). Signed-off-by: Daniel Thompson Acked-by: Ard Biesheuvel Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-pci.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 76f39295473363..abb8f19ae40f2a 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -189,6 +189,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_BROKEN_STREAMS; } + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && + pdev->device == 0x0014) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0015) xhci->quirks |= XHCI_RESET_ON_RESUME; From e4fb2e7e92ec638f0bd489c56b4b6d9004fd1f40 Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Fri, 22 Dec 2017 15:51:12 +0100 Subject: [PATCH 1185/3326] timers: Use deferrable base independent of base::nohz_active commit ced6d5c11d3e7b342f1a80f908e6756ebd4b8ddd upstream. During boot and before base::nohz_active is set in the timer bases, deferrable timers are enqueued into the standard timer base. This works correctly as long as base::nohz_active is false. Once it base::nohz_active is set and a timer which was enqueued before that is accessed the lock selector code choses the lock of the deferred base. This causes unlocked access to the standard base and in case the timer is removed it does not clear the pending flag in the standard base bitmap which causes get_next_timer_interrupt() to return bogus values. To prevent that, the deferrable timers must be enqueued in the deferrable base, even when base::nohz_active is not set. Those deferrable timers also need to be expired unconditional. 
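For context, a minimal sketch of a deferrable-timer user, with made-up names and assuming the timer_setup()/TIMER_DEFERRABLE API available in this tree; a timer like this is what must now always be queued on the deferrable base:

static struct timer_list housekeeping_timer;

/* Non-urgent callback: it may safely be delayed until the CPU wakes up
 * for another reason, which is what TIMER_DEFERRABLE is for. */
static void housekeeping_fn(struct timer_list *t)
{
	/* light cleanup work, then re-arm */
	mod_timer(&housekeeping_timer, jiffies + 10 * HZ);
}

static void start_housekeeping(void)
{
	timer_setup(&housekeeping_timer, housekeeping_fn, TIMER_DEFERRABLE);
	/* With this fix the timer lands on the BASE_DEF wheel even while
	 * base::nohz_active is still false, so deleting it later cannot
	 * leave a stale pending bit behind in the standard base. */
	mod_timer(&housekeeping_timer, jiffies + 10 * HZ);
}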
Fixes: 500462a9de65 ("timers: Switch to a non-cascading wheel") Signed-off-by: Anna-Maria Gleixner Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Sebastian Siewior Cc: rt@linutronix.de Cc: Paul McKenney Link: https://lkml.kernel.org/r/20171222145337.633328378@linutronix.de Signed-off-by: Greg Kroah-Hartman --- kernel/time/timer.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/kernel/time/timer.c b/kernel/time/timer.c index f2674a056c268a..fdfaf4f3bcfaa1 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -814,11 +814,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); /* - * If the timer is deferrable and nohz is active then we need to use - * the deferrable base. + * If the timer is deferrable and NO_HZ_COMMON is set then we need + * to use the deferrable base. */ - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active && - (tflags & TIMER_DEFERRABLE)) + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); return base; } @@ -828,11 +827,10 @@ static inline struct timer_base *get_timer_this_cpu_base(u32 tflags) struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); /* - * If the timer is deferrable and nohz is active then we need to use - * the deferrable base. + * If the timer is deferrable and NO_HZ_COMMON is set then we need + * to use the deferrable base. */ - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active && - (tflags & TIMER_DEFERRABLE)) + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) base = this_cpu_ptr(&timer_bases[BASE_DEF]); return base; } @@ -1644,7 +1642,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) base->must_forward_clk = false; __run_timers(base); - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) + if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); } From 8f1aa64ab08621cc3d2b1600ab80da5d3128abb6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 22 Dec 2017 15:51:14 +0100 Subject: [PATCH 1186/3326] timers: Invoke timer_start_debug() where it makes sense commit fd45bb77ad682be728d1002431d77b8c73342836 upstream. The timer start debug function is called before the proper timer base is set. As a consequence the trace data contains the stale CPU and flags values. Call the debug function after setting the new base and flags. 
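Concretely, debug_activate() ends up in the timer_start tracepoint, which records timer->flags, and the low TIMER_CPUMASK bits of those flags encode the base CPU. A timer being migrated from CPU 1 to CPU 5 was therefore traced with the stale cpu=1 when the debug call ran before the base switch; with the call moved after the new base and flags are set, the trace shows the CPU the timer is actually queued on.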
Fixes: 500462a9de65 ("timers: Switch to a non-cascading wheel") Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Sebastian Siewior Cc: rt@linutronix.de Cc: Paul McKenney Cc: Anna-Maria Gleixner Link: https://lkml.kernel.org/r/20171222145337.792907137@linutronix.de Signed-off-by: Greg Kroah-Hartman --- kernel/time/timer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/time/timer.c b/kernel/time/timer.c index fdfaf4f3bcfaa1..a4d095e1010ead 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -982,8 +982,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) if (!ret && pending_only) goto out_unlock; - debug_activate(timer, expires); - new_base = get_target_base(base, timer->flags); if (base != new_base) { @@ -1007,6 +1005,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) } } + debug_activate(timer, expires); + timer->expires = expires; /* * If 'idx' was calculated above and the base time did not advance From 6fae6de72ad44e98b5ae58a662d110c58594aad9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 27 Dec 2017 21:37:25 +0100 Subject: [PATCH 1187/3326] timers: Reinitialize per cpu bases on hotplug commit 26456f87aca7157c057de65c9414b37f1ab881d1 upstream. The timer wheel bases are not (re)initialized on CPU hotplug. That leaves them with a potentially stale clk and next_expiry valuem, which can cause trouble then the CPU is plugged. Add a prepare callback which forwards the clock, sets next_expiry to far in the future and reset the control flags to a known state. Set base->must_forward_clk so the first timer which is queued will try to forward the clock to current jiffies. Fixes: 500462a9de65 ("timers: Switch to a non-cascading wheel") Reported-by: Paul E. McKenney Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Sebastian Siewior Cc: Anna-Maria Gleixner Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1712272152200.2431@nanos Signed-off-by: Greg Kroah-Hartman --- include/linux/cpuhotplug.h | 2 +- include/linux/timer.h | 4 +++- kernel/cpu.c | 4 ++-- kernel/time/timer.c | 15 +++++++++++++++ 4 files changed, 21 insertions(+), 4 deletions(-) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 2477a5cb5bd54a..fb83dee528a1c2 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -86,7 +86,7 @@ enum cpuhp_state { CPUHP_MM_ZSWP_POOL_PREPARE, CPUHP_KVM_PPC_BOOK3S_PREPARE, CPUHP_ZCOMP_PREPARE, - CPUHP_TIMERS_DEAD, + CPUHP_TIMERS_PREPARE, CPUHP_MIPS_SOC_PREPARE, CPUHP_BP_PREPARE_DYN, CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, diff --git a/include/linux/timer.h b/include/linux/timer.h index ac66f29c69169a..e0ea1fe87572e3 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -246,9 +246,11 @@ unsigned long round_jiffies_up(unsigned long j); unsigned long round_jiffies_up_relative(unsigned long j); #ifdef CONFIG_HOTPLUG_CPU +int timers_prepare_cpu(unsigned int cpu); int timers_dead_cpu(unsigned int cpu); #else -#define timers_dead_cpu NULL +#define timers_prepare_cpu NULL +#define timers_dead_cpu NULL #endif #endif diff --git a/kernel/cpu.c b/kernel/cpu.c index 7891aecc6aec79..f21bfa3172d8a0 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1277,9 +1277,9 @@ static struct cpuhp_step cpuhp_bp_states[] = { * before blk_mq_queue_reinit_notify() from notify_dead(), * otherwise a RCU stall occurs. 
*/ - [CPUHP_TIMERS_DEAD] = { + [CPUHP_TIMERS_PREPARE] = { .name = "timers:dead", - .startup.single = NULL, + .startup.single = timers_prepare_cpu, .teardown.single = timers_dead_cpu, }, /* Kicks the plugged cpu into life */ diff --git a/kernel/time/timer.c b/kernel/time/timer.c index a4d095e1010ead..73e3cdbc61f108 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1801,6 +1801,21 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h } } +int timers_prepare_cpu(unsigned int cpu) +{ + struct timer_base *base; + int b; + + for (b = 0; b < NR_BASES; b++) { + base = per_cpu_ptr(&timer_bases[b], cpu); + base->clk = jiffies; + base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; + base->is_idle = false; + base->must_forward_clk = true; + } + return 0; +} + int timers_dead_cpu(unsigned int cpu) { struct timer_base *old_base; From d87f1bc7d15b89bd3bcf31020eb7f3b3cd6f84b5 Mon Sep 17 00:00:00 2001 From: Todd Kjos Date: Mon, 27 Nov 2017 09:32:33 -0800 Subject: [PATCH 1188/3326] binder: fix proc->files use-after-free commit 7f3dc0088b98533f17128058fac73cd8b2752ef1 upstream. proc->files cleanup is initiated by binder_vma_close. Therefore a reference on the binder_proc is not enough to prevent the files_struct from being released while the binder_proc still has a reference. This can lead to an attempt to dereference the stale pointer obtained from proc->files prior to proc->files cleanup. This has been seen once in task_get_unused_fd_flags() when __alloc_fd() is called with a stale "files". The fix is to protect proc->files with a mutex to prevent cleanup while in use. Signed-off-by: Todd Kjos Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 44 ++++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 88b4bbe58100d6..a340766b51fead 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -482,7 +482,8 @@ enum binder_deferred_state { * @tsk task_struct for group_leader of process * (invariant after initialized) * @files files_struct for process - * (invariant after initialized) + * (protected by @files_lock) + * @files_lock mutex to protect @files * @deferred_work_node: element for binder_deferred_list * (protected by binder_deferred_lock) * @deferred_work: bitmap of deferred work to perform @@ -530,6 +531,7 @@ struct binder_proc { int pid; struct task_struct *tsk; struct files_struct *files; + struct mutex files_lock; struct hlist_node deferred_work_node; int deferred_work; bool is_dead; @@ -877,20 +879,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node); static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) { - struct files_struct *files = proc->files; unsigned long rlim_cur; unsigned long irqs; + int ret; - if (files == NULL) - return -ESRCH; - - if (!lock_task_sighand(proc->tsk, &irqs)) - return -EMFILE; - + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + ret = -ESRCH; + goto err; + } + if (!lock_task_sighand(proc->tsk, &irqs)) { + ret = -EMFILE; + goto err; + } rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); unlock_task_sighand(proc->tsk, &irqs); - return __alloc_fd(files, 0, rlim_cur, flags); + ret = __alloc_fd(proc->files, 0, rlim_cur, flags); +err: + mutex_unlock(&proc->files_lock); + return ret; } /* @@ -899,8 +907,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) static void task_fd_install( struct binder_proc *proc, unsigned int fd, 
struct file *file) { + mutex_lock(&proc->files_lock); if (proc->files) __fd_install(proc->files, fd, file); + mutex_unlock(&proc->files_lock); } /* @@ -910,9 +920,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) { int retval; - if (proc->files == NULL) - return -ESRCH; - + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + retval = -ESRCH; + goto err; + } retval = __close_fd(proc->files, fd); /* can't restart close syscall because file table entry was cleared */ if (unlikely(retval == -ERESTARTSYS || @@ -920,7 +932,8 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)) retval = -EINTR; - +err: + mutex_unlock(&proc->files_lock); return retval; } @@ -4627,7 +4640,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) ret = binder_alloc_mmap_handler(&proc->alloc, vma); if (ret) return ret; + mutex_lock(&proc->files_lock); proc->files = get_files_struct(current); + mutex_unlock(&proc->files_lock); return 0; err_bad_arg: @@ -4651,6 +4666,7 @@ static int binder_open(struct inode *nodp, struct file *filp) spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); proc->tsk = current->group_leader; + mutex_init(&proc->files_lock); INIT_LIST_HEAD(&proc->todo); proc->default_priority = task_nice(current); binder_dev = container_of(filp->private_data, struct binder_device, @@ -4903,9 +4919,11 @@ static void binder_deferred_func(struct work_struct *work) files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { + mutex_lock(&proc->files_lock); files = proc->files; if (files) proc->files = NULL; + mutex_unlock(&proc->files_lock); } if (defer & BINDER_DEFERRED_FLUSH) From f3f5fa872d09109edfd7c10c57865301fee396d4 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 15 Nov 2017 10:43:16 +0100 Subject: [PATCH 1189/3326] phy: tegra: fix device-tree node lookups commit 046046737bd35bed047460f080ea47e186be731e upstream. Fix child-node lookups during probe, which ended up searching the whole device tree depth-first starting at the parents rather than just matching on their children. To make things worse, some parent nodes could end up being being prematurely freed (by tegra_xusb_pad_register()) as of_find_node_by_name() drops a reference to its first argument. Fixes: 53d2a715c240 ("phy: Add Tegra XUSB pad controller support") Cc: Thierry Reding Signed-off-by: Johan Hovold Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Greg Kroah-Hartman --- drivers/phy/tegra/xusb.c | 58 ++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 4307bf0013e186..63e916d4d06961 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -75,14 +75,14 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match); static struct device_node * tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. 
- */ - struct device_node *np = of_node_get(padctl->dev->of_node); + struct device_node *pads, *np; + + pads = of_get_child_by_name(padctl->dev->of_node, "pads"); + if (!pads) + return NULL; - np = of_find_node_by_name(np, "pads"); - if (np) - np = of_find_node_by_name(np, name); + np = of_get_child_by_name(pads, name); + of_node_put(pads); return np; } @@ -90,16 +90,16 @@ tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name) static struct device_node * tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. - */ - struct device_node *np = of_node_get(pad->dev.of_node); + struct device_node *np, *lanes; - np = of_find_node_by_name(np, "lanes"); - if (!np) + lanes = of_get_child_by_name(pad->dev.of_node, "lanes"); + if (!lanes) return NULL; - return of_find_node_by_name(np, pad->soc->lanes[index].name); + np = of_get_child_by_name(lanes, pad->soc->lanes[index].name); + of_node_put(lanes); + + return np; } static int @@ -195,7 +195,7 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad, unsigned int i; int err; - children = of_find_node_by_name(pad->dev.of_node, "lanes"); + children = of_get_child_by_name(pad->dev.of_node, "lanes"); if (!children) return -ENODEV; @@ -444,21 +444,21 @@ static struct device_node * tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type, unsigned int index) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. - */ - struct device_node *np = of_node_get(padctl->dev->of_node); + struct device_node *ports, *np; + char *name; - np = of_find_node_by_name(np, "ports"); - if (np) { - char *name; + ports = of_get_child_by_name(padctl->dev->of_node, "ports"); + if (!ports) + return NULL; - name = kasprintf(GFP_KERNEL, "%s-%u", type, index); - if (!name) - return ERR_PTR(-ENOMEM); - np = of_find_node_by_name(np, name); - kfree(name); + name = kasprintf(GFP_KERNEL, "%s-%u", type, index); + if (!name) { + of_node_put(ports); + return ERR_PTR(-ENOMEM); } + np = of_get_child_by_name(ports, name); + kfree(name); + of_node_put(ports); return np; } @@ -847,7 +847,7 @@ static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl) static int tegra_xusb_padctl_probe(struct platform_device *pdev) { - struct device_node *np = of_node_get(pdev->dev.of_node); + struct device_node *np = pdev->dev.of_node; const struct tegra_xusb_padctl_soc *soc; struct tegra_xusb_padctl *padctl; const struct of_device_id *match; @@ -855,7 +855,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev) int err; /* for backwards compatibility with old device trees */ - np = of_find_node_by_name(np, "pads"); + np = of_get_child_by_name(np, "pads"); if (!np) { dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n"); return tegra_xusb_padctl_legacy_probe(pdev); From 2695c0f1f71e6022a6916584a4d19de8968e97c3 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Fri, 17 Nov 2017 11:56:41 +0000 Subject: [PATCH 1190/3326] drivers: base: cacheinfo: fix cache type for non-architected system cache commit f57ab9a01a36ef3454333251cc57e3a9948b17bf upstream. Commit dfea747d2aba ("drivers: base: cacheinfo: support DT overrides for cache properties") doesn't initialise the cache type if it's present only in DT and the architecture is not aware of it. They are unified system level cache which are generally transparent. 
This patch check if the cache type is set to NOCACHE but the DT node indicates that it's unified cache and sets the cache type accordingly. Fixes: dfea747d2aba ("drivers: base: cacheinfo: support DT overrides for cache properties") Reported-and-tested-by: Tan Xiaojun Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla Signed-off-by: Greg Kroah-Hartman --- drivers/base/cacheinfo.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index eb3af2739537a8..07532d83be0bca 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -186,6 +186,11 @@ static void cache_associativity(struct cacheinfo *this_leaf) this_leaf->ways_of_associativity = (size / nr_sets) / line_size; } +static bool cache_node_is_unified(struct cacheinfo *this_leaf) +{ + return of_property_read_bool(this_leaf->of_node, "cache-unified"); +} + static void cache_of_override_properties(unsigned int cpu) { int index; @@ -194,6 +199,14 @@ static void cache_of_override_properties(unsigned int cpu) for (index = 0; index < cache_leaves(cpu); index++) { this_leaf = this_cpu_ci->info_list + index; + /* + * init_cache_level must setup the cache level correctly + * overriding the architecturally specified levels, so + * if type is NONE at this stage, it should be unified + */ + if (this_leaf->type == CACHE_TYPE_NOCACHE && + cache_node_is_unified(this_leaf)) + this_leaf->type = CACHE_TYPE_UNIFIED; cache_size(this_leaf); cache_get_line_size(this_leaf); cache_nr_sets(this_leaf); From 7a3ce39c2bcac61abcb4650b33945335a02ef6cd Mon Sep 17 00:00:00 2001 From: Sushmita Susheelendra Date: Fri, 15 Dec 2017 13:59:13 -0700 Subject: [PATCH 1191/3326] staging: android: ion: Fix dma direction for dma_sync_sg_for_cpu/device commit d6b246bb7a29703f53aa4c050b8b3205d749caee upstream. Use the direction argument passed into begin_cpu_access and end_cpu_access when calling the dma_sync_sg_for_cpu/device. The actual cache primitive called depends on the direction passed in. Signed-off-by: Sushmita Susheelendra Acked-by: Laura Abbott Signed-off-by: Greg Kroah-Hartman --- drivers/staging/android/ion/ion.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 93e2c90fa77d51..83dc3292e9ab1d 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -348,7 +348,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents, - DMA_BIDIRECTIONAL); + direction); } mutex_unlock(&buffer->lock); @@ -370,7 +370,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents, - DMA_BIDIRECTIONAL); + direction); } mutex_unlock(&buffer->lock); From e798502cfb471b95153de2f75a89501c45ec997a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 22 Dec 2017 15:51:13 +0100 Subject: [PATCH 1192/3326] nohz: Prevent a timer interrupt storm in tick_nohz_stop_sched_tick() commit 5d62c183f9e9df1deeea0906d099a94e8a43047a upstream. 
The conditions in irq_exit() to invoke tick_nohz_irq_exit() which subsequently invokes tick_nohz_stop_sched_tick() are: if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) If need_resched() is not set, but a timer softirq is pending then this is an indication that the softirq code punted and delegated the execution to softirqd. need_resched() is not true because the current interrupted task takes precedence over softirqd. Invoking tick_nohz_irq_exit() in this case can cause an endless loop of timer interrupts because the timer wheel contains an expired timer, but softirqs are not yet executed. So it returns an immediate expiry request, which causes the timer to fire immediately again. Lather, rinse and repeat.... Prevent that by adding a check for a pending timer soft interrupt to the conditions in tick_nohz_stop_sched_tick() which avoid calling get_next_timer_interrupt(). That keeps the tick sched timer on the tick and prevents a repetitive programming of an already expired timer. Reported-by: Sebastian Siewior Signed-off-by: Thomas Gleixner Acked-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul McKenney Cc: Anna-Maria Gleixner Cc: Sebastian Siewior Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1712272156050.2431@nanos Signed-off-by: Greg Kroah-Hartman --- kernel/time/tick-sched.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index e4d463adefc226..dfa4a117fee34b 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -674,6 +674,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) ts->next_tick = 0; } +static inline bool local_timer_softirq_pending(void) +{ + return local_softirq_pending() & TIMER_SOFTIRQ; +} + static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, ktime_t now, int cpu) { @@ -690,8 +695,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, } while (read_seqretry(&jiffies_lock, seq)); ts->last_jiffies = basejiff; - if (rcu_needs_cpu(basemono, &next_rcu) || - arch_needs_cpu() || irq_work_needs_cpu()) { + /* + * Keep the periodic tick, when RCU, architecture or irq_work + * requests it. + * Aside of that check whether the local timer softirq is + * pending. If so its a bad idea to call get_next_timer_interrupt() + * because there is an already expired timer, so it will request + * immeditate expiry, which rearms the hardware timer with a + * minimal delta which brings us back to this place + * immediately. Lather, rinse and repeat... + */ + if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || + irq_work_needs_cpu() || local_timer_softirq_pending()) { next_tick = basemono + TICK_NSEC; } else { /* From b5bef29785ff990a90238d4f59ccbeb656134eeb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 30 Dec 2017 22:13:53 +0100 Subject: [PATCH 1193/3326] x86/smpboot: Remove stale TLB flush invocations commit 322f8b8b340c824aef891342b0f5795d15e11562 upstream. smpboot_setup_warm_reset_vector() and smpboot_restore_warm_reset_vector() invoke local_flush_tlb() for no obvious reason. Digging in history revealed that the original code in the 2.1 era added those because the code manipulated a swapper_pg_dir pagetable entry. The pagetable manipulation was removed long ago in the 2.3 timeframe, but the TLB flush invocations stayed around forever. Remove them along with the pointless pr_debug()s which come from the same 2.1 change. 
Reported-by: Dominik Brodowski Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: Linus Torvalds Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20171230211829.586548655@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/smpboot.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 12bf07d44dfed2..2651ca2112c45b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -128,25 +128,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) spin_lock_irqsave(&rtc_lock, flags); CMOS_WRITE(0xa, 0xf); spin_unlock_irqrestore(&rtc_lock, flags); - local_flush_tlb(); - pr_debug("1.\n"); *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = start_eip >> 4; - pr_debug("2.\n"); *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = start_eip & 0xf; - pr_debug("3.\n"); } static inline void smpboot_restore_warm_reset_vector(void) { unsigned long flags; - /* - * Install writable page 0 entry to set BIOS data area. - */ - local_flush_tlb(); - /* * Paranoid: Set warm reset code and vector here back * to default values. From 082b7521a541a66f1fed68b1f76de6a3b910495e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 30 Dec 2017 22:13:54 +0100 Subject: [PATCH 1194/3326] x86/mm: Remove preempt_disable/enable() from __native_flush_tlb() commit decab0888e6e14e11d53cefa85f8b3d3b45ce73c upstream. The preempt_disable/enable() pair in __native_flush_tlb() was added in commit: 5cf0791da5c1 ("x86/mm: Disable preemption during CR3 read+write") ... to protect the UP variant of flush_tlb_mm_range(). That preempt_disable/enable() pair should have been added to the UP variant of flush_tlb_mm_range() instead. The UP variant was removed with commit: ce4a4e565f52 ("x86/mm: Remove the UP asm/tlbflush.h code, always use the (formerly) SMP code") ... but the preempt_disable/enable() pair stayed around. The latest change to __native_flush_tlb() in commit: 6fd166aae78c ("x86/mm: Use/Fix PCID to optimize user/kernel switches") ... added an access to a per CPU variable outside the preempt disabled regions, which makes no sense at all. __native_flush_tlb() must always be called with at least preemption disabled. Remove the preempt_disable/enable() pair and add a WARN_ON_ONCE() to catch bad callers independent of the smp_processor_id() debugging. 
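For illustration only (not part of the patch): after this change the preemption context is the caller's responsibility, so a local flush is expected to be written along the lines of the sketch below. The function name is made up; only the preempt_disable()/preempt_enable() bracketing around __native_flush_tlb() is the point.

	static void example_flush_this_cpu(void)
	{
		preempt_disable();
		/* preemptible() is false here, so the new WARN_ON_ONCE() stays silent */
		__native_flush_tlb();
		preempt_enable();
	}
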
Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: Dominik Brodowski Cc: Linus Torvalds Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20171230211829.679325424@linutronix.de Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/tlbflush.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index b519da4fc03c7c..f9b48ce152ebac 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -345,15 +345,17 @@ static inline void invalidate_user_asid(u16 asid) */ static inline void __native_flush_tlb(void) { - invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid)); /* - * If current->mm == NULL then we borrow a mm which may change - * during a task switch and therefore we must not be preempted - * while we write CR3 back: + * Preemption or interrupts must be disabled to protect the access + * to the per CPU variable and to prevent being preempted between + * read_cr3() and write_cr3(). */ - preempt_disable(); + WARN_ON_ONCE(preemptible()); + + invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid)); + + /* If current->mm == NULL then the read_cr3() "borrows" an mm */ native_write_cr3(__native_read_cr3()); - preempt_enable(); } /* From 530f5fa1600b6af56b138f9fb87b7f5148c979a0 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 27 Dec 2017 11:48:50 -0800 Subject: [PATCH 1195/3326] x86-32: Fix kexec with stack canary (CONFIG_CC_STACKPROTECTOR) commit ac461122c88a10b7d775de2f56467f097c9e627a upstream. Commit e802a51ede91 ("x86/idt: Consolidate IDT invalidation") cleaned up and unified the IDT invalidation that existed in a couple of places. It changed no actual real code. Despite not changing any actual real code, it _did_ change code generation: by implementing the common idt_invalidate() function in archx86/kernel/idt.c, it made the use of the function in arch/x86/kernel/machine_kexec_32.c be a real function call rather than an (accidental) inlining of the function. That, in turn, exposed two issues: - in load_segments(), we had incorrectly reset all the segment registers, which then made the stack canary load (which gcc does using offset of %gs) cause a trap. Instead of %gs pointing to the stack canary, it will be the normal zero-based kernel segment, and the stack canary load will take a page fault at address 0x14. - to make this even harder to debug, we had invalidated the GDT just before calling idt_invalidate(), which meant that the fault happened with an invalid GDT, which in turn causes a triple fault and immediate reboot. Fix this by (a) not reloading the special segments in load_segments(). We currently don't do any percpu accesses (which would require %fs on x86-32) in this area, but there's no reason to think that we might not want to do them, and like %gs, it's pointless to break it. (b) doing idt_invalidate() before invalidating the GDT, to keep things at least _slightly_ more debuggable for a bit longer. Without a IDT, traps will not work. Without a GDT, traps also will not work, but neither will any segment loads etc. So in a very real sense, the GDT is even more core than the IDT. 
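For reference, a rough sketch (not taken from the patch) of the canary access gcc emits on x86-32 with CONFIG_CC_STACKPROTECTOR; the exact offsets and register choices are illustrative:

	/*
	 *	movl	%gs:0x14, %eax		# load the stack canary
	 *	movl	%eax, -4(%ebp)		# store it in the stack frame
	 *	...
	 *	xorl	%gs:0x14, %eax		# re-check it on function exit
	 *	jne	__stack_chk_fail
	 *
	 * Once load_segments() reloaded %gs with the flat __KERNEL_DS segment,
	 * %gs:0x14 became a plain access to linear address 0x14 and faulted,
	 * as described above.
	 */
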
Fixes: e802a51ede91 ("x86/idt: Consolidate IDT invalidation") Reported-and-tested-by: Alexandru Chirvasitu Signed-off-by: Linus Torvalds Signed-off-by: Thomas Gleixner Cc: Denys Vlasenko Cc: Peter Zijlstra Cc: Brian Gerst Cc: Steven Rostedt Cc: Borislav Petkov Cc: Andy Lutomirski Cc: Josh Poimboeuf Link: https://lkml.kernel.org/r/alpine.LFD.2.21.1712271143180.8572@i7.lan Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/machine_kexec_32.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 00bc751c861ce8..edfede76868870 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -48,8 +48,6 @@ static void load_segments(void) "\tmovl $"STR(__KERNEL_DS)",%%eax\n" "\tmovl %%eax,%%ds\n" "\tmovl %%eax,%%es\n" - "\tmovl %%eax,%%fs\n" - "\tmovl %%eax,%%gs\n" "\tmovl %%eax,%%ss\n" : : : "eax", "memory"); #undef STR @@ -232,8 +230,8 @@ void machine_kexec(struct kimage *image) * The gdt & idt are now invalid. * If you want to load them you must set up your own idt & gdt. */ - set_gdt(phys_to_virt(0), 0); idt_invalidate(phys_to_virt(0)); + set_gdt(phys_to_virt(0), 0); /* now call it */ image->start = relocate_kernel_ptr((unsigned long)image->head, From cf6c3f7f4b1305596e261fb90201b737ad79e0d6 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Tue, 12 Dec 2017 07:56:36 -0800 Subject: [PATCH 1196/3326] x86/espfix/64: Fix espfix double-fault handling on 5-level systems commit c739f930be1dd5fd949030e3475a884fe06dae9b upstream. Using PGDIR_SHIFT to identify espfix64 addresses on 5-level systems was wrong, and it resulted in panics due to unhandled double faults. Use P4D_SHIFT instead, which is correct on 4-level and 5-level machines. This fixes a panic when running x86 selftests on 5-level machines. Signed-off-by: Andy Lutomirski Acked-by: Kirill A. Shutemov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: David Laight Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 1d33b219563f ("x86/espfix: Add support for 5-level paging") Link: http://lkml.kernel.org/r/24c898b4f44fdf8c22d93703850fb384ef87cfdc.1513035461.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 7c16fe0b60c247..b33e860d32fe80 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -361,7 +361,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) * * No need for ist_enter here because we don't use RCU. */ - if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && + if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY && regs->cs == __KERNEL_CS && regs->ip == (unsigned long)native_irq_return_iret) { From 3e133155f22d59b0085ef63cbcbcfbd96c4d0b70 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 31 Dec 2017 11:24:34 +0100 Subject: [PATCH 1197/3326] x86/ldt: Plug memory leak in error path commit a62d69857aab4caa43049e72fe0ed5c4a60518dd upstream. The error path in write_ldt() tries to free 'old_ldt' instead of the newly allocated 'new_ldt', resulting in a memory leak. It also misses to clean up a half populated LDT pagetable, which is not a leak as it gets cleaned up when the process exits. Free both the potentially half populated LDT pagetable and the newly allocated LDT struct. 
This can be done unconditionally because once an LDT is mapped subsequent maps will succeed, because the PTE page is already populated and the two LDTs fit into that single page. Reported-by: Mathieu Desnoyers Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: Dominik Brodowski Cc: Linus Torvalds Cc: Linus Torvalds Cc: Peter Zijlstra Fixes: f55f0501cbf6 ("x86/pti: Put the LDT in its own PGD if PTI is on") Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1712311121340.1899@nanos Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/ldt.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 579cc4a66fdf66..500e90e44f8669 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -421,7 +421,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) */ error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0); if (error) { - free_ldt_struct(old_ldt); + /* + * This only can fail for the first LDT setup. If an LDT is + * already installed then the PTE page is already + * populated. Mop up a half populated page table. + */ + free_ldt_pgtables(mm); + free_ldt_struct(new_ldt); goto out_unlock; } From 57849de13c7da3385611a1b9aca5c6040ef7dfe2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 31 Dec 2017 16:52:15 +0100 Subject: [PATCH 1198/3326] x86/ldt: Make LDT pgtable free conditional commit 7f414195b0c3612acd12b4611a5fe75995cf10c7 upstream. Andy prefers to be paranoid about the pagetable free in the error path of write_ldt(). Make it conditional and warn whenever the installment of a secondary LDT fails. Requested-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/ldt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 500e90e44f8669..26d713ecad34a8 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -426,7 +426,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) * already installed then the PTE page is already * populated. Mop up a half populated page table. */ - free_ldt_pgtables(mm); + if (!WARN_ON_ONCE(old_ldt)) + free_ldt_pgtables(mm); free_ldt_struct(new_ldt); goto out_unlock; } From aaa5a91ff744f91fb1d1c91853aa0c8f126be563 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 20 Dec 2017 17:57:06 -0800 Subject: [PATCH 1199/3326] n_tty: fix EXTPROC vs ICANON interaction with TIOCINQ (aka FIONREAD) commit 966031f340185eddd05affcf72b740549f056348 upstream. We added support for EXTPROC back in 2010 in commit 26df6d13406d ("tty: Add EXTPROC support for LINEMODE") and the intent was to allow it to override some (all?) ICANON behavior. Quoting from that original commit message: There is a new bit in the termios local flag word, EXTPROC. When this bit is set, several aspects of the terminal driver are disabled. Input line editing, character echo, and mapping of signals are all disabled. This allows the telnetd to turn off these functions when in linemode, but still keep track of what state the user wants the terminal to be in. but the problem turns out that "several aspects of the terminal driver are disabled" is a bit ambiguous, and you can really confuse the n_tty layer by setting EXTPROC and then causing some of the ICANON invariants to no longer be maintained. 
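One of the affected paths is the TIOCINQ/FIONREAD queue-length query, which userspace typically issues as in the minimal sketch below (TIOCINQ and FIONREAD are the same ioctl on Linux; using stdin here is purely illustrative):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int queued = 0;

		if (ioctl(STDIN_FILENO, TIOCINQ, &queued) < 0)
			perror("TIOCINQ");
		else
			printf("%d byte(s) of input queued\n", queued);
		return 0;
	}
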
This fixes at least one such case (TIOCINQ) becoming unhappy because of the confusion over whether ICANON really means ICANON when EXTPROC is set. This basically makes TIOCINQ match the case of read: if EXTPROC is set, we ignore ICANON. Also, make sure to reset the ICANON state ie EXTPROC changes, not just if ICANON changes. Fixes: 26df6d13406d ("tty: Add EXTPROC support for LINEMODE") Reported-by: Tetsuo Handa Reported-by: syzkaller Cc: Jiri Slaby Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/tty/n_tty.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index bdf0e6e8999148..faf50df816224f 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1764,7 +1764,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) { struct n_tty_data *ldata = tty->disc_data; - if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) { + if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) { bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); ldata->line_start = ldata->read_tail; if (!L_ICANON(tty) || !read_cnt(ldata)) { @@ -2427,7 +2427,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file, return put_user(tty_chars_in_buffer(tty), (int __user *) arg); case TIOCINQ: down_write(&tty->termios_rwsem); - if (L_ICANON(tty)) + if (L_ICANON(tty) && !L_EXTPROC(tty)) retval = inq_canon(ldata); else retval = read_cnt(ldata); From 3ade66602bb7ea348ed8cbafcdd56eb1826c2177 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 3 Nov 2017 15:18:05 +0100 Subject: [PATCH 1200/3326] tty: fix tty_ldisc_receive_buf() documentation commit e7e51dcf3b8a5f65c5653a054ad57eb2492a90d0 upstream. The tty_ldisc_receive_buf() helper returns the number of bytes processed so drop the bogus "not" from the kernel doc comment. Fixes: 8d082cd300ab ("tty: Unify receive_buf() code paths") Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/tty/tty_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index f8eba1c5412f95..677fa99b774784 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -446,7 +446,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string); * Callers other than flush_to_ldisc() need to exclude the kworker * from concurrent use of the line discipline, see paste_selection(). * - * Returns the number of bytes not processed + * Returns the number of bytes processed */ int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, char *f, int count) From 0d59679df5b53755c00ea0292df696f97bfc950d Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 2 Jan 2018 20:31:17 +0100 Subject: [PATCH 1201/3326] Linux 4.14.11 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 930fa93f397d7b..655887067dc7c6 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 10 +SUBLEVEL = 11 EXTRAVERSION = NAME = Petit Gorille From 611583d327961acad4405063fc7c690ac2cf21c3 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 2 Jan 2018 15:21:33 -0800 Subject: [PATCH 1202/3326] exec: Weaken dumpability for secureexec commit e816c201aed5232171f8eb80b5d46ae6516683b9 upstream. 
This is a logical revert of commit e37fdb785a5f ("exec: Use secureexec for setting dumpability") This weakens dumpability back to checking only for uid/gid changes in current (which is useless), but userspace depends on dumpability not being tied to secureexec. https://bugzilla.redhat.com/show_bug.cgi?id=1528633 Reported-by: Tom Horsley Fixes: e37fdb785a5f ("exec: Use secureexec for setting dumpability") Signed-off-by: Kees Cook Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/exec.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index 3e14ba25f678bf..acec119fcc3141 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1350,9 +1350,14 @@ void setup_new_exec(struct linux_binprm * bprm) current->sas_ss_sp = current->sas_ss_size = 0; - /* Figure out dumpability. */ + /* + * Figure out dumpability. Note that this checking only of current + * is wrong, but userspace depends on it. This should be testing + * bprm->secureexec instead. + */ if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP || - bprm->secureexec) + !(uid_eq(current_euid(), current_uid()) && + gid_eq(current_egid(), current_gid()))) set_dumpable(current->mm, suid_dumpable); else set_dumpable(current->mm, SUID_DUMP_USER); From df4373c513b310d75aafeb8902917e7f8d0fe6a5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 1 Jan 2018 09:28:31 -0600 Subject: [PATCH 1203/3326] capabilities: fix buffer overread on very short xattr commit dc32b5c3e6e2ef29cef76d9ce1b92d394446150e upstream. If userspace attempted to set a "security.capability" xattr shorter than 4 bytes (e.g. 'setfattr -n security.capability -v x file'), then cap_convert_nscap() read past the end of the buffer containing the xattr value because it accessed the ->magic_etc field without verifying that the xattr value is long enough to contain that field. Fix it by validating the xattr value size first. This bug was found using syzkaller with KASAN. 
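For illustration, the setfattr invocation above corresponds roughly to the following sketch (the file name is arbitrary, and CAP_SETFCAP is needed to get past the permission check):

	#include <stdio.h>
	#include <sys/xattr.h>

	int main(void)
	{
		/* a 1-byte value, shorter than the 4-byte magic_etc field */
		if (setxattr("file", "security.capability", "x", 1, 0) < 0)
			perror("setxattr");	/* -EINVAL once this fix is applied */
		return 0;
	}
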
The KASAN report was as follows (cleaned up slightly): BUG: KASAN: slab-out-of-bounds in cap_convert_nscap+0x514/0x630 security/commoncap.c:498 Read of size 4 at addr ffff88002d8741c0 by task syz-executor1/2852 CPU: 0 PID: 2852 Comm: syz-executor1 Not tainted 4.15.0-rc6-00200-gcc0aac99d977 #253 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.11.0-20171110_100015-anatol 04/01/2014 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0xe3/0x195 lib/dump_stack.c:53 print_address_description+0x73/0x260 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 [inline] kasan_report+0x235/0x350 mm/kasan/report.c:409 cap_convert_nscap+0x514/0x630 security/commoncap.c:498 setxattr+0x2bd/0x350 fs/xattr.c:446 path_setxattr+0x168/0x1b0 fs/xattr.c:472 SYSC_setxattr fs/xattr.c:487 [inline] SyS_setxattr+0x36/0x50 fs/xattr.c:483 entry_SYSCALL_64_fastpath+0x18/0x85 Fixes: 8db6c34f1dbc ("Introduce v3 namespaced file capabilities") Signed-off-by: Eric Biggers Reviewed-by: Serge Hallyn Signed-off-by: James Morris Signed-off-by: Greg Kroah-Hartman --- security/commoncap.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/security/commoncap.c b/security/commoncap.c index fc46f5b8525104..7b01431d1e1975 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -348,21 +348,18 @@ static __u32 sansflags(__u32 m) return m & ~VFS_CAP_FLAGS_EFFECTIVE; } -static bool is_v2header(size_t size, __le32 magic) +static bool is_v2header(size_t size, const struct vfs_cap_data *cap) { - __u32 m = le32_to_cpu(magic); if (size != XATTR_CAPS_SZ_2) return false; - return sansflags(m) == VFS_CAP_REVISION_2; + return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2; } -static bool is_v3header(size_t size, __le32 magic) +static bool is_v3header(size_t size, const struct vfs_cap_data *cap) { - __u32 m = le32_to_cpu(magic); - if (size != XATTR_CAPS_SZ_3) return false; - return sansflags(m) == VFS_CAP_REVISION_3; + return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3; } /* @@ -405,7 +402,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, fs_ns = inode->i_sb->s_user_ns; cap = (struct vfs_cap_data *) tmpbuf; - if (is_v2header((size_t) ret, cap->magic_etc)) { + if (is_v2header((size_t) ret, cap)) { /* If this is sizeof(vfs_cap_data) then we're ok with the * on-disk value, so return that. 
*/ if (alloc) @@ -413,7 +410,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, else kfree(tmpbuf); return ret; - } else if (!is_v3header((size_t) ret, cap->magic_etc)) { + } else if (!is_v3header((size_t) ret, cap)) { kfree(tmpbuf); return -EINVAL; } @@ -470,9 +467,9 @@ static kuid_t rootid_from_xattr(const void *value, size_t size, return make_kuid(task_ns, rootid); } -static bool validheader(size_t size, __le32 magic) +static bool validheader(size_t size, const struct vfs_cap_data *cap) { - return is_v2header(size, magic) || is_v3header(size, magic); + return is_v2header(size, cap) || is_v3header(size, cap); } /* @@ -495,7 +492,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size) if (!*ivalue) return -EINVAL; - if (!validheader(size, cap->magic_etc)) + if (!validheader(size, cap)) return -EINVAL; if (!capable_wrt_inode_uidgid(inode, CAP_SETFCAP)) return -EPERM; From 151d7039757b71ebd9d170af0944562f51149372 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Tue, 26 Dec 2017 23:43:54 -0600 Subject: [PATCH 1204/3326] x86/cpu, x86/pti: Do not enable PTI on AMD processors commit 694d99d40972f12e59a3696effee8a376b79d7c8 upstream. AMD processors are not subject to the types of attacks that the kernel page table isolation feature protects against. The AMD microarchitecture does not allow memory references, including speculative references, that access higher privileged data when running in a lesser privileged mode when that access would result in a page fault. Disable page table isolation by default on AMD processors by not setting the X86_BUG_CPU_INSECURE feature, which controls whether X86_FEATURE_PTI is set. Signed-off-by: Tom Lendacky Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Dave Hansen Cc: Andy Lutomirski Link: https://lkml.kernel.org/r/20171227054354.20369.94587.stgit@tlendack-t1.amdoffice.net Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f2a94dfb434e9a..b1be494ab4e8ba 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -899,8 +899,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) setup_force_cpu_cap(X86_FEATURE_ALWAYS); - /* Assume for now that ALL x86 CPUs are insecure */ - setup_force_cpu_bug(X86_BUG_CPU_INSECURE); + if (c->x86_vendor != X86_VENDOR_AMD) + setup_force_cpu_bug(X86_BUG_CPU_INSECURE); fpu__init_system(c); From 211ad3fdf63383772d492add846da4b0b2266531 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 3 Jan 2018 15:57:59 +0100 Subject: [PATCH 1205/3326] x86/pti: Make sure the user/kernel PTEs match commit 52994c256df36fda9a715697431cba9daecb6b11 upstream. Meelis reported that his K8 Athlon64 emits MCE warnings when PTI is enabled: [Hardware Error]: Error Addr: 0x0000ffff81e000e0 [Hardware Error]: MC1 Error: L1 TLB multimatch. [Hardware Error]: cache level: L1, tx: INSN The address is in the entry area, which is mapped into kernel _AND_ user space. That's special because we switch CR3 while we are executing there. User mapping: 0xffffffff81e00000-0xffffffff82000000 2M ro PSE GLB x pmd Kernel mapping: 0xffffffff81000000-0xffffffff82000000 16M ro PSE x pmd So the K8 is complaining that the TLB entries differ. They differ in the GLB bit. Drop the GLB bit when installing the user shared mapping. 
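Note for readers of the hunk below: the third argument of pti_clone_pmds() is the set of PMD bits to be cleared in the user-visible copy, so adding _PAGE_GLOBAL there is what drops the GLB bit from the shared entry-text mapping.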
Fixes: 6dc72c3cbca0 ("x86/mm/pti: Share entry text PMD") Reported-by: Meelis Roos Signed-off-by: Thomas Gleixner Tested-by: Meelis Roos Cc: Borislav Petkov Cc: Tom Lendacky Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801031407180.1957@nanos Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/pti.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index bce8aea6560621..2da28ba9750824 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -367,7 +367,8 @@ static void __init pti_setup_espfix64(void) static void __init pti_clone_entry_text(void) { pti_clone_pmds((unsigned long) __entry_text_start, - (unsigned long) __irqentry_text_end, _PAGE_RW); + (unsigned long) __irqentry_text_end, + _PAGE_RW | _PAGE_GLOBAL); } /* From a841ba8f4d7e37acc9d46f982f418ee8e876a752 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Sun, 31 Dec 2017 10:18:06 -0600 Subject: [PATCH 1206/3326] x86/dumpstack: Fix partial register dumps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a9cdbe72c4e8bf3b38781c317a79326e2e1a230d upstream. The show_regs_safe() logic is wrong. When there's an iret stack frame, it prints the entire pt_regs -- most of which is random stack data -- instead of just the five registers at the end. show_regs_safe() is also poorly named: the on_stack() checks aren't for safety. Rename the function to show_regs_if_on_stack() and add a comment to explain why the checks are needed. These issues were introduced with the "partial register dump" feature of the following commit: b02fcf9ba121 ("x86/unwinder: Handle stack overflows more gracefully") That patch had gone through a few iterations of development, and the above issues were artifacts from a previous iteration of the patch where 'regs' pointed directly to the iret frame rather than to the (partially empty) pt_regs. Tested-by: Alexander Tsoy Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Toralf Förster Fixes: b02fcf9ba121 ("x86/unwinder: Handle stack overflows more gracefully") Link: http://lkml.kernel.org/r/5b05b8b344f59db2d3d50dbdeba92d60f2304c54.1514736742.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/unwind.h | 17 +++++++++++++---- arch/x86/kernel/dumpstack.c | 28 ++++++++++++++++++++-------- arch/x86/kernel/stacktrace.c | 2 +- 3 files changed, 34 insertions(+), 13 deletions(-) diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index c1688c2d0a128f..1f86e1b0a5cdc1 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -56,18 +56,27 @@ void unwind_start(struct unwind_state *state, struct task_struct *task, #if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER) /* - * WARNING: The entire pt_regs may not be safe to dereference. In some cases, - * only the iret frame registers are accessible. Use with caution! + * If 'partial' returns true, only the iret frame registers are valid. 
*/ -static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) +static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state, + bool *partial) { if (unwind_done(state)) return NULL; + if (partial) { +#ifdef CONFIG_UNWINDER_ORC + *partial = !state->full_regs; +#else + *partial = false; +#endif + } + return state->regs; } #else -static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) +static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state, + bool *partial) { return NULL; } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 5fa110699ed275..d0bb176a7261ae 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -76,12 +76,23 @@ void show_iret_regs(struct pt_regs *regs) regs->sp, regs->flags); } -static void show_regs_safe(struct stack_info *info, struct pt_regs *regs) +static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs, + bool partial) { - if (on_stack(info, regs, sizeof(*regs))) + /* + * These on_stack() checks aren't strictly necessary: the unwind code + * has already validated the 'regs' pointer. The checks are done for + * ordering reasons: if the registers are on the next stack, we don't + * want to print them out yet. Otherwise they'll be shown as part of + * the wrong stack. Later, when show_trace_log_lvl() switches to the + * next stack, this function will be called again with the same regs so + * they can be printed in the right context. + */ + if (!partial && on_stack(info, regs, sizeof(*regs))) { __show_regs(regs, 0); - else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET, - IRET_FRAME_SIZE)) { + + } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET, + IRET_FRAME_SIZE)) { /* * When an interrupt or exception occurs in entry code, the * full pt_regs might not have been saved yet. In that case @@ -98,6 +109,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, struct stack_info stack_info = {0}; unsigned long visit_mask = 0; int graph_idx = 0; + bool partial; printk("%sCall Trace:\n", log_lvl); @@ -140,7 +152,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, printk("%s <%s>\n", log_lvl, stack_name); if (regs) - show_regs_safe(&stack_info, regs); + show_regs_if_on_stack(&stack_info, regs, partial); /* * Scan the stack, printing any text addresses we find. At the @@ -164,7 +176,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, /* * Don't print regs->ip again if it was already printed - * by show_regs_safe() below. + * by show_regs_if_on_stack(). 
*/ if (regs && stack == ®s->ip) goto next; @@ -199,9 +211,9 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unwind_next_frame(&state); /* if the frame has entry regs, print them */ - regs = unwind_get_entry_regs(&state); + regs = unwind_get_entry_regs(&state, &partial); if (regs) - show_regs_safe(&stack_info, regs); + show_regs_if_on_stack(&stack_info, regs, partial); } if (stack_name) diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 8dabd7bf167309..60244bfaf88f69 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -98,7 +98,7 @@ static int __save_stack_trace_reliable(struct stack_trace *trace, for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state); unwind_next_frame(&state)) { - regs = unwind_get_entry_regs(&state); + regs = unwind_get_entry_regs(&state, NULL); if (regs) { /* * Kernel mode registers on the stack indicate an From 89c96233a9ed8ee76d6e8e6a13ea19b92971d33f Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Sun, 31 Dec 2017 10:18:07 -0600 Subject: [PATCH 1207/3326] x86/dumpstack: Print registers for first stack frame MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 3ffdeb1a02be3086f1411a15c5b9c481fa28e21f upstream. In the stack dump code, if the frame after the starting pt_regs is also a regs frame, the registers don't get printed. Fix that. Reported-by: Andy Lutomirski Tested-by: Alexander Tsoy Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Toralf Förster Fixes: 3b3fa11bc700 ("x86/dumpstack: Print any pt_regs found on the stack") Link: http://lkml.kernel.org/r/396f84491d2f0ef64eda4217a2165f5712f6a115.1514736742.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/dumpstack.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index d0bb176a7261ae..afbecff161d162 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -115,6 +115,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unwind_start(&state, task, regs, stack); stack = stack ? : get_stack_pointer(task, regs); + regs = unwind_get_entry_regs(&state, &partial); /* * Iterate through the stacks, starting with the current stack pointer. @@ -132,7 +133,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, * - hardirq stack * - entry stack */ - for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { + for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { const char *stack_name; if (get_stack_info(stack, task, &stack_info, &visit_mask)) { From 7930cb29fb5feb6d18ffc20a83a1d3e5afac7a8a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 3 Jan 2018 19:52:04 +0100 Subject: [PATCH 1208/3326] x86/pti: Switch to kernel CR3 at early in entry_SYSCALL_compat() commit d7732ba55c4b6a2da339bb12589c515830cfac2c upstream. The preparation for PTI which added CR3 switching to the entry code misplaced the CR3 switch in entry_SYSCALL_compat(). With PTI enabled the entry code tries to access a per cpu variable after switching to kernel GS. This fails because that variable is not mapped to user space. This results in a double fault and in the worst case a kernel crash. Move the switch ahead of the access and clobber RSP which has been saved already. 
Fixes: 8a09317b895f ("x86/mm/pti: Prepare the x86/entry assembly code for entry/exit CR3 switching") Reported-by: Lars Wendler Reported-by: Laura Abbott Signed-off-by: Thomas Gleixner Cc: Borislav Betkov Cc: Andy Lutomirski , Cc: Dave Hansen , Cc: Peter Zijlstra , Cc: Greg KH , , Cc: Boris Ostrovsky , Cc: Juergen Gross Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801031949200.1957@nanos Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64_compat.S | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 40f17009ec20cd..98d5358e4041a7 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -190,8 +190,13 @@ ENTRY(entry_SYSCALL_compat) /* Interrupts are off on entry. */ swapgs - /* Stash user ESP and switch to the kernel stack. */ + /* Stash user ESP */ movl %esp, %r8d + + /* Use %rsp as scratch reg. User ESP is stashed in r8 */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + + /* Switch to the kernel stack */ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* Construct struct pt_regs on stack */ @@ -219,12 +224,6 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) pushq $0 /* pt_regs->r14 = 0 */ pushq $0 /* pt_regs->r15 = 0 */ - /* - * We just saved %rdi so it is safe to clobber. It is not - * preserved during the C calls inside TRACE_IRQS_OFF anyway. - */ - SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi - /* * User mode is traced as though IRQs are on, and SYSENTER * turned them off. From 88dff1ab04d2792492e3f7e166d039f0dc4e1a09 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Wed, 3 Jan 2018 12:39:52 -0800 Subject: [PATCH 1209/3326] x86/process: Define cpu_tss_rw in same section as declaration commit 2fd9c41aea47f4ad071accf94b94f94f2c4d31eb upstream. cpu_tss_rw is declared with DECLARE_PER_CPU_PAGE_ALIGNED but then defined with DEFINE_PER_CPU_SHARED_ALIGNED leading to section mismatch warnings. Use DEFINE_PER_CPU_PAGE_ALIGNED consistently. This is necessary because it's mapped to the cpu entry area and must be page aligned. [ tglx: Massaged changelog a bit ] Fixes: 1a935bc3d4ea ("x86/entry: Move SYSENTER_stack to the beginning of struct tss_struct") Suggested-by: Thomas Gleixner Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Cc: thomas.lendacky@amd.com Cc: Borislav Petkov Cc: tklauser@distanz.ch Cc: minipli@googlemail.com Cc: me@kylehuey.com Cc: namit@vmware.com Cc: luto@kernel.org Cc: jpoimboe@redhat.com Cc: tj@kernel.org Cc: cl@linux.com Cc: bp@suse.de Cc: thgarnie@google.com Cc: kirill.shutemov@linux.intel.com Link: https://lkml.kernel.org/r/20180103203954.183360-1-ndesaulniers@google.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/process.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 51741597840936..3cb2486c47e48a 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -47,7 +47,7 @@ * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. 
*/ -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = { +__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = { .x86_tss = { /* * .sp0 is only used when entering ring 0 from a lower From 2d01ac8cc12b973668bf898b03bf9ffb12d83b83 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 15 Nov 2017 06:40:57 +0100 Subject: [PATCH 1210/3326] Revert "xfrm: Fix stack-out-of-bounds read in xfrm_state_find." commit 94802151894d482e82c324edf2c658f8e6b96508 upstream. This reverts commit c9f3f813d462c72dbe412cee6a5cbacf13c4ad5e. This commit breaks transport mode when the policy template has widlcard addresses configured, so revert it. Signed-off-by: Steffen Klassert Cc: From: Derek Robson Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_policy.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 2a6093840e7e85..6bc16bb61b5533 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1362,29 +1362,36 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, struct net *net = xp_net(policy); int nx; int i, error; + xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); + xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); xfrm_address_t tmp; for (nx = 0, i = 0; i < policy->xfrm_nr; i++) { struct xfrm_state *x; - xfrm_address_t *local; - xfrm_address_t *remote; + xfrm_address_t *remote = daddr; + xfrm_address_t *local = saddr; struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; - remote = &tmpl->id.daddr; - local = &tmpl->saddr; - if (xfrm_addr_any(local, tmpl->encap_family)) { - error = xfrm_get_saddr(net, fl->flowi_oif, - &tmp, remote, - tmpl->encap_family, 0); - if (error) - goto fail; - local = &tmp; + if (tmpl->mode == XFRM_MODE_TUNNEL || + tmpl->mode == XFRM_MODE_BEET) { + remote = &tmpl->id.daddr; + local = &tmpl->saddr; + if (xfrm_addr_any(local, tmpl->encap_family)) { + error = xfrm_get_saddr(net, fl->flowi_oif, + &tmp, remote, + tmpl->encap_family, 0); + if (error) + goto fail; + local = &tmp; + } } x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family); if (x && x->km.state == XFRM_STATE_VALID) { xfrm[nx++] = x; + daddr = remote; + saddr = local; continue; } if (x) { From 56073ba4e7068a737a225631b5b642a2cd105b7a Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Thu, 2 Nov 2017 18:58:12 -0700 Subject: [PATCH 1211/3326] rtc: m41t80: m41t80_sqw_set_rate should return 0 on success commit de6042d2fa8afe22b76e3c68fd6e9584c9415a3b upstream. Previously it was returning -EINVAL upon success. Signed-off-by: Troy Kisky Signed-off-by: Alexandre Belloni Cc: Christoph Fritz Signed-off-by: Greg Kroah-Hartman --- drivers/rtc/rtc-m41t80.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index f4c070ea83849a..8f5843169dc247 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -510,10 +510,7 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, reg = (reg & 0x0f) | (val << 4); ret = i2c_smbus_write_byte_data(client, reg_sqw, reg); - if (ret < 0) - return ret; - - return -EINVAL; + return ret; } static int m41t80_sqw_control(struct clk_hw *hw, bool enable) From 8932255e96ecd172783c2c5a3a414066bda844cf Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Thu, 2 Nov 2017 18:58:13 -0700 Subject: [PATCH 1212/3326] rtc: m41t80: fix m41t80_sqw_round_rate return value commit c8384bb04261b9d32fe7402a6068ddaf38913b23 upstream. 
Previously it was returning the best of 32768, 8192, 1024, 64, 2, 0 Now, best of 32768, 8192, 4096, 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1, 0 Signed-off-by: Troy Kisky Signed-off-by: Alexandre Belloni Cc: Christoph Fritz Signed-off-by: Greg Kroah-Hartman --- drivers/rtc/rtc-m41t80.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 8f5843169dc247..42fc735a54461b 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -468,18 +468,13 @@ static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw, static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { - int i, freq = M41T80_SQW_MAX_FREQ; - - if (freq <= rate) - return freq; - - for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) { - freq /= 1 << i; - if (freq <= rate) - return freq; - } - - return 0; + if (rate >= M41T80_SQW_MAX_FREQ) + return M41T80_SQW_MAX_FREQ; + if (rate >= M41T80_SQW_MAX_FREQ / 4) + return M41T80_SQW_MAX_FREQ / 4; + if (!rate) + return 0; + return 1 << ilog2(rate); } static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, From 11118640ae1705c57699a394d0754d4c699772c6 Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Thu, 2 Nov 2017 18:58:14 -0700 Subject: [PATCH 1213/3326] rtc: m41t80: avoid i2c read in m41t80_sqw_recalc_rate commit 2cb90ed3de1e279dbaf23df141f54eb9fb1861e6 upstream. This is a little more efficient, and avoids the warning WARNING: possible circular locking dependency detected 4.14.0-rc7-00007 #14 Not tainted ------------------------------------------------------ alsactl/330 is trying to acquire lock: (prepare_lock){+.+.}, at: [] clk_prepare_lock+0x80/0xf4 but task is already holding lock: (i2c_register_adapter){+.+.}, at: [] i2c_adapter_lock_bus+0x14/0x18 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #1 (i2c_register_adapter){+.+.}: rt_mutex_lock+0x44/0x5c i2c_adapter_lock_bus+0x14/0x18 i2c_transfer+0xa8/0xbc i2c_smbus_xfer+0x20c/0x5d8 i2c_smbus_read_byte_data+0x38/0x48 m41t80_sqw_recalc_rate+0x24/0x58 Signed-off-by: Troy Kisky Signed-off-by: Alexandre Belloni Cc: Christoph Fritz Signed-off-by: Greg Kroah-Hartman --- drivers/rtc/rtc-m41t80.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 42fc735a54461b..f44dcf628c87b7 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -154,6 +154,7 @@ struct m41t80_data { struct rtc_device *rtc; #ifdef CONFIG_COMMON_CLK struct clk_hw sqw; + unsigned long freq; #endif }; @@ -443,26 +444,28 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume); #ifdef CONFIG_COMMON_CLK #define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw) -static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static unsigned long m41t80_decode_freq(int setting) +{ + return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ : + M41T80_SQW_MAX_FREQ >> setting; +} + +static unsigned long m41t80_get_freq(struct m41t80_data *m41t80) { - struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); struct i2c_client *client = m41t80->client; int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ? 
M41T80_REG_WDAY : M41T80_REG_SQW; int ret = i2c_smbus_read_byte_data(client, reg_sqw); - unsigned long val = M41T80_SQW_MAX_FREQ; if (ret < 0) return 0; + return m41t80_decode_freq(ret >> 4); +} - ret >>= 4; - if (ret == 0) - val = 0; - else if (ret > 1) - val = val / (1 << ret); - - return val; +static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return sqw_to_m41t80_data(hw)->freq; } static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate, @@ -505,6 +508,8 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, reg = (reg & 0x0f) | (val << 4); ret = i2c_smbus_write_byte_data(client, reg_sqw, reg); + if (!ret) + m41t80->freq = m41t80_decode_freq(val); return ret; } @@ -579,6 +584,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80) init.parent_names = NULL; init.num_parents = 0; m41t80->sqw.init = &init; + m41t80->freq = m41t80_get_freq(m41t80); /* optional override of the clockname */ of_property_read_string(node, "clock-output-names", &init.name); From 7623bd95a6dccf74e30a21e437f2f789fbbe27f7 Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Thu, 2 Nov 2017 18:58:15 -0700 Subject: [PATCH 1214/3326] rtc: m41t80: avoid i2c read in m41t80_sqw_is_prepared commit 13bb1d78f2e372ec0d9b30489ac63768240140fc upstream. This is a little more efficient and avoids the warning WARNING: possible circular locking dependency detected 4.14.0-rc7-00010 #16 Not tainted ------------------------------------------------------ kworker/2:1/70 is trying to acquire lock: (prepare_lock){+.+.}, at: [] clk_prepare_lock+0x80/0xf4 but task is already holding lock: (i2c_register_adapter){+.+.}, at: [] i2c_adapter_lock_bus+0x14/0x18 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #1 (i2c_register_adapter){+.+.}: rt_mutex_lock+0x44/0x5c i2c_adapter_lock_bus+0x14/0x18 i2c_transfer+0xa8/0xbc i2c_smbus_xfer+0x20c/0x5d8 i2c_smbus_read_byte_data+0x38/0x48 m41t80_sqw_is_prepared+0x18/0x28 Signed-off-by: Troy Kisky Signed-off-by: Alexandre Belloni Cc: Christoph Fritz Signed-off-by: Greg Kroah-Hartman --- drivers/rtc/rtc-m41t80.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index f44dcf628c87b7..96a606d5f6e69b 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -155,6 +155,7 @@ struct m41t80_data { #ifdef CONFIG_COMMON_CLK struct clk_hw sqw; unsigned long freq; + unsigned int sqwe; #endif }; @@ -527,7 +528,10 @@ static int m41t80_sqw_control(struct clk_hw *hw, bool enable) else ret &= ~M41T80_ALMON_SQWE; - return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret); + ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret); + if (!ret) + m41t80->sqwe = enable; + return ret; } static int m41t80_sqw_prepare(struct clk_hw *hw) @@ -542,14 +546,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw) static int m41t80_sqw_is_prepared(struct clk_hw *hw) { - struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); - struct i2c_client *client = m41t80->client; - int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); - - if (ret < 0) - return ret; - - return !!(ret & M41T80_ALMON_SQWE); + return sqw_to_m41t80_data(hw)->sqwe; } static const struct clk_ops m41t80_sqw_ops = { From 566fb9906ee25a697f4b694dd624398712adf00b Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Thu, 2 Nov 2017 18:58:16 -0700 Subject: [PATCH 1215/3326] rtc: m41t80: remove unneeded checks 
from m41t80_sqw_set_rate commit 05a03bf260e0480bfc0db91b1fdbc2115e3f193b upstream. m41t80_sqw_set_rate will be called with the result from m41t80_sqw_round_rate, so might as well make m41t80_sqw_set_rate(n) same as m41t80_sqw_set_rate(m41t80_sqw_round_rate(n)) As Russell King wrote[1], "clk_round_rate() is supposed to tell you what you end up with if you ask clk_set_rate() to set the exact same value you passed in - but clk_round_rate() won't modify the hardware." [1] http://lists.infradead.org/pipermail/linux-arm-kernel/2012-January/080175.html Signed-off-by: Troy Kisky Signed-off-by: Alexandre Belloni Cc: Christoph Fritz Signed-off-by: Greg Kroah-Hartman --- drivers/rtc/rtc-m41t80.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 96a606d5f6e69b..c90fba3ed86188 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -490,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, M41T80_REG_WDAY : M41T80_REG_SQW; int reg, ret, val = 0; - if (rate) { - if (!is_power_of_2(rate)) - return -EINVAL; - val = ilog2(rate); - if (val == ilog2(M41T80_SQW_MAX_FREQ)) - val = 1; - else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1)) - val = ilog2(M41T80_SQW_MAX_FREQ) - val; - else - return -EINVAL; - } + if (rate >= M41T80_SQW_MAX_FREQ) + val = 1; + else if (rate >= M41T80_SQW_MAX_FREQ / 4) + val = 2; + else if (rate) + val = 15 - ilog2(rate); reg = i2c_smbus_read_byte_data(client, reg_sqw); if (reg < 0) From 8d577afdee3540808302d9dc7a0a7be96c91178f Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 5 Jan 2018 15:48:59 +0100 Subject: [PATCH 1216/3326] Linux 4.14.12 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 655887067dc7c6..20f7d4de0f1c30 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 11 +SUBLEVEL = 12 EXTRAVERSION = NAME = Petit Gorille From 7adf28df2f0bdeb2141f6e8edc2b746d4a6cc11f Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Thu, 28 Dec 2017 19:06:20 +0300 Subject: [PATCH 1217/3326] x86/mm: Set MODULES_END to 0xffffffffff000000 commit f5a40711fa58f1c109165a4fec6078bf2dfd2bdc upstream. Since f06bdd4001c2 ("x86/mm: Adapt MODULES_END based on fixmap section size") kasan_mem_to_shadow(MODULES_END) could be not aligned to a page boundary. So passing page unaligned address to kasan_populate_zero_shadow() have two possible effects: 1) It may leave one page hole in supposed to be populated area. After commit 21506525fb8d ("x86/kasan/64: Teach KASAN about the cpu_entry_area") that hole happens to be in the shadow covering fixmap area and leads to crash: BUG: unable to handle kernel paging request at fffffbffffe8ee04 RIP: 0010:check_memory_region+0x5c/0x190 Call Trace: memcpy+0x1f/0x50 ghes_copy_tofrom_phys+0xab/0x180 ghes_read_estatus+0xfb/0x280 ghes_notify_nmi+0x2b2/0x410 nmi_handle+0x115/0x2c0 default_do_nmi+0x57/0x110 do_nmi+0xf8/0x150 end_repeat_nmi+0x1a/0x1e Note, the crash likely disappeared after commit 92a0f81d8957, which changed kasan_populate_zero_shadow() call the way it was before commit 21506525fb8d. 2) Attempt to load module near MODULES_END will fail, because __vmalloc_node_range() called from kasan_module_alloc() will hit the WARN_ON(!pte_none(*pte)) in the vmap_pte_range() and bail out with error. 
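For reference, the shadow translation behind both symptoms is roughly the following (generic KASAN scaling, 8 bytes of address space per shadow byte):

	/*
	 *	shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT)	// i.e. addr >> 3
	 *		       + KASAN_SHADOW_OFFSET
	 *
	 * so kasan_mem_to_shadow(MODULES_END) is PAGE_SIZE aligned only if
	 * MODULES_END itself is 8 * PAGE_SIZE aligned.
	 */
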
To fix this we need to make kasan_mem_to_shadow(MODULES_END) page aligned which means that MODULES_END should be 8*PAGE_SIZE aligned. The whole point of commit f06bdd4001c2 was to move MODULES_END down if NR_CPUS is big, so the cpu_entry_area takes a lot of space. But since 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap") the cpu_entry_area is no longer in fixmap, so we could just set MODULES_END to a fixed 8*PAGE_SIZE aligned address. Fixes: f06bdd4001c2 ("x86/mm: Adapt MODULES_END based on fixmap section size") Reported-by: Jakub Kicinski Signed-off-by: Andrey Ryabinin Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Thomas Garnier Link: https://lkml.kernel.org/r/20171228160620.23818-1-aryabinin@virtuozzo.com Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/x86_64/mm.txt | 5 +---- arch/x86/include/asm/pgtable_64_types.h | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index ad41b3813f0a3a..ddd5ffd31bd040 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -43,7 +43,7 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ... unused hole ... ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 -ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space +ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space [fixmap start] - ffffffffff5fffff kernel-internal fixmap range ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole @@ -67,9 +67,6 @@ memory window (this size is arbitrary, it can be raised later if needed). The mappings are not part of any other kernel PGD and are only available during EFI runtime calls. -The module mapping space size changes based on the CONFIG requirements for the -following fixmap section. - Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all physical memory, vmalloc/ioremap space and virtual memory map are randomized. Their order is preserved but their base will be offset early at boot time. diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index b97a539bcdeee8..6233e5595389b9 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -104,7 +104,7 @@ typedef struct { pteval_t pte; } pte_t; #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) /* The module sections ends with the start of the fixmap */ -#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) +#define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) #define ESPFIX_PGD_ENTRY _AC(-2, UL) From 1af9b74bfa59250f69578f6e6424aae7cb46d063 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 4 Jan 2018 13:01:40 +0100 Subject: [PATCH 1218/3326] x86/mm: Map cpu_entry_area at the same place on 4/5 level commit f2078904810373211fb15f91888fba14c01a4acc upstream. There is no reason for 4 and 5 level pagetables to have a different layout. It just makes determining vaddr_end for KASLR harder than necessary. 
Fixes: 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap") Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Cc: Benjamin Gilbert Cc: Greg Kroah-Hartman Cc: Dave Hansen Cc: Peter Zijlstra Cc: Thomas Garnier Cc: Alexander Kuleshov Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801041320360.1771@nanos Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/x86_64/mm.txt | 7 ++++--- arch/x86/include/asm/pgtable_64_types.h | 4 ++-- arch/x86/mm/dump_pagetables.c | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index ddd5ffd31bd040..f7dabe1f01e9b5 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -12,8 +12,8 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) ... unused hole ... ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) ... unused hole ... -fffffe0000000000 - fffffe7fffffffff (=39 bits) LDT remap for PTI -fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping +fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping +fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space @@ -37,7 +37,8 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) ... unused hole ... ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) ... unused hole ... -fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping +fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping +... unused hole ... ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 6233e5595389b9..61b4b60bdc133d 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -88,7 +88,7 @@ typedef struct { pteval_t pte; } pte_t; # define VMALLOC_SIZE_TB _AC(32, UL) # define __VMALLOC_BASE _AC(0xffffc90000000000, UL) # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) -# define LDT_PGD_ENTRY _AC(-4, UL) +# define LDT_PGD_ENTRY _AC(-3, UL) # define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #endif @@ -110,7 +110,7 @@ typedef struct { pteval_t pte; } pte_t; #define ESPFIX_PGD_ENTRY _AC(-2, UL) #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) -#define CPU_ENTRY_AREA_PGD _AC(-3, UL) +#define CPU_ENTRY_AREA_PGD _AC(-4, UL) #define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT) #define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index f56902c1f04b94..2a4849e92831b0 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -61,10 +61,10 @@ enum address_markers_idx { KASAN_SHADOW_START_NR, KASAN_SHADOW_END_NR, #endif + CPU_ENTRY_AREA_NR, #if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL) LDT_NR, #endif - CPU_ENTRY_AREA_NR, #ifdef CONFIG_X86_ESPFIX64 ESPFIX_START_NR, #endif From 67f67244f80a37fba03c9e6011853dba94952c7c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 4 Jan 2018 12:32:03 +0100 Subject: [PATCH 1219/3326] x86/kaslr: Fix the vaddr_end mess commit 1dddd25125112ba49706518ac9077a1026a18f37 upstream. 
vaddr_end for KASLR is only documented in the KASLR code itself and is adjusted depending on config options. So it's not surprising that a change of the memory layout causes KASLR to have the wrong vaddr_end. This can map arbitrary stuff into other areas causing hard to understand problems. Remove the whole ifdef magic and define the start of the cpu_entry_area to be the end of the KASLR vaddr range. Add documentation to that effect. Fixes: 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap") Reported-by: Benjamin Gilbert Signed-off-by: Thomas Gleixner Tested-by: Benjamin Gilbert Cc: Andy Lutomirski Cc: Greg Kroah-Hartman Cc: Dave Hansen Cc: Peter Zijlstra Cc: Thomas Garnier Cc: Alexander Kuleshov Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801041320360.1771@nanos Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/x86_64/mm.txt | 6 +++++ arch/x86/include/asm/pgtable_64_types.h | 8 ++++++- arch/x86/mm/kaslr.c | 32 +++++++------------------ 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index f7dabe1f01e9b5..ea91cb61a60297 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) ... unused hole ... ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) ... unused hole ... + vaddr_end for KASLR fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks @@ -37,6 +38,7 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) ... unused hole ... ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) ... unused hole ... + vaddr_end for KASLR fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping ... unused hole ... ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks @@ -71,3 +73,7 @@ during EFI runtime calls. Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all physical memory, vmalloc/ioremap space and virtual memory map are randomized. Their order is preserved but their base will be offset early at boot time. + +Be very careful vs. KASLR when changing anything here. The KASLR address +range must not overlap with anything except the KASAN shadow area, which is +correct as KASAN disables KASLR. diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 61b4b60bdc133d..6b8f73dcbc2c2b 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -75,7 +75,13 @@ typedef struct { pteval_t pte; } pte_t; #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) -/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ +/* + * See Documentation/x86/x86_64/mm.txt for a description of the memory map. + * + * Be very careful vs. KASLR when changing anything here. The KASLR address + * range must not overlap with anything except the KASAN shadow area, which + * is correct as KASAN disables KASLR. 
+ */ #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) #ifdef CONFIG_X86_5LEVEL diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 879ef930e2c2b0..aedebd2ebf1ead 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -34,25 +34,14 @@ #define TB_SHIFT 40 /* - * Virtual address start and end range for randomization. The end changes base - * on configuration to have the highest amount of space for randomization. - * It increases the possible random position for each randomized region. + * Virtual address start and end range for randomization. * - * You need to add an if/def entry if you introduce a new memory region - * compatible with KASLR. Your entry must be in logical order with memory - * layout. For example, ESPFIX is before EFI because its virtual address is - * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to - * ensure that this order is correct and won't be changed. + * The end address could depend on more configuration options to make the + * highest amount of space for randomization available, but that's too hard + * to keep straight and caused issues already. */ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE; - -#if defined(CONFIG_X86_ESPFIX64) -static const unsigned long vaddr_end = ESPFIX_BASE_ADDR; -#elif defined(CONFIG_EFI) -static const unsigned long vaddr_end = EFI_VA_END; -#else -static const unsigned long vaddr_end = __START_KERNEL_map; -#endif +static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE; /* Default values */ unsigned long page_offset_base = __PAGE_OFFSET_BASE; @@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void) unsigned long remain_entropy; /* - * All these BUILD_BUG_ON checks ensures the memory layout is - * consistent with the vaddr_start/vaddr_end variables. + * These BUILD_BUG_ON checks ensure the memory layout is consistent + * with the vaddr_start/vaddr_end variables. These checks are very + * limited.... */ BUILD_BUG_ON(vaddr_start >= vaddr_end); - BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && - vaddr_end >= EFI_VA_END); - BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) || - IS_ENABLED(CONFIG_EFI)) && - vaddr_end >= __START_KERNEL_map); + BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE); BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); if (!kaslr_memory_enabled()) From b4660939afccbe1d2af5d066d1337f1b29c87ae8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 4 Jan 2018 18:07:12 +0100 Subject: [PATCH 1220/3326] x86/events/intel/ds: Use the proper cache flush method for mapping ds buffers commit 42f3bdc5dd962a5958bc024c1e1444248a6b8b4a upstream. Thomas reported the following warning: BUG: using smp_processor_id() in preemptible [00000000] code: ovsdb-server/4498 caller is native_flush_tlb_single+0x57/0xc0 native_flush_tlb_single+0x57/0xc0 __set_pte_vaddr+0x2d/0x40 set_pte_vaddr+0x2f/0x40 cea_set_pte+0x30/0x40 ds_update_cea.constprop.4+0x4d/0x70 reserve_ds_buffers+0x159/0x410 x86_reserve_hardware+0x150/0x160 x86_pmu_event_init+0x3e/0x1f0 perf_try_init_event+0x69/0x80 perf_event_alloc+0x652/0x740 SyS_perf_event_open+0x3f6/0xd60 do_syscall_64+0x5c/0x190 set_pte_vaddr is used to map the ds buffers into the cpu entry area, but there are two problems with that: 1) The resulting flush is not supposed to be called in preemptible context 2) The cpu entry area is supposed to be per CPU, but the debug store buffers are mapped for all CPUs so these mappings need to be flushed globally. Add the necessary preemption protection across the mapping code and flush TLBs globally. 
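The shape of the fix, condensed from the ds_update_cea() hunk below (a sketch, not the complete function): the PTE rewrites run with preemption disabled and are followed by a kernel-range TLB flush, because the debug-store mapping in the cpu_entry_area is visible to every CPU.

	/* Condensed from the diff below; illustrative only. */
	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);
	/* Cross-CPU mapping: shoot down TLB entries on all CPUs. */
	flush_tlb_kernel_range(start, start + size);
	preempt_enable();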
Fixes: c1961a4631da ("x86/events/intel/ds: Map debug buffers in cpu_entry_area") Reported-by: Thomas Zeitlhofer Signed-off-by: Peter Zijlstra Signed-off-by: Thomas Gleixner Tested-by: Thomas Zeitlhofer Cc: Greg Kroah-Hartman Cc: Hugh Dickins Link: https://lkml.kernel.org/r/20180104170712.GB3040@hirez.programming.kicks-ass.net Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/ds.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 8f0aace08b8797..8156e47da7ba4c 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -5,6 +5,7 @@ #include #include +#include #include #include "../perf_event.h" @@ -283,20 +284,35 @@ static DEFINE_PER_CPU(void *, insn_buffer); static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot) { + unsigned long start = (unsigned long)cea; phys_addr_t pa; size_t msz = 0; pa = virt_to_phys(addr); + + preempt_disable(); for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE) cea_set_pte(cea, pa, prot); + + /* + * This is a cross-CPU update of the cpu_entry_area, we must shoot down + * all TLB entries for it. + */ + flush_tlb_kernel_range(start, start + size); + preempt_enable(); } static void ds_clear_cea(void *cea, size_t size) { + unsigned long start = (unsigned long)cea; size_t msz = 0; + preempt_disable(); for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE) cea_set_pte(cea, 0, PAGE_NONE); + + flush_tlb_kernel_range(start, start + size); + preempt_enable(); } static void *dsalloc_pages(size_t size, gfp_t flags, int cpu) From 98f42e3f849412895171d096b901bfa3ff0fe004 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 4 Jan 2018 22:19:04 +0100 Subject: [PATCH 1221/3326] x86/tlb: Drop the _GPL from the cpu_tlbstate export commit 1e5476815fd7f98b888e01a0f9522b63085f96c9 upstream. The recent changes for PTI touch cpu_tlbstate from various tlb_flush inlines. cpu_tlbstate is exported as GPL symbol, so this causes a regression when building out of tree drivers for certain graphics cards. Aside of that the export was wrong since it was introduced as it should have been EXPORT_PER_CPU_SYMBOL_GPL(). Use the correct PER_CPU export and drop the _GPL to restore the previous state which allows users to utilize the cards they payed for. As always I'm really thrilled to make this kind of change to support the #friends (or however the hot hashtag of today is spelled) from that closet sauce graphics corp. 
Fixes: 1e02ce4cccdc ("x86: Store a per-cpu shadow copy of CR4") Fixes: 6fd166aae78c ("x86/mm: Use/Fix PCID to optimize user/kernel switches") Reported-by: Kees Cook Signed-off-by: Thomas Gleixner Cc: Greg Kroah-Hartman Cc: Peter Zijlstra Cc: Andy Lutomirski Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 80259ad8c38601..6b462a472a7bfb 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -870,7 +870,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { .next_asid = 1, .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ }; -EXPORT_SYMBOL_GPL(cpu_tlbstate); +EXPORT_PER_CPU_SYMBOL(cpu_tlbstate); void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) { From 83c7e4977221da397ac37119c50a1b21cc6a6677 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 4 Jan 2018 14:37:05 +0000 Subject: [PATCH 1222/3326] x86/alternatives: Add missing '\n' at end of ALTERNATIVE inline asm commit b9e705ef7cfaf22db0daab91ad3cd33b0fa32eb9 upstream. Where an ALTERNATIVE is used in the middle of an inline asm block, this would otherwise lead to the following instruction being appended directly to the trailing ".popsection", and a failed compile. Fixes: 9cebed423c84 ("x86, alternative: Use .pushsection/.popsection") Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: ak@linux.intel.com Cc: Tim Chen Cc: Peter Zijlstra Cc: Paul Turner Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Linus Torvalds Cc: Greg Kroah-Hartman Link: https://lkml.kernel.org/r/20180104143710.8961-8-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/alternative.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index dbfd0854651fe4..cf5961ca867746 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ".popsection\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ - ".popsection" + ".popsection\n" #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ OLDINSTR_2(oldinstr, 1, 2) \ @@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ - ".popsection" + ".popsection\n" /* * Alternative instructions for different CPU types or capabilities. From aea4ead5aba7d0990e007d55b806b1e7859a8bed Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 5 Jan 2018 15:27:34 +0100 Subject: [PATCH 1223/3326] x86/pti: Rename BUG_CPU_INSECURE to BUG_CPU_MELTDOWN commit de791821c295cc61419a06fe5562288417d1bc58 upstream. Use the name associated with the particular attack which needs page table isolation for mitigation. 
Signed-off-by: Thomas Gleixner Acked-by: David Woodhouse Cc: Alan Cox Cc: Jiri Koshina Cc: Linus Torvalds Cc: Tim Chen Cc: Andi Lutomirski Cc: Andi Kleen Cc: Peter Zijlstra Cc: Paul Turner Cc: Tom Lendacky Cc: Greg KH Cc: Dave Hansen Cc: Kees Cook Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801051525300.1724@nanos Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 2 +- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/mm/pti.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 07cdd17157050e..21ac898df2d8e4 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -341,6 +341,6 @@ #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ -#define X86_BUG_CPU_INSECURE X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */ +#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b1be494ab4e8ba..2d3bd2215e5b47 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -900,7 +900,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) setup_force_cpu_cap(X86_FEATURE_ALWAYS); if (c->x86_vendor != X86_VENDOR_AMD) - setup_force_cpu_bug(X86_BUG_CPU_INSECURE); + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); fpu__init_system(c); diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 2da28ba9750824..43d4a4a29037ed 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -56,13 +56,13 @@ static void __init pti_print_if_insecure(const char *reason) { - if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) + if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) pr_info("%s\n", reason); } static void __init pti_print_if_secure(const char *reason) { - if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) pr_info("%s\n", reason); } @@ -96,7 +96,7 @@ void __init pti_check_boottime_disable(void) } autosel: - if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE)) + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) return; enable: setup_force_cpu_cap(X86_FEATURE_PTI); From ae04ca35247af576999da5ef726d1a03fc65de09 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 4 Jan 2018 16:17:49 -0800 Subject: [PATCH 1224/3326] kernel/acct.c: fix the acct->needcheck check in check_free_space() commit 4d9570158b6260f449e317a5f9ed030c2504a615 upstream. As Tsukada explains, the time_is_before_jiffies(acct->needcheck) check is very wrong, we need time_is_after_jiffies() to make sys_acct() work. Ignoring the overflows, the code should "goto out" if needcheck > jiffies, while currently it checks "needcheck < jiffies" and thus in the likely case check_free_space() does nothing until jiffies overflow. In particular this means that sys_acct() is simply broken, acct_on() sets acct->needcheck = jiffies and expects that check_free_space() should set acct->active = 1 after the free-space check, but this won't happen if jiffies increments in between. This was broken by commit 32dc73086015 ("get rid of timer in kern/acct.c") in 2011, then another (correct) commit 795a2f22a8ea ("acct() should honour the limits from the very beginning") made the problem more visible. 
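For reference, the two helpers are thin wrappers in include/linux/jiffies.h, roughly as follows (paraphrased for illustration, not the exact kernel source):

	#define time_is_before_jiffies(a) time_after(jiffies, a)   /* 'a' is in the past   */
	#define time_is_after_jiffies(a)  time_before(jiffies, a)  /* 'a' is in the future */

so check_free_space() should bail out while acct->needcheck still lies in the future, which is exactly what the one-line change below does.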
Link: http://lkml.kernel.org/r/20171213133940.GA6554@redhat.com Fixes: 32dc73086015 ("get rid of timer in kern/acct.c") Reported-by: TSUKADA Koutaro Suggested-by: TSUKADA Koutaro Signed-off-by: Oleg Nesterov Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/acct.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/acct.c b/kernel/acct.c index 6670fbd3e466d3..354578d253d59c 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct) { struct kstatfs sbuf; - if (time_is_before_jiffies(acct->needcheck)) + if (time_is_after_jiffies(acct->needcheck)) goto out; /* May block */ From abcc78627877cef68144475e88952b0957406024 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 4 Jan 2018 16:17:52 -0800 Subject: [PATCH 1225/3326] mm/mprotect: add a cond_resched() inside change_pmd_range() commit 4991c09c7c812dba13ea9be79a68b4565bb1fa4e upstream. While testing on a large CPU system, detected the following RCU stall many times over the span of the workload. This problem is solved by adding a cond_resched() in the change_pmd_range() function. INFO: rcu_sched detected stalls on CPUs/tasks: 154-....: (670 ticks this GP) idle=022/140000000000000/0 softirq=2825/2825 fqs=612 (detected by 955, t=6002 jiffies, g=4486, c=4485, q=90864) Sending NMI from CPU 955 to CPUs 154: NMI backtrace for cpu 154 CPU: 154 PID: 147071 Comm: workload Not tainted 4.15.0-rc3+ #3 NIP: c0000000000b3f64 LR: c0000000000b33d4 CTR: 000000000000aa18 REGS: 00000000a4b0fb44 TRAP: 0501 Not tainted (4.15.0-rc3+) MSR: 8000000000009033 CR: 22422082 XER: 00000000 CFAR: 00000000006cf8f0 SOFTE: 1 GPR00: 0010000000000000 c00003ef9b1cb8c0 c0000000010cc600 0000000000000000 GPR04: 8e0000018c32b200 40017b3858fd6e00 8e0000018c32b208 40017b3858fd6e00 GPR08: 8e0000018c32b210 40017b3858fd6e00 8e0000018c32b218 40017b3858fd6e00 GPR12: ffffffffffffffff c00000000fb25100 NIP [c0000000000b3f64] plpar_hcall9+0x44/0x7c LR [c0000000000b33d4] pSeries_lpar_flush_hash_range+0x384/0x420 Call Trace: flush_hash_range+0x48/0x100 __flush_tlb_pending+0x44/0xd0 hpte_need_flush+0x408/0x470 change_protection_range+0xaac/0xf10 change_prot_numa+0x30/0xb0 task_numa_work+0x2d0/0x3e0 task_work_run+0x130/0x190 do_notify_resume+0x118/0x120 ret_from_except_lite+0x70/0x74 Instruction dump: 60000000 f8810028 7ca42b78 7cc53378 7ce63b78 7d074378 7d284b78 7d495378 e9410060 e9610068 e9810070 44000022 <7d806378> e9810028 f88c0000 f8ac0008 Link: http://lkml.kernel.org/r/20171214140551.5794-1-khandual@linux.vnet.ibm.com Signed-off-by: Anshuman Khandual Suggested-by: Nicholas Piggin Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/mprotect.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/mprotect.c b/mm/mprotect.c index ec39f730a0bfee..58b629bb70de30 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, next = pmd_addr_end(addr, end); if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd) && pmd_none_or_clear_bad(pmd)) - continue; + goto next; /* invoke the mmu notifier if the pmd is populated */ if (!mni_start) { @@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, } /* huge pmd was handled */ - continue; + goto next; } } /* fall through, the trans huge pmd just split */ @@ -196,6 +196,8 @@ 
static inline unsigned long change_pmd_range(struct vm_area_struct *vma, this_pages = change_pte_range(vma, pmd, addr, next, newprot, dirty_accountable, prot_numa); pages += this_pages; +next: + cond_resched(); } while (pmd++, addr = next, addr != end); if (mni_start) From c86ee796feed73dfa8a895be48c146b018d14393 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Thu, 4 Jan 2018 16:18:06 -0800 Subject: [PATCH 1226/3326] mm/sparse.c: wrong allocation for mem_section commit d09cfbbfa0f761a97687828b5afb27b56cbf2e19 upstream. In commit 83e3c48729d9 ("mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y") mem_section is allocated at runtime to save memory. It allocates the first dimension of array with sizeof(struct mem_section). It costs extra memory, should be sizeof(struct mem_section *). Fix it. Link: http://lkml.kernel.org/r/1513932498-20350-1-git-send-email-bhe@redhat.com Fixes: 83e3c48729 ("mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y") Signed-off-by: Baoquan He Tested-by: Dave Young Acked-by: Kirill A. Shutemov Cc: Kirill A. Shutemov Cc: Ingo Molnar Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Atsushi Kumagai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/sparse.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/sparse.c b/mm/sparse.c index 60805abf98af71..30e56a100ee859 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) if (unlikely(!mem_section)) { unsigned long size, align; - size = sizeof(struct mem_section) * NR_SECTION_ROOTS; + size = sizeof(struct mem_section*) * NR_SECTION_ROOTS; align = 1 << (INTERNODE_CACHE_SHIFT); mem_section = memblock_virt_alloc(size, align); } From 319122a71f306943a75217b3e78a9a0f8e9c1e95 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Thu, 4 Jan 2018 16:18:09 -0800 Subject: [PATCH 1227/3326] userfaultfd: clear the vma->vm_userfaultfd_ctx if UFFD_EVENT_FORK fails commit 0cbb4b4f4c44f54af268969b18d8deda63aded59 upstream. The previous fix in commit 384632e67e08 ("userfaultfd: non-cooperative: fix fork use after free") corrected the refcounting in case of UFFD_EVENT_FORK failure for the fork userfault paths. That still didn't clear the vma->vm_userfaultfd_ctx of the vmas that were set to point to the aborted new uffd ctx earlier in dup_userfaultfd. 
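The cleanup added by this patch boils down to walking the mm of the aborted context and detaching every vma that still points at it, roughly (a condensed sketch of the hunk that follows):

	/* Condensed from the userfaultfd_event_wait_completion() hunk below. */
	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
			vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	up_write(&mm->mmap_sem);
	userfaultfd_ctx_put(release_new_ctx);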
Link: http://lkml.kernel.org/r/20171223002505.593-2-aarcange@redhat.com Signed-off-by: Andrea Arcangeli Reported-by: syzbot Reviewed-by: Mike Rapoport Cc: Eric Biggers Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/userfaultfd.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 1c713fd5b3e679..5aa392eae1c3c2 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -570,11 +570,14 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, struct userfaultfd_wait_queue *ewq) { + struct userfaultfd_ctx *release_new_ctx; + if (WARN_ON_ONCE(current->flags & PF_EXITING)) goto out; ewq->ctx = ctx; init_waitqueue_entry(&ewq->wq, current); + release_new_ctx = NULL; spin_lock(&ctx->event_wqh.lock); /* @@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, new = (struct userfaultfd_ctx *) (unsigned long) ewq->msg.arg.reserved.reserved1; - - userfaultfd_ctx_put(new); + release_new_ctx = new; } break; } @@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, __set_current_state(TASK_RUNNING); spin_unlock(&ctx->event_wqh.lock); + if (release_new_ctx) { + struct vm_area_struct *vma; + struct mm_struct *mm = release_new_ctx->mm; + + /* the various vma->vm_userfaultfd_ctx still points to it */ + down_write(&mm->mmap_sem); + for (vma = mm->mmap; vma; vma = vma->vm_next) + if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + up_write(&mm->mmap_sem); + + userfaultfd_ctx_put(release_new_ctx); + } + /* * ctx may go away after this if the userfault pseudo fd is * already released. From b9fa21f3daa6533ab2303caad72c996e3924cf3b Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 15 Dec 2017 11:58:27 -0800 Subject: [PATCH 1228/3326] btrfs: fix refcount_t usage when deleting btrfs_delayed_nodes commit ec35e48b286959991cdbb886f1bdeda4575c80b4 upstream. refcounts have a generic implementation and an asm optimized one. The generic version has extra debugging to make sure that once a refcount goes to zero, refcount_inc won't increase it. The btrfs delayed inode code wasn't expecting this, and we're tripping over the warnings when the generic refcounts are used. We ended up with this race: Process A Process B btrfs_get_delayed_node() spin_lock(root->inode_lock) radix_tree_lookup() __btrfs_release_delayed_node() refcount_dec_and_test(&delayed_node->refs) our refcount is now zero refcount_add(2) <--- warning here, refcount unchanged spin_lock(root->inode_lock) radix_tree_delete() With the generic refcounts, we actually warn again when process B above tries to release his refcount because refcount_add() turned into a no-op. We saw this in production on older kernels without the asm optimized refcounts. The fix used here is to use refcount_inc_not_zero() to detect when the object is in the middle of being freed and return NULL. This is almost always the right answer anyway, since we usually end up pitching the delayed_node if it didn't have fresh data in it. This also changes __btrfs_release_delayed_node() to remove the extra check for zero refcounts before radix tree deletion. btrfs_get_delayed_node() was the only path that was allowing refcounts to go from zero to one. 
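The idiom the fix relies on is the usual lookup-under-lock pattern: an object found in the radix tree may already have dropped its last reference, and refcount_inc_not_zero() is what tells a live node apart from one that is mid-teardown (simplified sketch of the lookup path changed below; illustrative only):

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node && !refcount_inc_not_zero(&node->refs))
		node = NULL;	/* release path owns it; treat as not found */
	spin_unlock(&root->inode_lock);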
Fixes: 6de5f18e7b0da ("btrfs: fix refcount_t usage when deleting btrfs_delayed_node") Signed-off-by: Chris Mason Reviewed-by: Liu Bo Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/delayed-inode.c | 45 ++++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 19e4ad2f3f2e4a..0c4b690cf761b9 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -87,6 +87,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node( spin_lock(&root->inode_lock); node = radix_tree_lookup(&root->delayed_nodes_tree, ino); + if (node) { if (btrfs_inode->delayed_node) { refcount_inc(&node->refs); /* can be accessed */ @@ -94,9 +95,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node( spin_unlock(&root->inode_lock); return node; } - btrfs_inode->delayed_node = node; - /* can be accessed and cached in the inode */ - refcount_add(2, &node->refs); + + /* + * It's possible that we're racing into the middle of removing + * this node from the radix tree. In this case, the refcount + * was zero and it should never go back to one. Just return + * NULL like it was never in the radix at all; our release + * function is in the process of removing it. + * + * Some implementations of refcount_inc refuse to bump the + * refcount once it has hit zero. If we don't do this dance + * here, refcount_inc() may decide to just WARN_ONCE() instead + * of actually bumping the refcount. + * + * If this node is properly in the radix, we want to bump the + * refcount twice, once for the inode and once for this get + * operation. + */ + if (refcount_inc_not_zero(&node->refs)) { + refcount_inc(&node->refs); + btrfs_inode->delayed_node = node; + } else { + node = NULL; + } + spin_unlock(&root->inode_lock); return node; } @@ -254,17 +276,18 @@ static void __btrfs_release_delayed_node( mutex_unlock(&delayed_node->mutex); if (refcount_dec_and_test(&delayed_node->refs)) { - bool free = false; struct btrfs_root *root = delayed_node->root; + spin_lock(&root->inode_lock); - if (refcount_read(&delayed_node->refs) == 0) { - radix_tree_delete(&root->delayed_nodes_tree, - delayed_node->inode_id); - free = true; - } + /* + * Once our refcount goes to zero, nobody is allowed to bump it + * back up. We can delete it now. + */ + ASSERT(refcount_read(&delayed_node->refs) == 0); + radix_tree_delete(&root->delayed_nodes_tree, + delayed_node->inode_id); spin_unlock(&root->inode_lock); - if (free) - kmem_cache_free(delayed_node_cache, delayed_node); + kmem_cache_free(delayed_node_cache, delayed_node); } } From 95a362c9a6892085f714eb6e31eea6a0e3aa93bf Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jan 2018 17:21:10 +0000 Subject: [PATCH 1229/3326] efi/capsule-loader: Reinstate virtual capsule mapping commit f24c4d478013d82bd1b943df566fff3561d52864 upstream. Commit: 82c3768b8d68 ("efi/capsule-loader: Use a cached copy of the capsule header") ... refactored the capsule loading code that maps the capsule header, to avoid having to map it several times. However, as it turns out, the vmap() call we ended up removing did not just map the header, but the entire capsule image, and dropping this virtual mapping breaks capsules that are processed by the firmware immediately (i.e., without a reboot). 
Unfortunately, that change was part of a larger refactor that allowed a quirk to be implemented for Quark, which has a non-standard memory layout for capsules, and we have slightly painted ourselves into a corner by allowing quirk code to mangle the capsule header and memory layout. So we need to fix this without breaking Quark. Fortunately, Quark does not appear to care about the virtual mapping, and so we can simply do a partial revert of commit: 2a457fb31df6 ("efi/capsule-loader: Use page addresses rather than struct page pointers") ... and create a vmap() mapping of the entire capsule (including header) based on the reinstated struct page array, unless running on Quark, in which case we pass the capsule header copy as before. Reported-by: Ge Song Tested-by: Bryan O'Donoghue Tested-by: Ge Song Signed-off-by: Ard Biesheuvel Cc: Dave Young Cc: Linus Torvalds Cc: Matt Fleming Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Fixes: 82c3768b8d68 ("efi/capsule-loader: Use a cached copy of the capsule header") Link: http://lkml.kernel.org/r/20180102172110.17018-3-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/platform/efi/quirks.c | 13 +++++++- drivers/firmware/efi/capsule-loader.c | 45 ++++++++++++++++++++++----- include/linux/efi.h | 4 ++- 3 files changed, 52 insertions(+), 10 deletions(-) diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 8a99a2e96537a9..5b513ccffde404 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff, /* * Update the first page pointer to skip over the CSH header. */ - cap_info->pages[0] += csh->headersize; + cap_info->phys[0] += csh->headersize; + + /* + * cap_info->capsule should point at a virtual mapping of the entire + * capsule, starting at the capsule header. Our image has the Quark + * security header prepended, so we cannot rely on the default vmap() + * mapping created by the generic capsule code. + * Given that the Quark firmware does not appear to care about the + * virtual mapping, let's just point cap_info->capsule at our copy + * of the capsule header. 
+ */ + cap_info->capsule = &cap_info->header; return 1; } diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index ec8ac5c4dd84f9..055e2e8f985a3f 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -20,10 +20,6 @@ #define NO_FURTHER_WRITE_ACTION -1 -#ifndef phys_to_page -#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT) -#endif - /** * efi_free_all_buff_pages - free all previous allocated buffer pages * @cap_info: pointer to current instance of capsule_info structure @@ -35,7 +31,7 @@ static void efi_free_all_buff_pages(struct capsule_info *cap_info) { while (cap_info->index > 0) - __free_page(phys_to_page(cap_info->pages[--cap_info->index])); + __free_page(cap_info->pages[--cap_info->index]); cap_info->index = NO_FURTHER_WRITE_ACTION; } @@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info) cap_info->pages = temp_page; + temp_page = krealloc(cap_info->phys, + pages_needed * sizeof(phys_addr_t *), + GFP_KERNEL | __GFP_ZERO); + if (!temp_page) + return -ENOMEM; + + cap_info->phys = temp_page; + return 0; } @@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff, **/ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info) { + bool do_vunmap = false; int ret; - ret = efi_capsule_update(&cap_info->header, cap_info->pages); + /* + * cap_info->capsule may have been assigned already by a quirk + * handler, so only overwrite it if it is NULL + */ + if (!cap_info->capsule) { + cap_info->capsule = vmap(cap_info->pages, cap_info->index, + VM_MAP, PAGE_KERNEL); + if (!cap_info->capsule) + return -ENOMEM; + do_vunmap = true; + } + + ret = efi_capsule_update(cap_info->capsule, cap_info->phys); + if (do_vunmap) + vunmap(cap_info->capsule); if (ret) { pr_err("capsule update failed\n"); return ret; @@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff, goto failed; } - cap_info->pages[cap_info->index++] = page_to_phys(page); + cap_info->pages[cap_info->index] = page; + cap_info->phys[cap_info->index] = page_to_phys(page); cap_info->page_bytes_remain = PAGE_SIZE; + cap_info->index++; } else { - page = phys_to_page(cap_info->pages[cap_info->index - 1]); + page = cap_info->pages[cap_info->index - 1]; } kbuff = kmap(page); @@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file) struct capsule_info *cap_info = file->private_data; kfree(cap_info->pages); + kfree(cap_info->phys); kfree(file->private_data); file->private_data = NULL; return 0; @@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file) return -ENOMEM; } + cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL); + if (!cap_info->phys) { + kfree(cap_info->pages); + kfree(cap_info); + return -ENOMEM; + } + file->private_data = cap_info; return 0; diff --git a/include/linux/efi.h b/include/linux/efi.h index d813f7b04da7a1..29fdf8029cf6fe 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -140,11 +140,13 @@ struct efi_boot_memmap { struct capsule_info { efi_capsule_header_t header; + efi_capsule_header_t *capsule; int reset_type; long index; size_t count; size_t total_size; - phys_addr_t *pages; + struct page **pages; + phys_addr_t *phys; size_t page_bytes_remain; }; From 491062a0099c8bf6841dece8dd501d9fc2b80e34 Mon Sep 17 00:00:00 2001 From: Jan Engelhardt Date: Tue, 19 Dec 2017 19:09:07 +0100 Subject: [PATCH 1230/3326] crypto: n2 - cure use after free commit 
203f45003a3d03eea8fa28d74cfc74c354416fdb upstream. queue_cache_init is first called for the Control Word Queue (n2_crypto_probe). At that time, queue_cache[0] is NULL and a new kmem_cache will be allocated. If the subsequent n2_register_algs call fails, the kmem_cache will be released in queue_cache_destroy, but queue_cache_init[0] is not set back to NULL. So when the Module Arithmetic Unit gets probed next (n2_mau_probe), queue_cache_init will not allocate a kmem_cache again, but leave it as its bogus value, causing a BUG() to trigger when queue_cache[0] is eventually passed to kmem_cache_zalloc: n2_crypto: Found N2CP at /virtual-devices@100/n2cp@7 n2_crypto: Registered NCS HVAPI version 2.0 called queue_cache_init n2_crypto: md5 alg registration failed n2cp f028687c: /virtual-devices@100/n2cp@7: Unable to register algorithms. called queue_cache_destroy n2cp: probe of f028687c failed with error -22 n2_crypto: Found NCP at /virtual-devices@100/ncp@6 n2_crypto: Registered NCS HVAPI version 2.0 called queue_cache_init kernel BUG at mm/slab.c:2993! Call Trace: [0000000000604488] kmem_cache_alloc+0x1a8/0x1e0 (inlined) kmem_cache_zalloc (inlined) new_queue (inlined) spu_queue_setup (inlined) handle_exec_unit [0000000010c61eb4] spu_mdesc_scan+0x1f4/0x460 [n2_crypto] [0000000010c62b80] n2_mau_probe+0x100/0x220 [n2_crypto] [000000000084b174] platform_drv_probe+0x34/0xc0 Signed-off-by: Jan Engelhardt Acked-by: David S. Miller Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/n2_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index a9fd8b9e86cde2..699ee5a9a8f992 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1625,6 +1625,7 @@ static int queue_cache_init(void) CWQ_ENTRY_SIZE, 0, NULL); if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; return -ENOMEM; } return 0; @@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void) { kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; + queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL; } static long spu_queue_register_workfn(void *arg) From 9c36498f74533e0b1fdcb9692a04482bc2d91d8d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 11 Dec 2017 12:15:17 -0800 Subject: [PATCH 1231/3326] crypto: chacha20poly1305 - validate the digest size commit e57121d08c38dabec15cf3e1e2ad46721af30cae upstream. If the rfc7539 template was instantiated with a hash algorithm with digest size larger than 16 bytes (POLY1305_DIGEST_SIZE), then the digest overran the 'tag' buffer in 'struct chachapoly_req_ctx', corrupting the subsequent memory, including 'cryptlen'. This caused a crash during crypto_skcipher_decrypt(). Fix it by, when instantiating the template, requiring that the underlying hash algorithm has the digest size expected for Poly1305. 
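To picture the overrun: the Poly1305 tag buffer in the request context is 16 bytes, so a hash with a larger digest (e.g. SHA-256's 32 bytes) writes past it into whatever follows. A simplified, hypothetical layout for illustration only (not the exact structure definition):

	struct chachapoly_req_ctx_example {	/* hypothetical, for illustration */
		u8 tag[16];		/* POLY1305_DIGEST_SIZE */
		unsigned int cryptlen;	/* clobbered when digestsize > 16 */
	};

The instantiation-time check added below rejects any underlying hash whose digest size is not exactly POLY1305_DIGEST_SIZE.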
Reproducer: #include #include #include int main() { int algfd, reqfd; struct sockaddr_alg addr = { .salg_type = "aead", .salg_name = "rfc7539(chacha20,sha256)", }; unsigned char buf[32] = { 0 }; algfd = socket(AF_ALG, SOCK_SEQPACKET, 0); bind(algfd, (void *)&addr, sizeof(addr)); setsockopt(algfd, SOL_ALG, ALG_SET_KEY, buf, sizeof(buf)); reqfd = accept(algfd, 0, 0); write(reqfd, buf, 16); read(reqfd, buf, 16); } Reported-by: syzbot Fixes: 71ebc4d1b27d ("crypto: chacha20poly1305 - Add a ChaCha20-Poly1305 AEAD construction, RFC7539") Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/chacha20poly1305.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index db1bc3147bc470..600afa99941fe0 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, algt->mask)); if (IS_ERR(poly)) return PTR_ERR(poly); + poly_hash = __crypto_hash_alg_common(poly); + + err = -EINVAL; + if (poly_hash->digestsize != POLY1305_DIGEST_SIZE) + goto out_put_poly; err = -ENOMEM; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); @@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, ctx = aead_instance_ctx(inst); ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; - poly_hash = __crypto_hash_alg_common(poly); err = crypto_init_ahash_spawn(&ctx->poly, poly_hash, aead_crypto_instance(inst)); if (err) From 7156c794b8ab462705e6ac80c5fa69565eb44c62 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 20 Dec 2017 14:28:25 -0800 Subject: [PATCH 1232/3326] crypto: pcrypt - fix freeing pcrypt instances commit d76c68109f37cb85b243a1cf0f40313afd2bae68 upstream. pcrypt is using the old way of freeing instances, where the ->free() method specified in the 'struct crypto_template' is passed a pointer to the 'struct crypto_instance'. But the crypto_instance is being kfree()'d directly, which is incorrect because the memory was actually allocated as an aead_instance, which contains the crypto_instance at a nonzero offset. Thus, the wrong pointer was being kfree()'d. Fix it by switching to the new way to free aead_instance's where the ->free() method is specified in the aead_instance itself. 
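The underlying rule is the usual container_of one: the crypto_instance sits at a non-zero offset inside the aead_instance, so freeing the inner pointer is not freeing what was allocated. A minimal, hypothetical illustration (invented type names, not the real crypto API):

	struct inner { int dummy; };
	struct outer {
		long header;		/* pushes 'inner' to a non-zero offset */
		struct inner inner;
	};

	static void example_release(struct inner *p)
	{
		/* kfree(p) would be wrong: p is not what kmalloc() returned. */
		kfree(container_of(p, struct outer, inner));
	}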
Reported-by: syzbot Fixes: 0496f56065e0 ("crypto: pcrypt - Add support for new AEAD interface") Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/pcrypt.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index ee9cfb99fe256a..f8ec3d4ba4a80f 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm) crypto_free_aead(ctx->child); } +static void pcrypt_free(struct aead_instance *inst) +{ + struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst); + + crypto_drop_aead(&ctx->spawn); + kfree(inst); +} + static int pcrypt_init_instance(struct crypto_instance *inst, struct crypto_alg *alg) { @@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.encrypt = pcrypt_aead_encrypt; inst->alg.decrypt = pcrypt_aead_decrypt; + inst->free = pcrypt_free; + err = aead_register_instance(tmpl, inst); if (err) goto out_drop_aead; @@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) return -EINVAL; } -static void pcrypt_free(struct crypto_instance *inst) -{ - struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); - - crypto_drop_aead(&ctx->spawn); - kfree(inst); -} - static int pcrypt_cpumask_change_notify(struct notifier_block *self, unsigned long val, void *data) { @@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, - .free = pcrypt_free, .module = THIS_MODULE, }; From d4f9aaeb26d024f2f239b19a68d3e339628e1066 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 5 Dec 2017 11:10:26 +0100 Subject: [PATCH 1233/3326] crypto: chelsio - select CRYPTO_GF128MUL commit d042566d8c704e1ecec370300545d4a409222e39 upstream. Without the gf128mul library support, we can run into a link error: drivers/crypto/chelsio/chcr_algo.o: In function `chcr_update_tweak': chcr_algo.c:(.text+0x7e0): undefined reference to `gf128mul_x8_ble' This adds a Kconfig select statement for it, next to the ones we already have. Fixes: b8fd1f4170e7 ("crypto: chcr - Add ctr mode and process large sg entries for cipher") Signed-off-by: Arnd Bergmann Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/chelsio/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index 3e104f5aa0c2f9..b56b3f711d9410 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig @@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO select CRYPTO_SHA256 select CRYPTO_SHA512 select CRYPTO_AUTHENC + select CRYPTO_GF128MUL ---help--- The Chelsio Crypto Co-processor driver for T6 adapters. From 5d8c39173bd71ed53ae88de77a8ec94f30b69919 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 8 Dec 2017 23:37:36 +0200 Subject: [PATCH 1234/3326] drm/i915: Disable DC states around GMBUS on GLK MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 3488d0237f6364614f0c59d6d784bb79b11eeb92 upstream. Prevent the DMC from destroying GMBUS transfers on GLK. GMBUS lives in PG1 so DC off is all we need. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171208213739.16388-1-ville.syrjala@linux.intel.com Reviewed-by: Dhinakaran Pandiyan (cherry picked from commit 156961ae7bdf6feb72778e8da83d321b273343fd) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_runtime_pm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 49577eba8e7efc..6e774b2673b418 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -1786,6 +1786,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ From 5d8c5beecdd9098458a047a871cd20fae3f4c6b2 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Tue, 2 Jan 2018 12:18:37 -0800 Subject: [PATCH 1235/3326] drm/i915: Apply Display WA #1183 on skl, kbl, and cfl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 30414f3010aff95ffdb6bed7b9dce62cde94fdc7 upstream. Display WA #1183 was recently added to workaround "Failures when enabling DPLL0 with eDP link rate 2.16 or 4.32 GHz and CD clock frequency 308.57 or 617.14 MHz (CDCLK_CTL CD Frequency Select 10b or 11b) used in this enabling or in previous enabling." This workaround was designed to minimize the impact only to save the bad case with that link rates. But HW engineers indicated that it should be safe to apply broadly, although they were expecting the DPLL0 link rate to be unchanged on runtime. We need to cover 2 cases: when we are in fact enabling DPLL0 and when we are just changing the frequency with small differences. This is based on previous patch by Rodrigo Vivi with suggestions from Ville Syrjälä. 
Cc: Arthur J Runyan Cc: Ville Syrjälä Cc: Rodrigo Vivi Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20171204232210.4958-1-lucas.demarchi@intel.com (cherry picked from commit 53421c2fe99ce16838639ad89d772d914a119a49) [ Lucas: Backport to 4.15 adding back variable that has been removed on commits not meant to be backported ] Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20180102201837.6812-1-lucas.demarchi@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/i915_reg.h | 2 ++ drivers/gpu/drm/i915/intel_cdclk.c | 35 ++++++++++++++++++------- drivers/gpu/drm/i915/intel_runtime_pm.c | 10 +++++++ 3 files changed, 38 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c9bcc6c450126e..ce2ed16f2a309c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6944,6 +6944,7 @@ enum { #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) +#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30) #define MASK_WAKEMEM (1<<13) #define SKL_DFSM _MMIO(0x51000) @@ -8475,6 +8476,7 @@ enum skl_power_gate { #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) +#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19) #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) #define CDCLK_FREQ_DECIMAL_MASK (0x7ff) diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 1241e5891b2951..26a8dcd2c54942 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -859,16 +859,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) { - int min_cdclk = skl_calc_cdclk(0, vco); u32 val; WARN_ON(vco != 8100000 && vco != 8640000); - /* select the minimum CDCLK before enabling DPLL 0 */ - val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk); - I915_WRITE(CDCLK_CTL, val); - POSTING_READ(CDCLK_CTL); - /* * We always enable DPLL0 with the lowest link rate possible, but still * taking into account the VCO required to operate the eDP panel at the @@ -922,7 +916,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, { int cdclk = cdclk_state->cdclk; int vco = cdclk_state->vco; - u32 freq_select, pcu_ack; + u32 freq_select, pcu_ack, cdclk_ctl; int ret; WARN_ON((cdclk == 24000) != (vco == 0)); @@ -939,7 +933,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, return; } - /* set CDCLK_CTL */ + /* Choose frequency for this cdclk */ switch (cdclk) { case 450000: case 432000: @@ -967,10 +961,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, dev_priv->cdclk.hw.vco != vco) skl_dpll0_disable(dev_priv); + cdclk_ctl = I915_READ(CDCLK_CTL); + + if (dev_priv->cdclk.hw.vco != vco) { + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); + cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + } + + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE; + I915_WRITE(CDCLK_CTL, cdclk_ctl); + POSTING_READ(CDCLK_CTL); + if (dev_priv->cdclk.hw.vco != vco) skl_dpll0_enable(dev_priv, vco); - I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); + /* Wa Display #1183: skl,kbl,cfl */ 
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + + cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE; + I915_WRITE(CDCLK_CTL, cdclk_ctl); POSTING_READ(CDCLK_CTL); /* inform PCU of the change */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6e774b2673b418..51cb5293bf43a6 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Enabling DC5\n"); + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); } @@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv) { DRM_DEBUG_KMS("Disabling DC6\n"); + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); } From 2ee35943b11f07de678642a8f08e61e5d5f3867a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Br=C3=BCns?= Date: Mon, 27 Nov 2017 20:05:34 +0100 Subject: [PATCH 1236/3326] sunxi-rsb: Include OF based modalias in device uevent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit e2bf801ecd4e62222a46d1ba9e57e710171d29c1 upstream. Include the OF-based modalias in the uevent sent when registering devices on the sunxi RSB bus, so that user space has a chance to autoload the kernel module for the device. Fixes a regression caused by commit 3f241bfa60bd ("arm64: allwinner: a64: pine64: Use dcdc1 regulator for mmc0"). When the axp20x-rsb module for the AXP803 PMIC is built as a module, it is not loaded and the system ends up with an disfunctional MMC controller. Fixes: d787dcdb9c8f ("bus: sunxi-rsb: Add driver for Allwinner Reduced Serial Bus") Acked-by: Chen-Yu Tsai Signed-off-by: Stefan Brüns Signed-off-by: Maxime Ripard Signed-off-by: Greg Kroah-Hartman --- drivers/bus/sunxi-rsb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 328ca93781cf26..1b76d958590275 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = { .match = sunxi_rsb_device_match, .probe = sunxi_rsb_device_probe, .remove = sunxi_rsb_device_remove, + .uevent = of_device_uevent_modalias, }; static void sunxi_rsb_dev_release(struct device *dev) From 2335a22b2224f71ac639becef4a6cb8ef54c8b7e Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 2 Jan 2018 10:02:19 +0000 Subject: [PATCH 1237/3326] fscache: Fix the default for fscache_maybe_release_page() commit 98801506552593c9b8ac11021b0cdad12cab4f6b upstream. Fix the default for fscache_maybe_release_page() for when the cookie isn't valid or the page isn't cached. It mustn't return false as that indicates the page cannot yet be freed. The problem with the default is that if, say, there's no cache, but a network filesystem's pages are using up almost all the available memory, a system can OOM because the filesystem ->releasepage() op will not allow them to be released as fscache_maybe_release_page() incorrectly prevents it. 
This can be tested by writing a sequence of 512MiB files to an AFS mount. It does not affect NFS or CIFS because both of those wrap the call in a check of PG_fscache and it shouldn't bother Ceph as that only has PG_private set whilst writeback is in progress. This might be an issue for 9P, however. Note that the pages aren't entirely stuck. Removing a file or unmounting will clear things because that uses ->invalidatepage() instead. Fixes: 201a15428bd5 ("FS-Cache: Handle pages pending storage that get evicted under OOM conditions") Reported-by: Marc Dionne Signed-off-by: David Howells Reviewed-by: Jeff Layton Acked-by: Al Viro Tested-by: Marc Dionne Signed-off-by: Greg Kroah-Hartman --- include/linux/fscache.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/fscache.h b/include/linux/fscache.h index f4ff47d4a893a7..fe0c349684fa83 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie, { if (fscache_cookie_valid(cookie) && PageFsCache(page)) return __fscache_maybe_release_page(cookie, page, gfp); - return false; + return true; } /** From eff3e4fde76492931a45e23767c7842524b0ea06 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 13 Nov 2017 02:15:39 +0100 Subject: [PATCH 1238/3326] x86 / CPU: Avoid unnecessary IPIs in arch_freq_get_on_cpu() commit b29c6ef7bb1257853c1e31616d84f55e561cf631 upstream. Even though aperfmperf_snapshot_khz() caches the samples.khz value to return if called again in a sufficiently short time, its caller, arch_freq_get_on_cpu(), still uses smp_call_function_single() to run it which may allow user space to trigger an IPI storm by reading from the scaling_cur_freq cpufreq sysfs file in a tight loop. To avoid that, move the decision on whether or not to return the cached samples.khz value to arch_freq_get_on_cpu(). This change was part of commit 941f5f0f6ef5 ("x86: CPU: Fix up "cpu MHz" in /proc/cpuinfo"), but it was not the reason for the revert and it remains applicable. Fixes: 4815d3c56d1e (cpufreq: x86: Make scaling_cur_freq behave more as expected) Signed-off-by: Rafael J. Wysocki Reviewed-by: WANG Chao Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/aperfmperf.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 0ee83321a3136f..957813e0180d27 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -42,10 +42,6 @@ static void aperfmperf_snapshot_khz(void *dummy) s64 time_delta = ktime_ms_delta(now, s->time); unsigned long flags; - /* Don't bother re-computing within the cache threshold time. */ - if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS) - return; - local_irq_save(flags); rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); @@ -74,6 +70,7 @@ static void aperfmperf_snapshot_khz(void *dummy) unsigned int arch_freq_get_on_cpu(int cpu) { + s64 time_delta; unsigned int khz; if (!cpu_khz) @@ -82,6 +79,12 @@ unsigned int arch_freq_get_on_cpu(int cpu) if (!static_cpu_has(X86_FEATURE_APERFMPERF)) return 0; + /* Don't bother re-computing within the cache threshold time. 
*/ + time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu)); + khz = per_cpu(samples.khz, cpu); + if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS) + return khz; + smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); khz = per_cpu(samples.khz, cpu); if (khz) From 22af48be826c4193bba2f11112330aabb2568594 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 15 Nov 2017 02:13:40 +0100 Subject: [PATCH 1239/3326] x86 / CPU: Always show current CPU frequency in /proc/cpuinfo commit 7d5905dc14a87805a59f3c5bf70173aac2bb18f8 upstream. After commit 890da9cf0983 (Revert "x86: do not use cpufreq_quick_get() for /proc/cpuinfo "cpu MHz"") the "cpu MHz" number in /proc/cpuinfo on x86 can be either the nominal CPU frequency (which is constant) or the frequency most recently requested by a scaling governor in cpufreq, depending on the cpufreq configuration. That is somewhat inconsistent and is different from what it was before 4.13, so in order to restore the previous behavior, make it report the current CPU frequency like the scaling_cur_freq sysfs file in cpufreq. To that end, modify the /proc/cpuinfo implementation on x86 to use aperfmperf_snapshot_khz() to snapshot the APERF and MPERF feedback registers, if available, and use their values to compute the CPU frequency to be reported as "cpu MHz". However, do that carefully enough to avoid accumulating delays that lead to unacceptable access times for /proc/cpuinfo on systems with many CPUs. Run aperfmperf_snapshot_khz() once on all CPUs asynchronously at the /proc/cpuinfo open time, add a single delay upfront (if necessary) at that point and simply compute the current frequency while running show_cpuinfo() for each individual CPU. Also, to avoid slowing down /proc/cpuinfo accesses too much, reduce the default delay between consecutive APERF and MPERF reads to 10 ms, which should be sufficient to get large enough numbers for the frequency computation in all cases. Fixes: 890da9cf0983 (Revert "x86: do not use cpufreq_quick_get() for /proc/cpuinfo "cpu MHz"") Signed-off-by: Rafael J. 
Wysocki Acked-by: Thomas Gleixner Tested-by: Thomas Gleixner Acked-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/Makefile | 2 +- arch/x86/kernel/cpu/aperfmperf.c | 74 ++++++++++++++++++++++---------- arch/x86/kernel/cpu/cpu.h | 3 ++ arch/x86/kernel/cpu/proc.c | 6 ++- fs/proc/cpuinfo.c | 6 +++ include/linux/cpufreq.h | 1 + 6 files changed, 68 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 90cb82dbba577c..570e8bb1f386d9 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -22,7 +22,7 @@ obj-y += common.o obj-y += rdrand.o obj-y += match.o obj-y += bugs.o -obj-$(CONFIG_CPU_FREQ) += aperfmperf.o +obj-y += aperfmperf.o obj-y += cpuid-deps.o obj-$(CONFIG_PROC_FS) += proc.o diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 957813e0180d27..7eba34df54c3d4 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -14,6 +14,8 @@ #include #include +#include "cpu.h" + struct aperfmperf_sample { unsigned int khz; ktime_t time; @@ -24,7 +26,7 @@ struct aperfmperf_sample { static DEFINE_PER_CPU(struct aperfmperf_sample, samples); #define APERFMPERF_CACHE_THRESHOLD_MS 10 -#define APERFMPERF_REFRESH_DELAY_MS 20 +#define APERFMPERF_REFRESH_DELAY_MS 10 #define APERFMPERF_STALE_THRESHOLD_MS 1000 /* @@ -38,8 +40,6 @@ static void aperfmperf_snapshot_khz(void *dummy) u64 aperf, aperf_delta; u64 mperf, mperf_delta; struct aperfmperf_sample *s = this_cpu_ptr(&samples); - ktime_t now = ktime_get(); - s64 time_delta = ktime_ms_delta(now, s->time); unsigned long flags; local_irq_save(flags); @@ -57,38 +57,68 @@ static void aperfmperf_snapshot_khz(void *dummy) if (mperf_delta == 0) return; - s->time = now; + s->time = ktime_get(); s->aperf = aperf; s->mperf = mperf; - - /* If the previous iteration was too long ago, discard it. */ - if (time_delta > APERFMPERF_STALE_THRESHOLD_MS) - s->khz = 0; - else - s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta); + s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta); } -unsigned int arch_freq_get_on_cpu(int cpu) +static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait) { - s64 time_delta; - unsigned int khz; + s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu)); + + /* Don't bother re-computing within the cache threshold time. */ + if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS) + return true; + + smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait); + + /* Return false if the previous iteration was too long ago. */ + return time_delta <= APERFMPERF_STALE_THRESHOLD_MS; +} +unsigned int aperfmperf_get_khz(int cpu) +{ if (!cpu_khz) return 0; if (!static_cpu_has(X86_FEATURE_APERFMPERF)) return 0; - /* Don't bother re-computing within the cache threshold time. 
*/ - time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu)); - khz = per_cpu(samples.khz, cpu); - if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS) - return khz; + aperfmperf_snapshot_cpu(cpu, ktime_get(), true); + return per_cpu(samples.khz, cpu); +} - smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); - khz = per_cpu(samples.khz, cpu); - if (khz) - return khz; +void arch_freq_prepare_all(void) +{ + ktime_t now = ktime_get(); + bool wait = false; + int cpu; + + if (!cpu_khz) + return; + + if (!static_cpu_has(X86_FEATURE_APERFMPERF)) + return; + + for_each_online_cpu(cpu) + if (!aperfmperf_snapshot_cpu(cpu, now, false)) + wait = true; + + if (wait) + msleep(APERFMPERF_REFRESH_DELAY_MS); +} + +unsigned int arch_freq_get_on_cpu(int cpu) +{ + if (!cpu_khz) + return 0; + + if (!static_cpu_has(X86_FEATURE_APERFMPERF)) + return 0; + + if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true)) + return per_cpu(samples.khz, cpu); msleep(APERFMPERF_REFRESH_DELAY_MS); smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index f52a370b6c00f9..e806b11a99af4c 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -47,4 +47,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], extern void get_cpu_cap(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); + +unsigned int aperfmperf_get_khz(int cpu); + #endif /* ARCH_X86_CPU_H */ diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 6b7e17bf0b71dd..e7ecedafa1c8f7 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -5,6 +5,8 @@ #include #include +#include "cpu.h" + /* * Get CPU information for use by the procfs. */ @@ -78,8 +80,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "microcode\t: 0x%x\n", c->microcode); if (cpu_has(c, X86_FEATURE_TSC)) { - unsigned int freq = cpufreq_quick_get(cpu); + unsigned int freq = aperfmperf_get_khz(cpu); + if (!freq) + freq = cpufreq_quick_get(cpu); if (!freq) freq = cpu_khz; seq_printf(m, "cpu MHz\t\t: %u.%03u\n", diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c index e0f867cd855375..96f1087e372c19 100644 --- a/fs/proc/cpuinfo.c +++ b/fs/proc/cpuinfo.c @@ -1,12 +1,18 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include #include #include +__weak void arch_freq_prepare_all(void) +{ +} + extern const struct seq_operations cpuinfo_op; static int cpuinfo_open(struct inode *inode, struct file *file) { + arch_freq_prepare_all(); return seq_open(file, &cpuinfo_op); } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 537ff842ff7332..cbf85c4c745f85 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -917,6 +917,7 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) } #endif +extern void arch_freq_prepare_all(void); extern unsigned int arch_freq_get_on_cpu(int cpu); /* the following are really really optional */ From 2ebd69e8c251597f51768dce320cc10f9b4c8894 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 17 Nov 2017 15:30:01 -0800 Subject: [PATCH 1240/3326] kernel/signal.c: protect the traced SIGNAL_UNKILLABLE tasks from SIGKILL commit 628c1bcba204052d19b686b5bac149a644cdb72e upstream. 
The comment in sig_ignored() says "Tracers may want to know about even ignored signals" but SIGKILL cannot be reported to the debugger and it is just wrong to return 0 in this case: SIGKILL should only kill the SIGNAL_UNKILLABLE task if it comes from the parent ns. Change sig_ignored() to ignore ->ptrace if sig == SIGKILL and rely on sig_task_ignored(). SIGSTOP coming from within the namespace is not really right either, but at least the debugger can intercept it, and we can't drop it here because this will break "gdb -p 1": ptrace_attach() won't work. Perhaps we will add another ->ptrace check later, we will see. Link: http://lkml.kernel.org/r/20171103184206.GB21036@redhat.com Signed-off-by: Oleg Nesterov Tested-by: Kyle Huey Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/signal.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/kernel/signal.c b/kernel/signal.c index 8dcd8825b2dedf..7339d737188590 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -94,13 +94,15 @@ static int sig_ignored(struct task_struct *t, int sig, bool force) if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) return 0; - if (!sig_task_ignored(t, sig, force)) - return 0; - /* - * Tracers may want to know about even ignored signals. + * Tracers may want to know about even ignored signal unless it + * is SIGKILL which can't be reported anyway but can be ignored + * by SIGNAL_UNKILLABLE task. */ - return !t->ptrace; + if (t->ptrace && sig != SIGKILL) + return 0; + + return sig_task_ignored(t, sig, force); } /* From 94429f353675f4e3e5fefdaf3e01a18d44074f87 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 17 Nov 2017 15:30:04 -0800 Subject: [PATCH 1241/3326] kernel/signal.c: protect the SIGNAL_UNKILLABLE tasks from !sig_kernel_only() signals commit ac25385089f673560867eb5179228a44ade0cfc1 upstream. Change sig_task_ignored() to drop the SIG_DFL && !sig_kernel_only() signals even if force == T. This simplifies the next change and this matches the same check in get_signal() which will drop these signals anyway. Link: http://lkml.kernel.org/r/20171103184227.GC21036@redhat.com Signed-off-by: Oleg Nesterov Tested-by: Kyle Huey Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/signal.c b/kernel/signal.c index 7339d737188590..8882fe994886d8 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -78,7 +78,7 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force) handler = sig_handler(t, sig); if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && - handler == SIG_DFL && !force) + handler == SIG_DFL && !(force && sig_kernel_only(sig))) return 1; return sig_handler_ignored(handler, sig); From 289b51d3c17be8855c5b4be87d8e0e78cc70d511 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 17 Nov 2017 15:30:08 -0800 Subject: [PATCH 1242/3326] kernel/signal.c: remove the no longer needed SIGNAL_UNKILLABLE check in complete_signal() commit 426915796ccaf9c2bd9bb06dc5702225957bc2e5 upstream. complete_signal() checks SIGNAL_UNKILLABLE before it starts to destroy the thread group, today this is wrong in many ways. If nothing else, fatal_signal_pending() should always imply that the whole thread group (except ->group_exit_task if it is not NULL) is killed, this check breaks the rule. 
After the previous changes we can rely on sig_task_ignored(); sig_fatal(sig) && SIGNAL_UNKILLABLE can only be true if we actually want to kill this task and sig == SIGKILL OR it is traced and the debugger can intercept the signal. This should hopefully fix the problem reported by Dmitry. This test-case static int init(void *arg) { for (;;) pause(); } int main(void) { char stack[16 * 1024]; for (;;) { int pid = clone(init, stack + sizeof(stack)/2, CLONE_NEWPID | SIGCHLD, NULL); assert(pid > 0); assert(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0); assert(waitpid(-1, NULL, WSTOPPED) == pid); assert(ptrace(PTRACE_DETACH, pid, 0, SIGSTOP) == 0); assert(syscall(__NR_tkill, pid, SIGKILL) == 0); assert(pid == wait(NULL)); } } triggers the WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)) in task_participate_group_stop(). do_signal_stop()->signal_group_exit() checks SIGNAL_GROUP_EXIT and returns false, but task_set_jobctl_pending() checks fatal_signal_pending() and does not set JOBCTL_STOP_PENDING. And this should fix the minor security problem reported by Kyle, SECCOMP_RET_TRACE can miss fatal_signal_pending() the same way if the task is the root of a pid namespace. Link: http://lkml.kernel.org/r/20171103184246.GD21036@redhat.com Signed-off-by: Oleg Nesterov Reported-by: Dmitry Vyukov Reported-by: Kyle Huey Reviewed-by: Kees Cook Tested-by: Kyle Huey Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/signal.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/signal.c b/kernel/signal.c index 8882fe994886d8..1facff1dbbaecb 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -931,9 +931,9 @@ static void complete_signal(int sig, struct task_struct *p, int group) * then start taking the whole group down immediately. */ if (sig_fatal(p, sig) && - !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && + !(signal->flags & SIGNAL_GROUP_EXIT) && !sigismember(&t->real_blocked, sig) && - (sig == SIGKILL || !t->ptrace)) { + (sig == SIGKILL || !p->ptrace)) { /* * This signal will be fatal to the whole group. */ From 3ea9d6d0ad67c6a978604e5f16af5683c435e082 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Thu, 14 Dec 2017 11:03:01 +0000 Subject: [PATCH 1243/3326] iommu/arm-smmu-v3: Don't free page table ops twice commit 57d72e159b60456c8bb281736c02ddd3164037aa upstream. Kasan reports a double free when finalise_stage_fn fails: the io_pgtable ops are freed by arm_smmu_domain_finalise and then again by arm_smmu_domain_free. Prevent this by leaving pgtbl_ops empty on failure. 
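For illustration only, and not part of the patch: a minimal user-space C sketch of the ownership pattern the fix applies, publishing the ops pointer into the long-lived structure only after the last fallible step succeeds, so the error path and the later teardown path can never both free it. All names below are invented for the sketch.

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_domain {
        void *pgtbl_ops;        /* stays NULL until finalise succeeds */
    };

    static int finalise_stage(struct fake_domain *d)
    {
        (void)d;
        return -1;              /* pretend the stage-specific setup fails */
    }

    static int domain_finalise(struct fake_domain *d)
    {
        void *ops = malloc(64); /* stand-in for the page-table ops allocation */

        if (!ops)
            return -1;

        if (finalise_stage(d) < 0) {
            free(ops);          /* error path frees the local pointer only */
            return -1;
        }

        d->pgtbl_ops = ops;     /* published only on success */
        return 0;
    }

    static void domain_free(struct fake_domain *d)
    {
        free(d->pgtbl_ops);     /* free(NULL) is a no-op, so no double free */
    }

    int main(void)
    {
        struct fake_domain d = { NULL };

        domain_finalise(&d);    /* fails, but leaves d.pgtbl_ops == NULL */
        domain_free(&d);
        printf("teardown after a failed finalise is safe\n");
        return 0;
    }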
Fixes: 48ec83bcbcf5 ("iommu/arm-smmu: Add initial driver support for ARM SMMUv3 devices") Reviewed-by: Robin Murphy Signed-off-by: Jean-Philippe Brucker Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/arm-smmu-v3.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index e67ba6c40faff0..1ff10879438425 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1611,13 +1611,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; domain->geometry.aperture_end = (1UL << ias) - 1; domain->geometry.force_aperture = true; - smmu_domain->pgtbl_ops = pgtbl_ops; ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); - if (ret < 0) + if (ret < 0) { free_io_pgtable_ops(pgtbl_ops); + return ret; + } - return ret; + smmu_domain->pgtbl_ops = pgtbl_ops; + return 0; } static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) From 986128f3a74b45c6d7e639c950b69cdc933d1686 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 2 Jan 2018 12:33:14 +0000 Subject: [PATCH 1244/3326] iommu/arm-smmu-v3: Cope with duplicated Stream IDs commit 563b5cbe334e9503ab2b234e279d500fc4f76018 upstream. For PCI devices behind an aliasing PCIe-to-PCI/X bridge, the bridge alias to DevFn 0.0 on the subordinate bus may match the original RID of the device, resulting in the same SID being present in the device's fwspec twice. This causes trouble later in arm_smmu_write_strtab_ent() when we wind up visiting the STE a second time and find it already live. Avoid the issue by giving arm_smmu_install_ste_for_dev() the cleverness to skip over duplicates. It seems mildly counterintuitive compared to preventing the duplicates from existing in the first place, but since the DT and ACPI probe paths build their fwspecs differently, this is actually the cleanest and most self-contained way to deal with it. Fixes: 8f78515425da ("iommu/arm-smmu: Implement of_xlate() for SMMUv3") Reported-by: Tomasz Nowicki Tested-by: Tomasz Nowicki Tested-by: Jayachandran C. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/arm-smmu-v3.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 1ff10879438425..8f7a3c00b6cf3d 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1646,7 +1646,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) { - int i; + int i, j; struct arm_smmu_master_data *master = fwspec->iommu_priv; struct arm_smmu_device *smmu = master->smmu; @@ -1654,6 +1654,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) u32 sid = fwspec->ids[i]; __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); + /* Bridged PCI devices may end up with duplicated IDs */ + for (j = 0; j < i; j++) + if (fwspec->ids[j] == sid) + break; + if (j < i) + continue; + arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); } } From d17237bbdf70eb2a30301eb94533fe712ba96cb0 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Fri, 8 Dec 2017 08:26:58 -0800 Subject: [PATCH 1245/3326] ARC: uaccess: dont use "l" gcc inline asm constraint modifier commit 79435ac78d160e4c245544d457850a56f805ac0d upstream. This used to setup the LP_COUNT register automatically, but now has been removed. 
There was an earlier fix 3c7c7a2fc8811 which fixed instance in delay.h but somehow missed this one as gcc change had not made its way into production toolchains and was not pedantic as it is now ! Signed-off-by: Vineet Gupta Signed-off-by: Greg Kroah-Hartman --- arch/arc/include/asm/uaccess.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index f35974ee7264a1..c9173c02081c0c 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) return 0; __asm__ __volatile__( + " mov lp_count, %5 \n" " lp 3f \n" "1: ldb.ab %3, [%2, 1] \n" " breq.d %3, 0, 3f \n" @@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) " .word 1b, 4b \n" " .previous \n" : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) - : "g"(-EFAULT), "l"(count) - : "memory"); + : "g"(-EFAULT), "r"(count) + : "lp_count", "lp_start", "lp_end", "memory"); return res; } From 49b52457e1fc9cbf21ba7a3e24fcc4541e0c08d2 Mon Sep 17 00:00:00 2001 From: John Sperbeck Date: Sun, 31 Dec 2017 21:24:58 -0800 Subject: [PATCH 1246/3326] powerpc/mm: Fix SEGV on mapped region to return SEGV_ACCERR commit ecb101aed86156ec7cd71e5dca668e09146e6994 upstream. The recent refactoring of the powerpc page fault handler in commit c3350602e876 ("powerpc/mm: Make bad_area* helper functions") caused access to protected memory regions to indicate SEGV_MAPERR instead of the traditional SEGV_ACCERR in the si_code field of a user-space signal handler. This can confuse debug libraries that temporarily change the protection of memory regions, and expect to use SEGV_ACCERR as an indication to restore access to a region. This commit restores the previous behavior. The following program exhibits the issue: $ ./repro read || echo "FAILED" $ ./repro write || echo "FAILED" $ ./repro exec || echo "FAILED" #include #include #include #include #include #include #include static void segv_handler(int n, siginfo_t *info, void *arg) { _exit(info->si_code == SEGV_ACCERR ? 0 : 1); } int main(int argc, char **argv) { void *p = NULL; struct sigaction act = { .sa_sigaction = segv_handler, .sa_flags = SA_SIGINFO, }; assert(argc == 2); p = mmap(NULL, getpagesize(), (strcmp(argv[1], "write") == 0) ? 
PROT_READ : 0, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); assert(p != MAP_FAILED); assert(sigaction(SIGSEGV, &act, NULL) == 0); if (strcmp(argv[1], "read") == 0) printf("%c", *(unsigned char *)p); else if (strcmp(argv[1], "write") == 0) *(unsigned char *)p = 0; else if (strcmp(argv[1], "exec") == 0) ((void (*)(void))p)(); return 1; /* failed to generate SEGV */ } Fixes: c3350602e876 ("powerpc/mm: Make bad_area* helper functions") Signed-off-by: John Sperbeck Acked-by: Benjamin Herrenschmidt [mpe: Add commit references in change log] Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/fault.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 4797d08581cec3..6e1e3903538065 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address) return __bad_area(regs, address, SEGV_MAPERR); } +static noinline int bad_access(struct pt_regs *regs, unsigned long address) +{ + return __bad_area(regs, address, SEGV_ACCERR); +} + static int do_sigbus(struct pt_regs *regs, unsigned long address, unsigned int fault) { @@ -490,7 +495,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, good_area: if (unlikely(access_error(is_write, is_exec, vma))) - return bad_area(regs, address); + return bad_access(regs, address); /* * If for any reason at all we couldn't handle the fault, From 2df832f7aa68dd1cf454d6fa165f1c61a3cd04e5 Mon Sep 17 00:00:00 2001 From: Aaron Ma Date: Sat, 25 Nov 2017 16:48:41 -0800 Subject: [PATCH 1247/3326] Input: elantech - add new icbody type 15 commit 10d900303f1c3a821eb0bef4e7b7ece16768fba4 upstream. The touchpad of Lenovo Thinkpad L480 reports it's version as 15. Signed-off-by: Aaron Ma Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/mouse/elantech.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index b84cd978fce2da..a4aaa748e987f7 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd) case 5: etd->hw_version = 3; break; - case 6 ... 14: + case 6 ... 15: etd->hw_version = 4; break; default: From 46789641800ca2077acb66c6cbe8e2ce7575113c Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Thu, 30 Nov 2017 16:46:40 -0600 Subject: [PATCH 1248/3326] x86/microcode/AMD: Add support for fam17h microcode loading commit f4e9b7af0cd58dd039a0fb2cd67d57cea4889abf upstream. The size for the Microcode Patch Block (MPB) for an AMD family 17h processor is 3200 bytes. Add a #define for fam17h so that it does not default to 2048 bytes and fail a microcode load/update. 
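A hedged, stand-alone C sketch of the size check being extended (not the kernel code itself; 2048 bytes is the old fallback mentioned above, the other limits are those listed in the patch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Per-family Microcode Patch Block size limits. */
    static unsigned int max_mpb_size(unsigned int family)
    {
        switch (family) {
        case 0x14: return 1824;
        case 0x15: return 4096;
        case 0x16: return 3458;
        case 0x17: return 3200; /* the new fam17h entry */
        default:   return 2048; /* old fallback a fam17h patch would hit */
        }
    }

    static bool patch_size_ok(unsigned int family, unsigned int size)
    {
        return size <= max_mpb_size(family);
    }

    int main(void)
    {
        /* A 3200-byte fam17h patch no longer fails the size check. */
        printf("fam17h 3200-byte patch fits: %s\n",
               patch_size_ok(0x17, 3200) ? "yes" : "no");
        return 0;
    }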
Signed-off-by: Tom Lendacky Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Link: https://lkml.kernel.org/r/20171130224640.15391.40247.stgit@tlendack-t1.amdoffice.net Signed-off-by: Ingo Molnar Cc: Alice Ferrazzi Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/microcode/amd.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index c6daec4bdba5b1..330b8462d426fa 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, #define F14H_MPB_MAX_SIZE 1824 #define F15H_MPB_MAX_SIZE 4096 #define F16H_MPB_MAX_SIZE 3458 +#define F17H_MPB_MAX_SIZE 3200 switch (family) { case 0x14: @@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, case 0x16: max_size = F16H_MPB_MAX_SIZE; break; + case 0x17: + max_size = F17H_MPB_MAX_SIZE; + break; default: max_size = F1XH_MPB_MAX_SIZE; break; From f5edee88ad430356e5c6b1c309782cce3f736272 Mon Sep 17 00:00:00 2001 From: John Johansen Date: Thu, 7 Dec 2017 00:28:27 -0800 Subject: [PATCH 1249/3326] apparmor: fix regression in mount mediation when feature set is pinned MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5b9f57cf47b87f07210875d6a24776b4496b818d upstream. When the mount code was refactored for Labels it was not correctly updated to check whether policy supported mediation of the mount class. This causes a regression when the kernel feature set is reported as supporting mount and policy is pinned to a feature set that does not support mount mediation. BugLink: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=882697#41 Fixes: 2ea3ffb7782a ("apparmor: add mount mediation") Reported-by: Fabian Grünbichler Signed-off-by: John Johansen Signed-off-by: Greg Kroah-Hartman --- security/apparmor/mount.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c index 82a64b58041d2a..e395137ecff150 100644 --- a/security/apparmor/mount.c +++ b/security/apparmor/mount.c @@ -330,6 +330,9 @@ static int match_mnt_path_str(struct aa_profile *profile, AA_BUG(!mntpath); AA_BUG(!buffer); + if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT)) + return 0; + error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer, &mntpnt, &info, profile->disconnected); if (error) @@ -381,6 +384,9 @@ static int match_mnt(struct aa_profile *profile, const struct path *path, AA_BUG(!profile); AA_BUG(devpath && !devbuffer); + if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT)) + return 0; + if (devpath) { error = aa_path_name(devpath, path_flags(profile, devpath), devbuffer, &devname, &info, @@ -559,6 +565,9 @@ static int profile_umount(struct aa_profile *profile, struct path *path, AA_BUG(!profile); AA_BUG(!path); + if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT)) + return 0; + error = aa_path_name(path, path_flags(profile, path), buffer, &name, &info, profile->disconnected); if (error) @@ -614,7 +623,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile, AA_BUG(!new_path); AA_BUG(!old_path); - if (profile_unconfined(profile)) + if (profile_unconfined(profile) || + !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT)) return aa_get_newest_label(&profile->label); error = aa_path_name(old_path, path_flags(profile, old_path), From fb510265edaf4809c050e620231b6ea69507fe82 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Tue, 2 
Jan 2018 20:36:44 +0100 Subject: [PATCH 1250/3326] parisc: Fix alignment of pa_tlb_lock in assembly on 32-bit SMP kernel commit 88776c0e70be0290f8357019d844aae15edaa967 upstream. Qemu for PARISC reported on a 32bit SMP parisc kernel strange failures about "Not-handled unaligned insn 0x0e8011d6 and 0x0c2011c9." Those opcodes evaluate to the ldcw() assembly instruction which requires (on 32bit) an alignment of 16 bytes to ensure atomicity. As it turns out, qemu is correct and in our assembly code in entry.S and pacache.S we don't pay attention to the required alignment. This patch fixes the problem by aligning the lock offset in assembly code in the same manner as we do in our C-code. Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman --- arch/parisc/include/asm/ldcw.h | 2 ++ arch/parisc/kernel/entry.S | 13 +++++++++++-- arch/parisc/kernel/pacache.S | 9 +++++++-- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h index dd5a08aaa4da74..3eb4bfc1fb3654 100644 --- a/arch/parisc/include/asm/ldcw.h +++ b/arch/parisc/include/asm/ldcw.h @@ -12,6 +12,7 @@ for the semaphore. */ #define __PA_LDCW_ALIGNMENT 16 +#define __PA_LDCW_ALIGN_ORDER 4 #define __ldcw_align(a) ({ \ unsigned long __ret = (unsigned long) &(a)->lock[0]; \ __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ @@ -29,6 +30,7 @@ ldcd). */ #define __PA_LDCW_ALIGNMENT 4 +#define __PA_LDCW_ALIGN_ORDER 2 #define __ldcw_align(a) (&(a)->slock) #define __LDCW "ldcw,co" diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index f3cecf5117cf8a..e95207c0565eb1 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -46,6 +47,14 @@ #endif .import pa_tlb_lock,data + .macro load_pa_tlb_lock reg +#if __PA_LDCW_ALIGNMENT > 4 + load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg + depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg +#else + load32 PA(pa_tlb_lock), \reg +#endif + .endm /* space_to_prot macro creates a prot id from a space id */ @@ -457,7 +466,7 @@ .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault #ifdef CONFIG_SMP cmpib,COND(=),n 0,\spc,2f - load32 PA(pa_tlb_lock),\tmp + load_pa_tlb_lock \tmp 1: LDCW 0(\tmp),\tmp1 cmpib,COND(=) 0,\tmp1,1b nop @@ -480,7 +489,7 @@ /* Release pa_tlb_lock lock. */ .macro tlb_unlock1 spc,tmp #ifdef CONFIG_SMP - load32 PA(pa_tlb_lock),\tmp + load_pa_tlb_lock \tmp tlb_unlock0 \spc,\tmp #endif .endm diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index adf7187f89515e..2d40c4ff3f6918 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -36,6 +36,7 @@ #include #include #include +#include #include .text @@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local) .macro tlb_lock la,flags,tmp #ifdef CONFIG_SMP - ldil L%pa_tlb_lock,%r1 - ldo R%pa_tlb_lock(%r1),\la +#if __PA_LDCW_ALIGNMENT > 4 + load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la + depi 0,31,__PA_LDCW_ALIGN_ORDER, \la +#else + load32 pa_tlb_lock, \la +#endif rsm PSW_SM_I,\flags 1: LDCW 0(\la),\tmp cmpib,<>,n 0,\tmp,3f From a0fdea2a407e0f2503f6b80b1a89c44fb58d64e9 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Fri, 5 Jan 2018 21:55:38 +0100 Subject: [PATCH 1251/3326] parisc: qemu idle sleep support commit 310d82784fb4d60c80569f5ca9f53a7f3bf1d477 upstream. Add qemu idle sleep support when running under qemu with SeaBIOS PDC firmware. 
Like the power architecture we use the "or" assembler instructions, which translate to nops on real hardware, to indicate that qemu shall idle sleep. Signed-off-by: Helge Deller Cc: Richard Henderson Signed-off-by: Greg Kroah-Hartman --- arch/parisc/kernel/process.c | 39 ++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 30f92391a93ef6..cad3e8661cd6cf 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -183,6 +184,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) return 1; } +/* + * Idle thread support + * + * Detect when running on QEMU with SeaBIOS PDC Firmware and let + * QEMU idle the host too. + */ + +int running_on_qemu __read_mostly; + +void __cpuidle arch_cpu_idle_dead(void) +{ + /* nop on real hardware, qemu will offline CPU. */ + asm volatile("or %%r31,%%r31,%%r31\n":::); +} + +void __cpuidle arch_cpu_idle(void) +{ + local_irq_enable(); + + /* nop on real hardware, qemu will idle sleep. */ + asm volatile("or %%r10,%%r10,%%r10\n":::); +} + +static int __init parisc_idle_init(void) +{ + const char *marker; + + /* check QEMU/SeaBIOS marker in PAGE0 */ + marker = (char *) &PAGE0->pad0; + running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0); + + if (!running_on_qemu) + cpu_idle_poll_ctrl(1); + + return 0; +} +arch_initcall(parisc_idle_init); + /* * Copy architecture-specific thread state */ From 756ac0b046182487a2b813dd1dadb17e0d7b8b67 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 18 Dec 2017 11:32:45 +0100 Subject: [PATCH 1252/3326] mtd: nand: pxa3xx: Fix READOOB implementation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit fee4380f368e84ed216b62ccd2fbc4126f2bf40b upstream. In the current driver, OOB bytes are accessed in raw mode, and when a page access is done with NDCR_SPARE_EN set and NDCR_ECC_EN cleared, the driver must read the whole spare area (64 bytes in case of a 2k page, 16 bytes for a 512 page). The driver was only reading the free OOB bytes, which was leaving some unread data in the FIFO and was somehow leading to a timeout. We could patch the driver to read ->spare_size + ->ecc_size instead of just ->spare_size when READOOB is requested, but we'd better make in-band and OOB accesses consistent. Since the driver is always accessing in-band data in non-raw mode (with the ECC engine enabled), we should also access OOB data in this mode. That's particularly useful when using the BCH engine because in this mode the free OOB bytes are also ECC protected. 
Fixes: 43bcfd2bb24a ("mtd: nand: pxa3xx: Add driver-specific ECC BCH support") Reported-by: Sean Nyekjær Tested-by: Willy Tarreau Signed-off-by: Boris Brezillon Acked-by: Ezequiel Garcia Tested-by: Sean Nyekjaer Acked-by: Robert Jarzmik Signed-off-by: Richard Weinberger Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/pxa3xx_nand.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 85cff68643e0bd..125b744c9c285c 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -950,6 +950,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command) switch (command) { case NAND_CMD_READ0: + case NAND_CMD_READOOB: case NAND_CMD_PAGEPROG: info->use_ecc = 1; break; From 853bcadeefe18ee0661a302811a17644b1f9e80b Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 15 Dec 2017 13:14:31 +0100 Subject: [PATCH 1253/3326] KVM: s390: fix cmma migration for multiple memory slots commit 32aa144fc32abfcbf7140f473dfbd94c5b9b4105 upstream. When multiple memory slots are present the cmma migration code does not allocate enough memory for the bitmap. The memory slots are sorted in reverse order, so we must use gfn and size of slot[0] instead of the last one. Signed-off-by: Christian Borntraeger Reviewed-by: Claudio Imbrenda Fixes: 190df4a212a7 (KVM: s390: CMMA tracking, ESSA emulation, migration mode) Reviewed-by: Cornelia Huck Signed-off-by: Greg Kroah-Hartman --- arch/s390/kvm/kvm-s390.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 40d0a1a97889b0..b87a930c2201bc 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -794,11 +794,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) if (kvm->arch.use_cmma) { /* - * Get the last slot. They should be sorted by base_gfn, so the - * last slot is also the one at the end of the address space. - * We have verified above that at least one slot is present. + * Get the first slot. They are reverse sorted by base_gfn, so + * the first slot is also the one at the end of the address + * space. We have verified above that at least one slot is + * present. */ - ms = slots->memslots + slots->used_slots - 1; + ms = slots->memslots; /* round up so we only use full longs */ ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG); /* allocate enough bytes to store all the bits */ From 77bbeea7811f0857952d3f096438ce2e1bb16f1a Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 21 Dec 2017 09:18:22 +0100 Subject: [PATCH 1254/3326] KVM: s390: prevent buffer overrun on memory hotplug during migration commit c2cf265d860882b51a200e4a7553c17827f2b730 upstream. We must not go beyond the pre-allocated buffer. This can happen when a new memory slot is added during migration. 
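A hedged, stand-alone C sketch of the guard being added (structure and helper names invented for the sketch): bits are only set for frames that fit inside the bitmap sized when migration started, so a slot hot-added later cannot push writes past the allocation.

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    struct mig_state {
        unsigned long *pgste_bitmap;
        unsigned long bitmap_size;  /* in bits, fixed at migration start */
    };

    static void mark_dirty(struct mig_state *ms, unsigned long gfn)
    {
        if (gfn >= ms->bitmap_size) /* frame from a hot-added slot: skip */
            return;
        ms->pgste_bitmap[gfn / BITS_PER_LONG] |= 1UL << (gfn % BITS_PER_LONG);
    }

    int main(void)
    {
        struct mig_state ms = {
            .pgste_bitmap = calloc(2, sizeof(unsigned long)),
            .bitmap_size = 2 * BITS_PER_LONG,
        };

        mark_dirty(&ms, 5);                    /* inside the buffer: recorded */
        mark_dirty(&ms, ms.bitmap_size + 7);   /* beyond it: silently ignored */
        printf("word0=%lx\n", ms.pgste_bitmap[0]);
        free(ms.pgste_bitmap);
        return 0;
    }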
Reported-by: David Hildenbrand Signed-off-by: Christian Borntraeger Fixes: 190df4a212a7 (KVM: s390: CMMA tracking, ESSA emulation, migration mode) Reviewed-by: Cornelia Huck Reviewed-by: David Hildenbrand Signed-off-by: Greg Kroah-Hartman --- arch/s390/kvm/priv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 5b25287f449b7e..7bd3a59232f014 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -1009,7 +1009,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc) cbrlo[entries] = gfn << PAGE_SHIFT; } - if (orc) { + if (orc && gfn < ms->bitmap_size) { /* increment only if we are really flipping the bit to 1 */ if (!test_and_set_bit(gfn, ms->pgste_bitmap)) atomic64_inc(&ms->dirty_pages); From b8447222eb207d5a5ec20a0f357065963dabdcd0 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 10 Jan 2018 09:31:23 +0100 Subject: [PATCH 1255/3326] Linux 4.14.13 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 20f7d4de0f1c30..a67c5179052a68 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 12 +SUBLEVEL = 13 EXTRAVERSION = NAME = Petit Gorille From 883a082e8bdbcacfa6594ca8f9198491466cba2c Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Wed, 6 Dec 2017 09:27:30 -0800 Subject: [PATCH 1256/3326] dm bufio: fix shrinker scans when (nr_to_scan < retain_target) commit fbc7c07ec23c040179384a1f16b62b6030eb6bdd upstream. When system is under memory pressure it is observed that dm bufio shrinker often reclaims only one buffer per scan. This change fixes the following two issues in dm bufio shrinker that cause this behavior: 1. ((nr_to_scan - freed) <= retain_target) condition is used to terminate slab scan process. This assumes that nr_to_scan is equal to the LRU size, which might not be correct because do_shrink_slab() in vmscan.c calculates nr_to_scan using multiple inputs. As a result when nr_to_scan is less than retain_target (64) the scan will terminate after the first iteration, effectively reclaiming one buffer per scan and making scans very inefficient. This hurts vmscan performance especially because mutex is acquired/released every time dm_bufio_shrink_scan() is called. New implementation uses ((LRU size - freed) <= retain_target) condition for scan termination. LRU size can be safely determined inside __scan() because this function is called after dm_bufio_lock(). 2. do_shrink_slab() uses value returned by dm_bufio_shrink_count() to determine number of freeable objects in the slab. However dm_bufio always retains retain_target buffers in its LRU and will terminate a scan when this mark is reached. Therefore returning the entire LRU size from dm_bufio_shrink_count() is misleading because that does not represent the number of freeable objects that slab will reclaim during a scan. Returning (LRU size - retain_target) better represents the number of freeable objects in the slab. This way do_shrink_slab() returns 0 when (LRU size < retain_target) and vmscan will not try to scan this shrinker avoiding scans that will not reclaim any memory. 
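A small worked example of the new count arithmetic (a sketch assuming the 64-buffer retain target quoted above, not the driver code itself):

    #include <stdio.h>

    /* Freeable objects reported to vmscan: everything above the retain target. */
    static unsigned long shrink_count(unsigned long n_clean, unsigned long n_dirty,
                                      unsigned long retain_target)
    {
        unsigned long count = n_clean + n_dirty;

        return (count < retain_target) ? 0 : (count - retain_target);
    }

    int main(void)
    {
        /* 100 buffers with a retain target of 64: 36 are reported freeable. */
        printf("%lu\n", shrink_count(70, 30, 64));
        /* 50 buffers: 0 is reported, so vmscan skips a pointless scan. */
        printf("%lu\n", shrink_count(30, 20, 64));
        return 0;
    }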
Test: tested using Android device running /system/extras/alloc-stress that generates memory pressure and causes intensive shrinker scans Signed-off-by: Suren Baghdasaryan Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-bufio.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 8e3adcb46851e8..6d416fdc25cb2c 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, int l; struct dm_buffer *b, *tmp; unsigned long freed = 0; - unsigned long count = nr_to_scan; + unsigned long count = c->n_buffers[LIST_CLEAN] + + c->n_buffers[LIST_DIRTY]; unsigned long retain_target = get_retain_buffers(c); for (l = 0; l < LIST_SIZE; l++) { @@ -1647,8 +1648,11 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); + unsigned long count = ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); + unsigned long retain_target = get_retain_buffers(c); - return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); + return (count < retain_target) ? 0 : (count - retain_target); } /* From 653c41ac4729261cb356ee1aff0f3f4f342be1eb Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 14 Dec 2017 17:40:50 -0800 Subject: [PATCH 1257/3326] KVM: Fix stack-out-of-bounds read in write_mmio MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit e39d200fa5bf5b94a0948db0dae44c1b73b84a56 upstream. Reported by syzkaller: BUG: KASAN: stack-out-of-bounds in write_mmio+0x11e/0x270 [kvm] Read of size 8 at addr ffff8803259df7f8 by task syz-executor/32298 CPU: 6 PID: 32298 Comm: syz-executor Tainted: G OE 4.15.0-rc2+ #18 Hardware name: LENOVO ThinkCentre M8500t-N000/SHARKBAY, BIOS FBKTC1AUS 02/16/2016 Call Trace: dump_stack+0xab/0xe1 print_address_description+0x6b/0x290 kasan_report+0x28a/0x370 write_mmio+0x11e/0x270 [kvm] emulator_read_write_onepage+0x311/0x600 [kvm] emulator_read_write+0xef/0x240 [kvm] emulator_fix_hypercall+0x105/0x150 [kvm] em_hypercall+0x2b/0x80 [kvm] x86_emulate_insn+0x2b1/0x1640 [kvm] x86_emulate_instruction+0x39a/0xb90 [kvm] handle_exception+0x1b4/0x4d0 [kvm_intel] vcpu_enter_guest+0x15a0/0x2640 [kvm] kvm_arch_vcpu_ioctl_run+0x549/0x7d0 [kvm] kvm_vcpu_ioctl+0x479/0x880 [kvm] do_vfs_ioctl+0x142/0x9a0 SyS_ioctl+0x74/0x80 entry_SYSCALL_64_fastpath+0x23/0x9a The path of patched vmmcall will patch 3 bytes opcode 0F 01 C1(vmcall) to the guest memory, however, write_mmio tracepoint always prints 8 bytes through *(u64 *)val since kvm splits the mmio access into 8 bytes. This leaks 5 bytes from the kernel stack (CVE-2017-17741). This patch fixes it by just accessing the bytes which we operate on. Before patch: syz-executor-5567 [007] .... 51370.561696: kvm_mmio: mmio write len 3 gpa 0x10 val 0x1ffff10077c1010f After patch: syz-executor-13416 [002] .... 
51302.299573: kvm_mmio: mmio write len 3 gpa 0x10 val 0xc1010f Reported-by: Dmitry Vyukov Reviewed-by: Darren Kenny Reviewed-by: Marc Zyngier Tested-by: Marc Zyngier Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Marc Zyngier Cc: Christoffer Dall Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini Cc: Mathieu Desnoyers Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 8 ++++---- include/trace/events/kvm.h | 7 +++++-- virt/kvm/arm/mmio.c | 6 +++--- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 075619a92ce71d..575c8953cc9a35 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4362,7 +4362,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) addr, n, v)) && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) break; - trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); + trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); handled += n; addr += n; len -= n; @@ -4621,7 +4621,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) { if (vcpu->mmio_read_completed) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, - vcpu->mmio_fragments[0].gpa, *(u64 *)val); + vcpu->mmio_fragments[0].gpa, val); vcpu->mmio_read_completed = 0; return 1; } @@ -4643,14 +4643,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) { - trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); + trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val); return vcpu_mmio_write(vcpu, gpa, bytes, val); } static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { - trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); + trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL); return X86EMUL_IO_NEEDED; } diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index e4b0b8e099325f..2c735a3e66133f 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -211,7 +211,7 @@ TRACE_EVENT(kvm_ack_irq, { KVM_TRACE_MMIO_WRITE, "write" } TRACE_EVENT(kvm_mmio, - TP_PROTO(int type, int len, u64 gpa, u64 val), + TP_PROTO(int type, int len, u64 gpa, void *val), TP_ARGS(type, len, gpa, val), TP_STRUCT__entry( @@ -225,7 +225,10 @@ TRACE_EVENT(kvm_mmio, __entry->type = type; __entry->len = len; __entry->gpa = gpa; - __entry->val = val; + __entry->val = 0; + if (val) + memcpy(&__entry->val, val, + min_t(u32, sizeof(__entry->val), len)); ), TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx", diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c index b6e715fd3c90af..dac7ceb1a67774 100644 --- a/virt/kvm/arm/mmio.c +++ b/virt/kvm/arm/mmio.c @@ -112,7 +112,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) } trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, - data); + &data); data = vcpu_data_host_to_guest(vcpu, data, len); vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data); } @@ -182,14 +182,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), len); - trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data); + trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data); kvm_mmio_write_buf(data_buf, len, data); ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len, data_buf); } else { trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len, - fault_ipa, 0); + fault_ipa, NULL); ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len, data_buf); From 
b043ea189d0fbd2f7a0b8d177109f60f5cd767f2 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Sat, 2 Dec 2017 18:48:52 +0100 Subject: [PATCH 1258/3326] can: vxcan: improve handling of missing peer name attribute commit b4c2951a4833e66f1bbfe65ddcd4fdcdfafe5e8f upstream. Picking up the patch from Serhey Popovych (commit 191cdb3822e5df6b3c8, "veth: Be more robust on network device creation when no attributes"). When the peer name attribute is not provided the former implementation tries to register the given device name twice ... which leads to -EEXIST. If only one device name is given apply an automatic generated and valid name for the peer. Cc: Serhey Popovych Signed-off-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/vxcan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 8404e8852a0f96..b4c4a2c764378e 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, tbp = peer_tb; } - if (tbp[IFLA_IFNAME]) { + if (ifmp && tbp[IFLA_IFNAME]) { nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { From 83f8d47b31f52b40f0158581c41c086c00fcad5b Mon Sep 17 00:00:00 2001 From: Wolfgang Grandegger Date: Wed, 13 Dec 2017 19:52:23 +0100 Subject: [PATCH 1259/3326] can: gs_usb: fix return value of the "set_bittiming" callback commit d5b42e6607661b198d8b26a0c30969605b1bf5c7 upstream. The "set_bittiming" callback treats a positive return value as error! For that reason "can_changelink()" will quit silently after setting the bittiming values without processing ctrlmode, restart-ms, etc. Signed-off-by: Wolfgang Grandegger Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/gs_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 68ac3e88a8cecb..8bf80ad9dc44cf 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev) dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", rc); - return rc; + return (rc > 0) ? 0 : rc; } static void gs_usb_xmit_callback(struct urb *urb) From 68807cb2b38b2e10d01e5c033eb818836b250fd1 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 3 Jan 2018 13:39:15 -0800 Subject: [PATCH 1260/3326] IB/srpt: Disable RDMA access by the initiator commit bec40c26041de61162f7be9d2ce548c756ce0f65 upstream. With the SRP protocol all RDMA operations are initiated by the target. Since no RDMA operations are initiated by the initiator, do not grant the initiator permission to submit RDMA reads or writes to the target. 
Signed-off-by: Bart Van Assche Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/srpt/ib_srpt.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 95178b4e3565d4..fbc142a7b883d1 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1000,8 +1000,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) return -ENOMEM; attr->qp_state = IB_QPS_INIT; - attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | - IB_ACCESS_REMOTE_WRITE; + attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE; attr->port_num = ch->sport->port; attr->pkey_index = 0; From 14af4f4c21968168209b37838bc8c571b562a0a6 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 3 Jan 2018 13:39:16 -0800 Subject: [PATCH 1261/3326] IB/srpt: Fix ACL lookup during login commit a1ffa4670cb97ae3a4b3e8535d88be5f643f7c3b upstream. Make sure that the initiator port GUID is stored in ch->ini_guid. Note: when initiating a connection sgid and dgid members in struct sa_path_rec represent the source and destination GIDs. When accepting a connection however sgid represents the destination GID and dgid the source GID. Fixes: commit 2bce1a6d2209 ("IB/srpt: Accept GUIDs as port names") Signed-off-by: Bart Van Assche Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/srpt/ib_srpt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index fbc142a7b883d1..ee578fa713c28d 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1991,7 +1991,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, goto destroy_ib; } - guid = (__be16 *)¶m->primary_path->sgid.global.interface_id; + guid = (__be16 *)¶m->primary_path->dgid.global.interface_id; snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x", be16_to_cpu(guid[0]), be16_to_cpu(guid[1]), be16_to_cpu(guid[2]), be16_to_cpu(guid[3])); From a8caed0170147c55daf79793d72ed01b16610cf1 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Mon, 27 Nov 2017 09:33:03 +0000 Subject: [PATCH 1262/3326] MIPS: Validate PR_SET_FP_MODE prctl(2) requests against the ABI of the task commit b67336eee3fcb8ecedc6c13e2bf88aacfa3151e2 upstream. Fix an API loophole introduced with commit 9791554b45a2 ("MIPS,prctl: add PR_[GS]ET_FP_MODE prctl options for MIPS"), where the caller of prctl(2) is incorrectly allowed to make a change to CP0.Status.FR or CP0.Config5.FRE register bits even if CONFIG_MIPS_O32_FP64_SUPPORT has not been enabled, despite that an executable requesting the mode requested via ELF file annotation would not be allowed to run in the first place, or for n64 and n64 ABI tasks which do not have non-default modes defined at all. Add suitable checks to `mips_set_process_fp_mode' and bail out if an invalid mode change has been requested for the ABI in effect, even if the FPU hardware or emulation would otherwise allow it. Always succeed however without taking any further action if the mode requested is the same as one already in effect, regardless of whether any mode change, should it be requested, would actually be allowed for the task concerned. Signed-off-by: Maciej W. 
Rozycki Fixes: 9791554b45a2 ("MIPS,prctl: add PR_[GS]ET_FP_MODE prctl options for MIPS") Reviewed-by: Paul Burton Cc: James Hogan Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/17800/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/process.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index c5ff6bfe2825b9..2f2d176396aa51 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -705,6 +705,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) struct task_struct *t; int max_users; + /* If nothing to change, return right away, successfully. */ + if (value == mips_get_process_fp_mode(task)) + return 0; + + /* Only accept a mode change if 64-bit FP enabled for o32. */ + if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) + return -EOPNOTSUPP; + + /* And only for o32 tasks. */ + if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS)) + return -EOPNOTSUPP; + /* Check the value is valid */ if (value & ~known_bits) return -EOPNOTSUPP; From bc511ace4e306b0b0451a6169efe3f5e85d9d6c4 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Mon, 11 Dec 2017 22:51:35 +0000 Subject: [PATCH 1263/3326] MIPS: Factor out NT_PRFPREG regset access helpers commit a03fe72572c12e98f4173f8a535f32468e48b6ec upstream. In preparation to fix a commit 72b22bbad1e7 ("MIPS: Don't assume 64-bit FP registers for FP regset") FCSR access regression factor out NT_PRFPREG regset access helpers for the non-MSA and the MSA variants respectively, to avoid having to deal with excessive indentation in the actual fix. No functional change, however use `target->thread.fpu.fpr[0]' rather than `target->thread.fpu.fpr[i]' for FGR holding type size determination as there's no `i' variable to refer to anymore, and for the factored out `i' variable declaration use `unsigned int' rather than `unsigned' as its type, following the common style. Signed-off-by: Maciej W. Rozycki Fixes: 72b22bbad1e7 ("MIPS: Don't assume 64-bit FP registers for FP regset") Cc: James Hogan Cc: Paul Burton Cc: Alex Smith Cc: Dave Martin Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/17925/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/ptrace.c | 108 +++++++++++++++++++++++++++++--------- 1 file changed, 83 insertions(+), 25 deletions(-) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 5a09c2901a7605..c934e6d4bdcb3c 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -410,25 +410,36 @@ static int gpr64_set(struct task_struct *target, #endif /* CONFIG_64BIT */ -static int fpr_get(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) +/* + * Copy the floating-point context to the supplied NT_PRFPREG buffer, + * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots + * correspond 1:1 to buffer slots. 
+ */ +static int fpr_get_fpa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + void **kbuf, void __user **ubuf) { - unsigned i; - int err; - u64 fpr_val; - - /* XXX fcr31 */ + return user_regset_copyout(pos, count, kbuf, ubuf, + &target->thread.fpu, + 0, sizeof(elf_fpregset_t)); +} - if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.fpu, - 0, sizeof(elf_fpregset_t)); +/* + * Copy the floating-point context to the supplied NT_PRFPREG buffer, + * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's + * general register slots are copied to buffer slots. + */ +static int fpr_get_msa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + void **kbuf, void __user **ubuf) +{ + unsigned int i; + u64 fpr_val; + int err; for (i = 0; i < NUM_FPU_REGS; i++) { fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); - err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + err = user_regset_copyout(pos, count, kbuf, ubuf, &fpr_val, i * sizeof(elf_fpreg_t), (i + 1) * sizeof(elf_fpreg_t)); if (err) @@ -438,27 +449,54 @@ static int fpr_get(struct task_struct *target, return 0; } -static int fpr_set(struct task_struct *target, +/* Copy the floating-point context to the supplied NT_PRFPREG buffer. */ +static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) + void *kbuf, void __user *ubuf) { - unsigned i; int err; - u64 fpr_val; /* XXX fcr31 */ - init_fp_ctx(target); + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) + err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf); + else + err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf); + + return err; +} - if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) - return user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.fpu, - 0, sizeof(elf_fpregset_t)); +/* + * Copy the supplied NT_PRFPREG buffer to the floating-point context, + * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP + * context's general register slots. + */ +static int fpr_set_fpa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + const void **kbuf, const void __user **ubuf) +{ + return user_regset_copyin(pos, count, kbuf, ubuf, + &target->thread.fpu, + 0, sizeof(elf_fpregset_t)); +} + +/* + * Copy the supplied NT_PRFPREG buffer to the floating-point context, + * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64 + * bits only of FP context's general register slots. + */ +static int fpr_set_msa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + const void **kbuf, const void __user **ubuf) +{ + unsigned int i; + u64 fpr_val; + int err; BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); - for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) { - err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + for (i = 0; i < NUM_FPU_REGS && *count >= sizeof(elf_fpreg_t); i++) { + err = user_regset_copyin(pos, count, kbuf, ubuf, &fpr_val, i * sizeof(elf_fpreg_t), (i + 1) * sizeof(elf_fpreg_t)); if (err) @@ -469,6 +507,26 @@ static int fpr_set(struct task_struct *target, return 0; } +/* Copy the supplied NT_PRFPREG buffer to the floating-point context. 
*/ +static int fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int err; + + /* XXX fcr31 */ + + init_fp_ctx(target); + + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) + err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf); + else + err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf); + + return err; +} + enum mips_regset { REGSET_GPR, REGSET_FPR, From a38f387c48c3b364c927e9d983b116824820ed38 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Mon, 11 Dec 2017 22:52:15 +0000 Subject: [PATCH 1264/3326] MIPS: Guard against any partial write attempt with PTRACE_SETREGSET commit dc24d0edf33c3e15099688b6bbdf7bdc24bf6e91 upstream. Complement commit d614fd58a283 ("mips/ptrace: Preserve previous registers for short regset write") and ensure that no partial register write attempt is made with PTRACE_SETREGSET, as we do not preinitialize any temporaries used to hold incoming register data and consequently random data could be written. It is the responsibility of the caller, such as `ptrace_regset', to arrange for writes to span whole registers only, so here we only assert that it has indeed happened. Signed-off-by: Maciej W. Rozycki Fixes: 72b22bbad1e7 ("MIPS: Don't assume 64-bit FP registers for FP regset") Cc: James Hogan Cc: Paul Burton Cc: Alex Smith Cc: Dave Martin Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/17926/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/ptrace.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index c934e6d4bdcb3c..7aefd81966812f 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -507,7 +507,15 @@ static int fpr_set_msa(struct task_struct *target, return 0; } -/* Copy the supplied NT_PRFPREG buffer to the floating-point context. */ +/* + * Copy the supplied NT_PRFPREG buffer to the floating-point context. + * + * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', + * which is supposed to have been guaranteed by the kernel before + * calling us, e.g. in `ptrace_regset'. We enforce that requirement, + * so that we can safely avoid preinitializing temporaries for + * partial register writes. + */ static int fpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, @@ -515,6 +523,8 @@ static int fpr_set(struct task_struct *target, { int err; + BUG_ON(count % sizeof(elf_fpreg_t)); + /* XXX fcr31 */ init_fp_ctx(target); From 323429be3b2f464b2e51f0c544e3dd6de4dadb79 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Mon, 11 Dec 2017 22:53:14 +0000 Subject: [PATCH 1265/3326] MIPS: Consistently handle buffer counter with PTRACE_SETREGSET commit 80b3ffce0196ea50068885d085ff981e4b8396f4 upstream. Update commit d614fd58a283 ("mips/ptrace: Preserve previous registers for short regset write") bug and consistently consume all data supplied to `fpr_set_msa' with the ptrace(2) PTRACE_SETREGSET request, such that a zero data buffer counter is returned where insufficient data has been given to fill a whole number of FP general registers. 
In reality this is not going to happen, as the caller is supposed to only supply data covering a whole number of registers and it is verified in `ptrace_regset' and again asserted in `fpr_set', however structuring code such that the presence of trailing partial FP general register data causes `fpr_set_msa' to return with a non-zero data buffer counter makes it appear that this trailing data will be used if there are subsequent writes made to FP registers, which is going to be the case with the FCSR once the missing write to that register has been fixed. Fixes: d614fd58a283 ("mips/ptrace: Preserve previous registers for short regset write") Signed-off-by: Maciej W. Rozycki Cc: James Hogan Cc: Paul Burton Cc: Alex Smith Cc: Dave Martin Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/17927/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/ptrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 7aefd81966812f..3c5e0446b28dd5 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -495,7 +495,7 @@ static int fpr_set_msa(struct task_struct *target, int err; BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); - for (i = 0; i < NUM_FPU_REGS && *count >= sizeof(elf_fpreg_t); i++) { + for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) { err = user_regset_copyin(pos, count, kbuf, ubuf, &fpr_val, i * sizeof(elf_fpreg_t), (i + 1) * sizeof(elf_fpreg_t)); From 338ca3563dac0cf96b43936def3a6bc92bbc1c21 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Mon, 11 Dec 2017 22:54:33 +0000 Subject: [PATCH 1266/3326] MIPS: Fix an FCSR access API regression with NT_PRFPREG and MSA commit be07a6a1188372b6d19a3307ec33211fc9c9439d upstream. Fix a commit 72b22bbad1e7 ("MIPS: Don't assume 64-bit FP registers for FP regset") public API regression, then activated by commit 1db1af84d6df ("MIPS: Basic MSA context switching support"), that caused the FCSR register not to be read or written for CONFIG_CPU_HAS_MSA kernel configurations (regardless of actual presence or absence of the MSA feature in a given processor) with ptrace(2) PTRACE_GETREGSET and PTRACE_SETREGSET requests nor recorded in core dumps. This is because with !CONFIG_CPU_HAS_MSA configurations the whole of `elf_fpregset_t' array is bulk-copied as it is, which includes the FCSR in one half of the last, 33rd slot, whereas with CONFIG_CPU_HAS_MSA configurations array elements are copied individually, and then only the leading 32 FGR slots while the remaining slot is ignored. Correct the code then such that only FGR slots are copied in the respective !MSA and MSA helpers an then the FCSR slot is handled separately in common code. Use `ptrace_setfcr31' to update the FCSR too, so that the read-only mask is respected. Retrieving a correct value of FCSR is important in debugging not only for the human to be able to get the right interpretation of the situation, but for correct operation of GDB as well. This is because the condition code bits in FSCR are used by GDB to determine the location to place a breakpoint at when single-stepping through an FPU branch instruction. If such a breakpoint is placed incorrectly (i.e. with the condition reversed), then it will be missed, likely causing the debuggee to run away from the control of GDB and consequently breaking the process of investigation. 
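The layout in question, 32 doubleword FGR slots followed by a 33rd slot whose low half carries FCSR, is what determines the fcr31_pos offset used later in fpr_get()/fpr_set(). A small user-space sketch of that arithmetic, with local stand-in typedefs rather than the kernel's headers:

#include <stdint.h>
#include <stdio.h>

#define NUM_FPU_REGS 32
typedef uint64_t elf_fpreg_t;			/* stand-in: a doubleword slot */
typedef elf_fpreg_t elf_fpregset_t[NUM_FPU_REGS + 1];	/* 32 FGRs + FCSR slot */

int main(void)
{
	const unsigned int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);

	printf("regset size      : %zu bytes\n", sizeof(elf_fpregset_t));
	printf("FCSR slot offset : %u bytes\n", fcr31_pos);
	printf("FCSR field       : bytes [%u, %u)\n",
	       fcr31_pos, fcr31_pos + (unsigned int)sizeof(uint32_t));
	return 0;
}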
Fortunately GDB continues using the older PTRACE_GETFPREGS ptrace(2) request which is unaffected, so the regression only really hits with post-mortem debug sessions using a core dump file, in which case execution, and consequently single-stepping through branches is not possible. Of course core files created by buggy kernels out there will have the value of FCSR recorded clobbered, but such core files cannot be corrected and the person using them simply will have to be aware that the value of FCSR retrieved is not reliable. Which also means we can likely get away without defining a replacement API which would ensure a correct value of FSCR to be retrieved, or none at all. This is based on previous work by Alex Smith, extensively rewritten. Signed-off-by: Alex Smith Signed-off-by: James Hogan Signed-off-by: Maciej W. Rozycki Fixes: 72b22bbad1e7 ("MIPS: Don't assume 64-bit FP registers for FP regset") Cc: Paul Burton Cc: Dave Martin Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/17928/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/ptrace.c | 47 ++++++++++++++++++++++++++++++--------- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 3c5e0446b28dd5..cad6a442ff8221 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -413,7 +413,7 @@ static int gpr64_set(struct task_struct *target, /* * Copy the floating-point context to the supplied NT_PRFPREG buffer, * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots - * correspond 1:1 to buffer slots. + * correspond 1:1 to buffer slots. Only general registers are copied. */ static int fpr_get_fpa(struct task_struct *target, unsigned int *pos, unsigned int *count, @@ -421,13 +421,14 @@ static int fpr_get_fpa(struct task_struct *target, { return user_regset_copyout(pos, count, kbuf, ubuf, &target->thread.fpu, - 0, sizeof(elf_fpregset_t)); + 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); } /* * Copy the floating-point context to the supplied NT_PRFPREG buffer, * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's - * general register slots are copied to buffer slots. + * general register slots are copied to buffer slots. Only general + * registers are copied. */ static int fpr_get_msa(struct task_struct *target, unsigned int *pos, unsigned int *count, @@ -449,20 +450,29 @@ static int fpr_get_msa(struct task_struct *target, return 0; } -/* Copy the floating-point context to the supplied NT_PRFPREG buffer. */ +/* + * Copy the floating-point context to the supplied NT_PRFPREG buffer. + * Choose the appropriate helper for general registers, and then copy + * the FCSR register separately. + */ static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); int err; - /* XXX fcr31 */ - if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf); else err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf); + if (err) + return err; + + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fcr31, + fcr31_pos, fcr31_pos + sizeof(u32)); return err; } @@ -470,7 +480,7 @@ static int fpr_get(struct task_struct *target, /* * Copy the supplied NT_PRFPREG buffer to the floating-point context, * !CONFIG_CPU_HAS_MSA variant. 
Buffer slots correspond 1:1 to FP - * context's general register slots. + * context's general register slots. Only general registers are copied. */ static int fpr_set_fpa(struct task_struct *target, unsigned int *pos, unsigned int *count, @@ -478,13 +488,14 @@ static int fpr_set_fpa(struct task_struct *target, { return user_regset_copyin(pos, count, kbuf, ubuf, &target->thread.fpu, - 0, sizeof(elf_fpregset_t)); + 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); } /* * Copy the supplied NT_PRFPREG buffer to the floating-point context, * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64 - * bits only of FP context's general register slots. + * bits only of FP context's general register slots. Only general + * registers are copied. */ static int fpr_set_msa(struct task_struct *target, unsigned int *pos, unsigned int *count, @@ -509,6 +520,8 @@ static int fpr_set_msa(struct task_struct *target, /* * Copy the supplied NT_PRFPREG buffer to the floating-point context. + * Choose the appropriate helper for general registers, and then copy + * the FCSR register separately. * * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', * which is supposed to have been guaranteed by the kernel before @@ -521,18 +534,30 @@ static int fpr_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); + u32 fcr31; int err; BUG_ON(count % sizeof(elf_fpreg_t)); - /* XXX fcr31 */ - init_fp_ctx(target); if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf); else err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf); + if (err) + return err; + + if (count > 0) { + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &fcr31, + fcr31_pos, fcr31_pos + sizeof(u32)); + if (err) + return err; + + ptrace_setfcr31(target, fcr31); + } return err; } From 5baee6683437639d095234bbb86d29696a8293f6 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Mon, 11 Dec 2017 22:55:40 +0000 Subject: [PATCH 1267/3326] MIPS: Also verify sizeof `elf_fpreg_t' with PTRACE_SETREGSET commit 006501e039eec411842bb3150c41358867d320c2 upstream. Complement commit d614fd58a283 ("mips/ptrace: Preserve previous registers for short regset write") and like with the PTRACE_GETREGSET ptrace(2) request also apply a BUILD_BUG_ON check for the size of the `elf_fpreg_t' type in the PTRACE_SETREGSET request handler. Signed-off-by: Maciej W. Rozycki Fixes: d614fd58a283 ("mips/ptrace: Preserve previous registers for short regset write") Cc: James Hogan Cc: Paul Burton Cc: Alex Smith Cc: Dave Martin Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/17929/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/ptrace.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index cad6a442ff8221..263defbbd50c70 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -438,6 +438,7 @@ static int fpr_get_msa(struct task_struct *target, u64 fpr_val; int err; + BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); for (i = 0; i < NUM_FPU_REGS; i++) { fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); err = user_regset_copyout(pos, count, kbuf, ubuf, From 3cb77e7b1cc0fdac101c44994555a1d0e056e99d Mon Sep 17 00:00:00 2001 From: "Maciej W. 
Rozycki" Date: Mon, 11 Dec 2017 22:56:54 +0000 Subject: [PATCH 1268/3326] MIPS: Disallow outsized PTRACE_SETREGSET NT_PRFPREG regset accesses commit c8c5a3a24d395b14447a9a89d61586a913840a3b upstream. Complement commit c23b3d1a5311 ("MIPS: ptrace: Change GP regset to use correct core dump register layout") and also reject outsized PTRACE_SETREGSET requests to the NT_PRFPREG regset, like with the NT_PRSTATUS regset. Signed-off-by: Maciej W. Rozycki Fixes: c23b3d1a5311 ("MIPS: ptrace: Change GP regset to use correct core dump register layout") Cc: James Hogan Cc: Paul Burton Cc: Alex Smith Cc: Dave Martin Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/17930/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/ptrace.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 263defbbd50c70..c552c20237d4f7 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -541,6 +541,9 @@ static int fpr_set(struct task_struct *target, BUG_ON(count % sizeof(elf_fpreg_t)); + if (pos + count > sizeof(elf_fpregset_t)) + return -EIO; + init_fp_ctx(target); if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) From 0196bdf590e3b0530a538c162a3d8fe46a363b5b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 20 Dec 2017 07:09:19 -0800 Subject: [PATCH 1269/3326] cgroup: fix css_task_iter crash on CSS_TASK_ITER_PROC commit 74d0833c659a8a54735e5efdd44f4b225af68586 upstream. While teaching css_task_iter to handle skipping over tasks which aren't group leaders, bc2fb7ed089f ("cgroup: add @flags to css_task_iter_start() and implement CSS_TASK_ITER_PROCS") introduced a silly bug. CSS_TASK_ITER_PROCS is implemented by repeating css_task_iter_advance() while the advanced cursor is pointing to a non-leader thread. However, the cursor variable, @l, wasn't updated when the iteration has to advance to the next css_set and the following repetition would operate on the terminal @l from the previous iteration which isn't pointing to a valid task leading to oopses like the following or infinite looping. BUG: unable to handle kernel NULL pointer dereference at 0000000000000254 IP: __task_pid_nr_ns+0xc7/0xf0 PGD 0 P4D 0 Oops: 0000 [#1] SMP ... CPU: 2 PID: 1 Comm: systemd Not tainted 4.14.4-200.fc26.x86_64 #1 Hardware name: System manufacturer System Product Name/PRIME B350M-A, BIOS 3203 11/09/2017 task: ffff88c4baee8000 task.stack: ffff96d5c3158000 RIP: 0010:__task_pid_nr_ns+0xc7/0xf0 RSP: 0018:ffff96d5c315bd50 EFLAGS: 00010206 RAX: 0000000000000000 RBX: ffff88c4b68c6000 RCX: 0000000000000250 RDX: ffffffffa5e47960 RSI: 0000000000000000 RDI: ffff88c490f6ab00 RBP: ffff96d5c315bd50 R08: 0000000000001000 R09: 0000000000000005 R10: ffff88c4be006b80 R11: ffff88c42f1b8004 R12: ffff96d5c315bf18 R13: ffff88c42d7dd200 R14: ffff88c490f6a510 R15: ffff88c4b68c6000 FS: 00007f9446f8ea00(0000) GS:ffff88c4be680000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000254 CR3: 00000007f956f000 CR4: 00000000003406e0 Call Trace: cgroup_procs_show+0x19/0x30 cgroup_seqfile_show+0x4c/0xb0 kernfs_seq_show+0x21/0x30 seq_read+0x2ec/0x3f0 kernfs_fop_read+0x134/0x180 __vfs_read+0x37/0x160 ? 
security_file_permission+0x9b/0xc0 vfs_read+0x8e/0x130 SyS_read+0x55/0xc0 entry_SYSCALL_64_fastpath+0x1a/0xa5 RIP: 0033:0x7f94455f942d RSP: 002b:00007ffe81ba2d00 EFLAGS: 00000293 ORIG_RAX: 0000000000000000 RAX: ffffffffffffffda RBX: 00005574e2233f00 RCX: 00007f94455f942d RDX: 0000000000001000 RSI: 00005574e2321a90 RDI: 000000000000002b RBP: 0000000000000000 R08: 00005574e2321a90 R09: 00005574e231de60 R10: 00007f94458c8b38 R11: 0000000000000293 R12: 00007f94458c8ae0 R13: 00007ffe81ba3800 R14: 0000000000000000 R15: 00005574e2116560 Code: 04 74 0e 89 f6 48 8d 04 76 48 8d 04 c5 f0 05 00 00 48 8b bf b8 05 00 00 48 01 c7 31 c0 48 8b 0f 48 85 c9 74 18 8b b2 30 08 00 00 <3b> 71 04 77 0d 48 c1 e6 05 48 01 f1 48 3b 51 38 74 09 5d c3 8b RIP: __task_pid_nr_ns+0xc7/0xf0 RSP: ffff96d5c315bd50 Fix it by moving the initialization of the cursor below the repeat label. While at it, rename it to @next for readability. Signed-off-by: Tejun Heo Fixes: bc2fb7ed089f ("cgroup: add @flags to css_task_iter_start() and implement CSS_TASK_ITER_PROCS") Reported-by: Laura Abbott Reported-by: Bronek Kozicki Reported-by: George Amanakis Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- kernel/cgroup/cgroup.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 44857278eb8aa6..030e4286f14c71 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4059,26 +4059,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it) static void css_task_iter_advance(struct css_task_iter *it) { - struct list_head *l = it->task_pos; + struct list_head *next; lockdep_assert_held(&css_set_lock); - WARN_ON_ONCE(!l); - repeat: /* * Advance iterator to find next entry. cset->tasks is consumed * first and then ->mg_tasks. After ->mg_tasks, we move onto the * next cset. */ - l = l->next; + next = it->task_pos->next; - if (l == it->tasks_head) - l = it->mg_tasks_head->next; + if (next == it->tasks_head) + next = it->mg_tasks_head->next; - if (l == it->mg_tasks_head) + if (next == it->mg_tasks_head) css_task_iter_advance_css_set(it); else - it->task_pos = l; + it->task_pos = next; /* if PROCS, skip over tasks which aren't group leaders */ if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos && From 38244295d9e7663919d606cb9a005f7dee3b7799 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 3 Jan 2018 14:31:38 -0800 Subject: [PATCH 1270/3326] kvm: vmx: Scrub hardware GPRs at VM-exit commit 0cb5b30698fdc8f6b4646012e3acb4ddce430788 upstream. Guest GPR values are live in the hardware GPRs at VM-exit. Do not leave any guest values in hardware GPRs after the guest GPR values are saved to the vcpu_vmx structure. This is a partial mitigation for CVE 2017-5715 and CVE 2017-5753. Specifically, it defeats the Project Zero PoC for CVE 2017-5715. 
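The scrubbing itself is plain xor-to-self, and the 32-bit forms used in the vmx.c hunk below are sufficient because writes to 32-bit registers zero-extend into the full 64-bit register on x86-64. The following is a user-space demonstration of that property only, not of the VM-exit path; it assumes an x86-64 compiler with GNU inline asm.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0xdeadbeefcafef00dULL;

	/* Load a 64-bit value into r8, clear it with the 32-bit xor form,
	 * then read the full 64-bit register back out. */
	asm volatile("mov %1, %%r8\n\t"
		     "xor %%r8d, %%r8d\n\t"
		     "mov %%r8, %0"
		     : "=r"(val)
		     : "r"(val)
		     : "r8");
	printf("r8 after 32-bit xor: %#llx\n", (unsigned long long)val);
	return 0;
}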
Suggested-by: Eric Northup Signed-off-by: Jim Mattson Reviewed-by: Eric Northup Reviewed-by: Benjamin Serebrin Reviewed-by: Andrew Honig [Paolo: Add AMD bits, Signed-off-by: Tom Lendacky ] Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/svm.c | 19 +++++++++++++++++++ arch/x86/kvm/vmx.c | 14 +++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 17fb6c6d939adb..714e4804cbe280 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -4964,6 +4964,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%[svm]) \n\t" "mov %%r14, %c[r14](%[svm]) \n\t" "mov %%r15, %c[r15](%[svm]) \n\t" +#endif + /* + * Clear host registers marked as clobbered to prevent + * speculative use. + */ + "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t" + "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t" + "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t" + "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t" + "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t" +#ifdef CONFIG_X86_64 + "xor %%r8, %%r8 \n\t" + "xor %%r9, %%r9 \n\t" + "xor %%r10, %%r10 \n\t" + "xor %%r11, %%r11 \n\t" + "xor %%r12, %%r12 \n\t" + "xor %%r13, %%r13 \n\t" + "xor %%r14, %%r14 \n\t" + "xor %%r15, %%r15 \n\t" #endif "pop %%" _ASM_BP : diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 47d9432756f34a..93de00832a8254 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -9405,6 +9405,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" "pop %0 \n\t" + "setbe %c[fail](%0)\n\t" "mov %%" _ASM_AX ", %c[rax](%0) \n\t" "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" __ASM_SIZE(pop) " %c[rcx](%0) \n\t" @@ -9421,12 +9422,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" + "xor %%r8d, %%r8d \n\t" + "xor %%r9d, %%r9d \n\t" + "xor %%r10d, %%r10d \n\t" + "xor %%r11d, %%r11d \n\t" + "xor %%r12d, %%r12d \n\t" + "xor %%r13d, %%r13d \n\t" + "xor %%r14d, %%r14d \n\t" + "xor %%r15d, %%r15d \n\t" #endif "mov %%cr2, %%" _ASM_AX " \n\t" "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" + "xor %%eax, %%eax \n\t" + "xor %%ebx, %%ebx \n\t" + "xor %%esi, %%esi \n\t" + "xor %%edi, %%edi \n\t" "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" - "setbe %c[fail](%0) \n\t" ".pushsection .rodata \n\t" ".global vmx_return \n\t" "vmx_return: " _ASM_PTR " 2b \n\t" From 203c1e538eb6abf6298c2a4214c92022a204fdbc Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 3 Jan 2018 12:49:29 +0100 Subject: [PATCH 1271/3326] platform/x86: wmi: Call acpi_wmi_init() later commit 98b8e4e5c17bf87c1b18ed929472051dab39878c upstream. Calling acpi_wmi_init() at the subsys_initcall() level causes ordering issues to appear on some systems and they are difficult to reproduce, because there is no guaranteed ordering between subsys_initcall() calls, so they may occur in different orders on different systems. In particular, commit 86d9f48534e8 (mm/slab: fix kmemcg cache creation delayed issue) exposed one of these issues where genl_init() and acpi_wmi_init() are both called at the same initcall level, but the former must run before the latter so as to avoid a NULL pointer dereference. For this reason, move the acpi_wmi_init() invocation to the initcall_sync level which should still be early enough for things to work correctly in the WMI land. 
Link: https://marc.info/?t=151274596700002&r=1&w=2 Reported-by: Jonathan McDowell Reported-by: Joonsoo Kim Tested-by: Jonathan McDowell Signed-off-by: Rafael J. Wysocki Signed-off-by: Darren Hart (VMware) Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 0765b1797d4c0d..7f8fa42a10840b 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -1268,5 +1268,5 @@ static void __exit acpi_wmi_exit(void) bus_unregister(&wmi_bus_type); } -subsys_initcall(acpi_wmi_init); +subsys_initcall_sync(acpi_wmi_init); module_exit(acpi_wmi_exit); From c7f500dd4ff1f9032e1f73cdb0d4bc8cb6515bb3 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 9 Nov 2017 07:14:43 -0800 Subject: [PATCH 1272/3326] iw_cxgb4: only call the cq comp_handler when the cq is armed commit cbb40fadd31c6bbc59104e58ac95c6ef492d038b upstream. The ULPs completion handler should only be called if the CQ is armed for notification. Signed-off-by: Steve Wise Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/cxgb4/ev.c | 8 +++++--- drivers/infiniband/hw/cxgb4/qp.c | 20 ++++++++++++-------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index 8f963df0bffceb..9d25298d96faf0 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, if (qhp->ibqp.event_handler) (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); - spin_lock_irqsave(&chp->comp_handler_lock, flag); - (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); - spin_unlock_irqrestore(&chp->comp_handler_lock, flag); + if (t4_clear_cq_armed(&chp->cq)) { + spin_lock_irqsave(&chp->comp_handler_lock, flag); + (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); + spin_unlock_irqrestore(&chp->comp_handler_lock, flag); + } } void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index e69453665a174a..282046ebaeb280 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -817,10 +817,12 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) t4_swcq_produce(cq); spin_unlock_irqrestore(&schp->lock, flag); - spin_lock_irqsave(&schp->comp_handler_lock, flag); - (*schp->ibcq.comp_handler)(&schp->ibcq, - schp->ibcq.cq_context); - spin_unlock_irqrestore(&schp->comp_handler_lock, flag); + if (t4_clear_cq_armed(&schp->cq)) { + spin_lock_irqsave(&schp->comp_handler_lock, flag); + (*schp->ibcq.comp_handler)(&schp->ibcq, + schp->ibcq.cq_context); + spin_unlock_irqrestore(&schp->comp_handler_lock, flag); + } } static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) @@ -846,10 +848,12 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) t4_swcq_produce(cq); spin_unlock_irqrestore(&rchp->lock, flag); - spin_lock_irqsave(&rchp->comp_handler_lock, flag); - (*rchp->ibcq.comp_handler)(&rchp->ibcq, - rchp->ibcq.cq_context); - spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); + if (t4_clear_cq_armed(&rchp->cq)) { + spin_lock_irqsave(&rchp->comp_handler_lock, flag); + (*rchp->ibcq.comp_handler)(&rchp->ibcq, + rchp->ibcq.cq_context); + spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); + } } int 
c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, From f73c380ab3fea85e4fbc66f5cc6f9e6551b89f24 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 9 Nov 2017 07:21:26 -0800 Subject: [PATCH 1273/3326] iw_cxgb4: atomically flush the qp commit bc52e9ca74b9a395897bb640c6671b2cbf716032 upstream. __flush_qp() has a race condition where during the flush operation, the qp lock is released allowing another thread to possibly post a WR, which corrupts the queue state, possibly causing crashes. The lock was released to preserve the cq/qp locking hierarchy of cq first, then qp. However releasing the qp lock is not necessary; both RQ and SQ CQ locks can be acquired first, followed by the qp lock, and then the RQ and SQ flushing can be done w/o unlocking. Signed-off-by: Steve Wise Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/cxgb4/qp.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 282046ebaeb280..c7402f4991a799 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1271,31 +1271,34 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); - /* locking hierarchy: cq lock first, then qp lock. */ + /* locking hierarchy: cqs lock first, then qp lock. */ spin_lock_irqsave(&rchp->lock, flag); + if (schp != rchp) + spin_lock(&schp->lock); spin_lock(&qhp->lock); if (qhp->wq.flushed) { spin_unlock(&qhp->lock); + if (schp != rchp) + spin_unlock(&schp->lock); spin_unlock_irqrestore(&rchp->lock, flag); return; } qhp->wq.flushed = 1; + t4_set_wq_in_error(&qhp->wq); c4iw_flush_hw_cq(rchp); c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); - spin_unlock(&qhp->lock); - spin_unlock_irqrestore(&rchp->lock, flag); - /* locking hierarchy: cq lock first, then qp lock. */ - spin_lock_irqsave(&schp->lock, flag); - spin_lock(&qhp->lock); if (schp != rchp) c4iw_flush_hw_cq(schp); sq_flushed = c4iw_flush_sq(qhp); + spin_unlock(&qhp->lock); - spin_unlock_irqrestore(&schp->lock, flag); + if (schp != rchp) + spin_unlock(&schp->lock); + spin_unlock_irqrestore(&rchp->lock, flag); if (schp == rchp) { if (t4_clear_cq_armed(&rchp->cq) && @@ -1329,8 +1332,8 @@ static void flush_qp(struct c4iw_qp *qhp) rchp = to_c4iw_cq(qhp->ibqp.recv_cq); schp = to_c4iw_cq(qhp->ibqp.send_cq); - t4_set_wq_in_error(&qhp->wq); if (qhp->ibqp.uobject) { + t4_set_wq_in_error(&qhp->wq); t4_set_cq_in_error(&rchp->cq); spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); From 5042bde3ccbbd2326ac349a01109c8e56fc7bc4b Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 30 Nov 2017 09:41:56 -0800 Subject: [PATCH 1274/3326] iw_cxgb4: only clear the ARMED bit if a notification is needed commit 335ebf6fa35ca1c59b73f76fad19b249d3550e86 upstream. In __flush_qp(), the CQ ARMED bit was being cleared regardless of whether any notification is actually needed. This resulted in the iser termination logic getting stuck in ib_drain_sq() because the CQ was not marked ARMED and thus the drain CQE notification wasn't triggered. 
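The ordering matters because t4_clear_cq_armed() both tests and clears the bit, so whichever operand of && runs first decides whether the ARMED state survives when nothing was flushed. A user-space sketch of that short-circuit behaviour, with a local flag and a clear_armed() helper standing in for the driver's primitives:

#include <stdbool.h>
#include <stdio.h>

static bool armed;

static bool clear_armed(void)	/* test-and-clear, like t4_clear_cq_armed() */
{
	bool was_armed = armed;

	armed = false;
	return was_armed;
}

static void flush(bool flushed, bool clear_first)
{
	armed = true;		/* the peer has asked for a notification */
	if (clear_first ? (clear_armed() && flushed)
			: (flushed && clear_armed()))
		printf("  comp_handler invoked\n");
	printf("  armed afterwards: %s\n", armed ? "yes" : "no");
}

int main(void)
{
	printf("nothing flushed, old order (clear first):\n");
	flush(false, true);	/* ARMED silently consumed, no notification */
	printf("nothing flushed, fixed order (test flushed first):\n");
	flush(false, false);	/* ARMED preserved for the later drain CQE */
	return 0;
}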
This new bug was exposed when this commit was merged: commit cbb40fadd31c ("iw_cxgb4: only call the cq comp_handler when the cq is armed") Signed-off-by: Steve Wise Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/cxgb4/qp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index c7402f4991a799..74864c4cdf9cb9 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1301,21 +1301,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, spin_unlock_irqrestore(&rchp->lock, flag); if (schp == rchp) { - if (t4_clear_cq_armed(&rchp->cq) && - (rq_flushed || sq_flushed)) { + if ((rq_flushed || sq_flushed) && + t4_clear_cq_armed(&rchp->cq)) { spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } } else { - if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { + if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) { spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } - if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { + if (sq_flushed && t4_clear_cq_armed(&schp->cq)) { spin_lock_irqsave(&schp->comp_handler_lock, flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); From 623b8f8e90f0bb73e9566a6744b5c0577284977e Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Tue, 19 Dec 2017 10:29:25 -0800 Subject: [PATCH 1275/3326] iw_cxgb4: reflect the original WR opcode in drain cqes commit 96a236ed286776554fbd227c6d2876fd3b5dc65d upstream. The flush/drain logic was not retaining the original wr opcode in its completion. This can cause problems if the application uses the completion opcode to make decisions. Use bit 10 of the CQE header word to indicate the CQE is a special drain completion, and save the original WR opcode in the cqe header opcode field. Fixes: 4fe7c2962e11 ("iw_cxgb4: refactor sq/rq drain logic") Signed-off-by: Steve Wise Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/cxgb4/cq.c | 7 ++-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2 -- drivers/infiniband/hw/cxgb4/qp.c | 46 +++++++++++++++++++++++--- drivers/infiniband/hw/cxgb4/t4.h | 6 ++++ 4 files changed, 50 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 514c1000ded125..73feeeeb428309 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -410,7 +410,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) { - if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) { + if (DRAIN_CQE(cqe)) { WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid); return 0; } @@ -509,7 +509,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, /* * Special cqe for drain WR completions... 
*/ - if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) { + if (DRAIN_CQE(hw_cqe)) { *cookie = CQE_DRAIN_COOKIE(hw_cqe); *cqe = *hw_cqe; goto skip_cqe; @@ -766,9 +766,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) c4iw_invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); break; - case C4IW_DRAIN_OPCODE: - wc->opcode = IB_WC_SEND; - break; default: pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n", CQE_OPCODE(&cqe), CQE_QPID(&cqe)); diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 819a30635d53be..20c481115a996b 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -631,8 +631,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state) return IB_QPS_ERR; } -#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN - static inline u32 c4iw_ib_to_tpt_access(int a) { return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 74864c4cdf9cb9..6e91c697d67a53 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -794,21 +794,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) return 0; } -static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) +static int ib_to_fw_opcode(int ib_opcode) +{ + int opcode; + + switch (ib_opcode) { + case IB_WR_SEND_WITH_INV: + opcode = FW_RI_SEND_WITH_INV; + break; + case IB_WR_SEND: + opcode = FW_RI_SEND; + break; + case IB_WR_RDMA_WRITE: + opcode = FW_RI_RDMA_WRITE; + break; + case IB_WR_RDMA_READ: + case IB_WR_RDMA_READ_WITH_INV: + opcode = FW_RI_READ_REQ; + break; + case IB_WR_REG_MR: + opcode = FW_RI_FAST_REGISTER; + break; + case IB_WR_LOCAL_INV: + opcode = FW_RI_LOCAL_INV; + break; + default: + opcode = -EINVAL; + } + return opcode; +} + +static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) { struct t4_cqe cqe = {}; struct c4iw_cq *schp; unsigned long flag; struct t4_cq *cq; + int opcode; schp = to_c4iw_cq(qhp->ibqp.send_cq); cq = &schp->cq; + opcode = ib_to_fw_opcode(wr->opcode); + if (opcode < 0) + return opcode; + cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | - CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_OPCODE_V(opcode) | CQE_TYPE_V(1) | CQE_SWCQE_V(1) | + CQE_DRAIN_V(1) | CQE_QPID_V(qhp->wq.sq.qid)); spin_lock_irqsave(&schp->lock, flag); @@ -823,6 +859,7 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) schp->ibcq.cq_context); spin_unlock_irqrestore(&schp->comp_handler_lock, flag); } + return 0; } static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) @@ -837,9 +874,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | - CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_OPCODE_V(FW_RI_SEND) | CQE_TYPE_V(0) | CQE_SWCQE_V(1) | + CQE_DRAIN_V(1) | CQE_QPID_V(qhp->wq.sq.qid)); spin_lock_irqsave(&rchp->lock, flag); @@ -879,7 +917,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, */ if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); - complete_sq_drain_wr(qhp, wr); + err = complete_sq_drain_wr(qhp, wr); return err; } num_wrs = t4_sq_avail(&qhp->wq); diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index bcb80ca67d3d33..80b390e861dcf9 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ 
b/drivers/infiniband/hw/cxgb4/t4.h @@ -197,6 +197,11 @@ struct t4_cqe { #define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M) #define CQE_SWCQE_V(x) ((x)<> CQE_DRAIN_S)) & CQE_DRAIN_M) +#define CQE_DRAIN_V(x) ((x)<> CQE_STATUS_S)) & CQE_STATUS_M) @@ -213,6 +218,7 @@ struct t4_cqe { #define CQE_OPCODE_V(x) ((x)<header))) +#define DRAIN_CQE(x) (CQE_DRAIN_G(be32_to_cpu((x)->header))) #define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header))) #define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header))) #define SQ_TYPE(x) (CQE_TYPE((x))) From 4659a641b36ed3f96c575ac1ef919a4d1c103936 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Tue, 19 Dec 2017 14:02:10 -0800 Subject: [PATCH 1276/3326] iw_cxgb4: when flushing, complete all wrs in a chain commit d14587334580bc94d3ee11e8320e0c157f91ae8f upstream. If a wr chain was posted and needed to be flushed, only the first wr in the chain was completed with FLUSHED status. The rest were never completed. This caused isert to hang on shutdown due to the missing completions which left iscsi IO commands referenced, stalling the shutdown. Fixes: 4fe7c2962e11 ("iw_cxgb4: refactor sq/rq drain logic") Signed-off-by: Steve Wise Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/cxgb4/qp.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 6e91c697d67a53..f311ea73c806fe 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -862,6 +862,22 @@ static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) return 0; } +static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + int ret = 0; + + while (wr) { + ret = complete_sq_drain_wr(qhp, wr); + if (ret) { + *bad_wr = wr; + break; + } + wr = wr->next; + } + return ret; +} + static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) { struct t4_cqe cqe = {}; @@ -894,6 +910,14 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) } } +static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr) +{ + while (wr) { + complete_rq_drain_wr(qhp, wr); + wr = wr->next; + } +} + int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { @@ -917,7 +941,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, */ if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); - err = complete_sq_drain_wr(qhp, wr); + err = complete_sq_drain_wrs(qhp, wr, bad_wr); return err; } num_wrs = t4_sq_avail(&qhp->wq); @@ -1066,7 +1090,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, */ if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); - complete_rq_drain_wr(qhp, wr); + complete_rq_drain_wrs(qhp, wr); return err; } num_wrs = t4_rq_avail(&qhp->wq); From 5bdc95c0a0dc58c15dcd3ffb56497683f800c455 Mon Sep 17 00:00:00 2001 From: Vikas C Sajjan Date: Thu, 16 Nov 2017 21:43:44 +0530 Subject: [PATCH 1277/3326] x86/acpi: Handle SCI interrupts above legacy space gracefully commit 252714155f04c5d16989cb3aadb85fd1b5772f99 upstream. Platforms which support only IOAPIC mode, pass the SCI information above the legacy space (0-15) via the FADT mechanism and not via MADT. 
In such cases mp_override_legacy_irq() which is invoked from acpi_sci_ioapic_setup() to register SCI interrupts fails for interrupts greater equal 16, since it is meant to handle only the legacy space and emits error "Invalid bus_irq %u for legacy override". Add a new function to handle SCI interrupts >= 16 and invoke it conditionally in acpi_sci_ioapic_setup(). The code duplication due to this new function will be cleaned up in a separate patch. Co-developed-by: Sunil V L Signed-off-by: Vikas C Sajjan Signed-off-by: Sunil V L Signed-off-by: Thomas Gleixner Tested-by: Abdul Lateef Attar Acked-by: Rafael J. Wysocki Cc: linux-pm@vger.kernel.org Cc: kkamagui@gmail.com Cc: linux-acpi@vger.kernel.org Link: https://lkml.kernel.org/r/1510848825-21965-2-git-send-email-vikas.cha.sajjan@hpe.com Cc: Jean Delvare Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/acpi/boot.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 079535e53e2a64..d75a7466bc8430 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -429,6 +429,34 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, return 0; } +static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, + u8 trigger, u32 gsi) +{ + struct mpc_intsrc mp_irq; + int ioapic, pin; + + /* Convert 'gsi' to 'ioapic.pin'(INTIN#) */ + ioapic = mp_find_ioapic(gsi); + if (ioapic < 0) { + pr_warn("Failed to find ioapic for gsi : %u\n", gsi); + return ioapic; + } + + pin = mp_find_ioapic_pin(ioapic, gsi); + + mp_irq.type = MP_INTSRC; + mp_irq.irqtype = mp_INT; + mp_irq.irqflag = (trigger << 2) | polarity; + mp_irq.srcbus = MP_ISA_BUS; + mp_irq.srcbusirq = bus_irq; + mp_irq.dstapic = mpc_ioapic_id(ioapic); + mp_irq.dstirq = pin; + + mp_save_irq(&mp_irq); + + return 0; +} + static int __init acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) { @@ -473,7 +501,11 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; - mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); + if (bus_irq < NR_IRQS_LEGACY) + mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); + else + mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi); + acpi_penalize_sci_irq(bus_irq, trigger, polarity); /* From f1069cfedae62190bc058a4c372fa60bcf37f75c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 1 Jan 2018 09:50:50 +0100 Subject: [PATCH 1278/3326] ALSA: pcm: Remove incorrect snd_BUG_ON() usages commit fe08f34d066f4404934a509b6806db1a4f700c86 upstream. syzkaller triggered kernel warnings through PCM OSS emulation at closing a stream: WARNING: CPU: 0 PID: 3502 at sound/core/pcm_lib.c:1635 snd_pcm_hw_param_first+0x289/0x690 sound/core/pcm_lib.c:1635 Call Trace: .... snd_pcm_hw_param_near.constprop.27+0x78d/0x9a0 sound/core/oss/pcm_oss.c:457 snd_pcm_oss_change_params+0x17d3/0x3720 sound/core/oss/pcm_oss.c:969 snd_pcm_oss_make_ready+0xaa/0x130 sound/core/oss/pcm_oss.c:1128 snd_pcm_oss_sync+0x257/0x830 sound/core/oss/pcm_oss.c:1638 snd_pcm_oss_release+0x20b/0x280 sound/core/oss/pcm_oss.c:2431 __fput+0x327/0x7e0 fs/file_table.c:210 .... This happens while it tries to open and set up the aloop device concurrently. The warning above (invoked from snd_BUG_ON() macro) is to detect the unexpected logical error where snd_pcm_hw_refine() call shouldn't fail. 
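snd_BUG_ON() is effectively an assertion on an invariant; once the refine step can legitimately fail at runtime, its failure has to be propagated as an ordinary error instead. A user-space sketch of that distinction, with assert() and a made-up refine() standing in for the ALSA helpers (the asserting path aborts, which is the analogue of the warning shown above):

#include <assert.h>
#include <errno.h>
#include <stdio.h>

static int other_side_rate_min = 48000;	/* may be changed by the peer at any time */

static int refine(int requested_rate)
{
	return requested_rate >= other_side_rate_min ? 0 : -EINVAL;
}

static int set_rate_checked(int rate)
{
	int err = refine(rate);

	if (err < 0)		/* a normal error path: hand it to the caller */
		return err;
	return 0;
}

static int set_rate_asserting(int rate)
{
	int err = refine(rate);

	assert(err >= 0);	/* fires for a condition the caller cannot rule out */
	return err;
}

int main(void)
{
	printf("checked path  : %d\n", set_rate_checked(44100));
	printf("asserting path: ");
	fflush(stdout);
	printf("%d\n", set_rate_asserting(44100));	/* aborts here */
	return 0;
}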
The theory is true for the case where the hw_params config rules are static. But for an aloop device, the hw_params rule condition does vary dynamically depending on the connected target; when another device is opened and changes the parameters, the device connected in another side is also affected, and it caused the error from snd_pcm_hw_refine(). That is, the simplest "solution" for this is to remove the incorrect assumption of static rules, and treat such an error as a normal error path. As there are a couple of other places using snd_BUG_ON() incorrectly, this patch removes these spurious snd_BUG_ON() calls. Reported-by: syzbot+6f11c7e2a1b91d466432@syzkaller.appspotmail.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/oss/pcm_oss.c | 1 - sound/core/pcm_lib.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index e49f448ee04f4a..ceaa51f76591b7 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -455,7 +455,6 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm, v = snd_pcm_hw_param_last(pcm, params, var, dir); else v = snd_pcm_hw_param_first(pcm, params, var, dir); - snd_BUG_ON(v < 0); return v; } diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 10e7ef7a8804b1..db7894bb028ccc 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -1632,7 +1632,7 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); - if (snd_BUG_ON(err < 0)) + if (err < 0) return err; } return snd_pcm_hw_param_value(params, var, dir); @@ -1678,7 +1678,7 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); - if (snd_BUG_ON(err < 0)) + if (err < 0) return err; } return snd_pcm_hw_param_value(params, var, dir); From ef810a3d7d4e5c3d5a84e1169157adc11f81c6cd Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 3 Jan 2018 15:16:30 +0100 Subject: [PATCH 1279/3326] ALSA: pcm: Workaround for weird PulseAudio behavior on rewind error commit fb51f1cd06f9ced7b7085a2a4636375d520431ca upstream. The commit 9027c4639ef1 ("ALSA: pcm: Call ack() whenever appl_ptr is updated") introduced the possible error code returned from the PCM rewind ioctl. Basically the change was for handling the indirect PCM more correctly, but ironically, it caused rather a side-effect: PulseAudio gets pissed off when receiving an error from rewind, throws everything away and stops processing further, resulting in the silence. It's clearly a failure in the application side, so the best would be to fix that bug in PA. OTOH, PA is mostly the only user of the rewind feature, so it's not good to slap the sole customer. This patch tries to mitigate the situation: instead of returning an error, now the rewind ioctl returns zero when the driver can't rewind. It indicates that no rewind was performed, so the behavior is consistent, at least. 
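Seen from the client side, the contract change amounts to the sketch below, where rewind_hw() is a made-up stand-in for the ioctl rather than the ALSA API: a negative return is treated as fatal by a client such as PulseAudio, while zero just means nothing was rewound and streaming continues.

#include <stdio.h>

/* Pretend the driver cannot honour the rewind right now. */
static long rewind_hw(long frames, int clamp_errors)
{
	long err = -1;			/* stands for any negative error code */

	(void)frames;
	return clamp_errors ? 0 : err;	/* new behaviour vs. old behaviour */
}

static const char *client_reaction(long ret)
{
	if (ret < 0)
		return "error: client throws its buffers away and goes silent";
	if (ret == 0)
		return "nothing rewound: client keeps streaming as before";
	return "rewound: client rewrites the tail of its buffer";
}

int main(void)
{
	printf("old: %s\n", client_reaction(rewind_hw(256, 0)));
	printf("new: %s\n", client_reaction(rewind_hw(256, 1)));
	return 0;
}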
Fixes: 9027c4639ef1 ("ALSA: pcm: Call ack() whenever appl_ptr is updated") Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/pcm_native.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 2fec2feac387d3..499f75b18e096d 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -2582,7 +2582,7 @@ static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream, return ret < 0 ? ret : frames; } -/* decrease the appl_ptr; returns the processed frames or a negative error */ +/* decrease the appl_ptr; returns the processed frames or zero for error */ static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream, snd_pcm_uframes_t frames, snd_pcm_sframes_t avail) @@ -2599,7 +2599,12 @@ static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream, if (appl_ptr < 0) appl_ptr += runtime->boundary; ret = pcm_lib_apply_appl_ptr(substream, appl_ptr); - return ret < 0 ? ret : frames; + /* NOTE: we return zero for errors because PulseAudio gets depressed + * upon receiving an error from rewind ioctl and stops processing + * any longer. Returning zero means that no rewind is done, so + * it's not absolutely wrong to answer like that. + */ + return ret < 0 ? 0 : frames; } static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream, From 835004dc452aa5d0a2c25d65d9026f1d0a637ed4 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 4 Jan 2018 16:39:27 +0100 Subject: [PATCH 1280/3326] ALSA: pcm: Add missing error checks in OSS emulation plugin builder commit 6708913750344a900f2e73bfe4a4d6dbbce4fe8d upstream. In the OSS emulation plugin builder where the frame size is parsed in the plugin chain, some places miss the possible errors returned from the plugin src_ or dst_frames callback. This patch papers over such places. Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/oss/pcm_plugin.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c index cadc937928683c..85a56af104bd61 100644 --- a/sound/core/oss/pcm_plugin.c +++ b/sound/core/oss/pcm_plugin.c @@ -592,18 +592,26 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st snd_pcm_sframes_t frames = size; plugin = snd_pcm_plug_first(plug); - while (plugin && frames > 0) { + while (plugin) { + if (frames <= 0) + return frames; if ((next = plugin->next) != NULL) { snd_pcm_sframes_t frames1 = frames; - if (plugin->dst_frames) + if (plugin->dst_frames) { frames1 = plugin->dst_frames(plugin, frames); + if (frames1 <= 0) + return frames1; + } if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) { return err; } if (err != frames1) { frames = err; - if (plugin->src_frames) + if (plugin->src_frames) { frames = plugin->src_frames(plugin, frames1); + if (frames <= 0) + return frames; + } } } else dst_channels = NULL; From 28ac4032652f7dcc65637bfa3f0c539ac6c772de Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 8 Jan 2018 13:58:31 +0100 Subject: [PATCH 1281/3326] ALSA: pcm: Abort properly at pending signal in OSS read/write loops commit 29159a4ed7044c52e3e2cf1a9fb55cec4745c60b upstream. The loops for read and write in PCM OSS emulation have no proper check of pending signals, and they keep processing even after user tries to break. 
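A user-space analogue of the missing check is a long copy loop that polls a flag set from a signal handler and bails out early; the -ERESTARTSYS value and the period-sized chunks below are illustrative only, not the OSS layer's API.

#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

#define ERESTARTSYS 512			/* kernel-internal value, shown for flavour */

static volatile sig_atomic_t pending;

static void on_signal(int sig)
{
	(void)sig;
	pending = 1;
}

static ssize_t copy_loop(const char *buf, size_t bytes)
{
	const size_t period = 4096;
	size_t xfer = 0;

	while (bytes > 0) {
		size_t n = bytes < period ? bytes : period;

		/* ...one period worth of data would be transferred here... */
		buf += n;
		bytes -= n;
		xfer += n;

		if (pending)	/* the check the OSS loops were missing */
			return xfer ? (ssize_t)xfer : -ERESTARTSYS;
	}
	return xfer;
}

int main(void)
{
	static char buf[1 << 20];

	signal(SIGINT, on_signal);
	printf("copied %zd bytes\n", copy_loop(buf, sizeof(buf)));
	return 0;
}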
This results in a very long delay, often seen as RCU stall when a huge unprocessed bytes remain queued. The bug could be easily triggered by syzkaller. As a simple workaround, this patch adds the proper check of pending signals and aborts the loop appropriately. Reported-by: syzbot+993cb4cfcbbff3947c21@syzkaller.appspotmail.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/oss/pcm_oss.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index ceaa51f76591b7..e317964bd2eab1 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1381,6 +1381,10 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha tmp != runtime->oss.period_bytes) break; } + if (signal_pending(current)) { + tmp = -ERESTARTSYS; + goto err; + } } mutex_unlock(&runtime->oss.params_lock); return xfer; @@ -1466,6 +1470,10 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use bytes -= tmp; xfer += tmp; } + if (signal_pending(current)) { + tmp = -ERESTARTSYS; + goto err; + } } mutex_unlock(&runtime->oss.params_lock); return xfer; From 0b9c7ed68db295f82e91654ba3ff3ee1346c358d Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 8 Jan 2018 14:03:53 +0100 Subject: [PATCH 1282/3326] ALSA: pcm: Allow aborting mutex lock at OSS read/write loops commit 900498a34a3ac9c611e9b425094c8106bdd7dc1c upstream. PCM OSS read/write loops keep taking the mutex lock for the whole read/write, and this might take very long when the exceptionally high amount of data is given. Also, since it invokes with mutex_lock(), the concurrent read/write becomes unbreakable. This patch tries to address these issues by replacing mutex_lock() with mutex_lock_interruptible(), and also splits / re-takes the lock at each read/write period chunk, so that it can switch the context more finely if requested. Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/oss/pcm_oss.c | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index e317964bd2eab1..c2db7e905f7d69 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1334,8 +1334,11 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) return tmp; - mutex_lock(&runtime->oss.params_lock); while (bytes > 0) { + if (mutex_lock_interruptible(&runtime->oss.params_lock)) { + tmp = -ERESTARTSYS; + break; + } if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { tmp = bytes; if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes) @@ -1379,18 +1382,18 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha xfer += tmp; if ((substream->f_flags & O_NONBLOCK) != 0 && tmp != runtime->oss.period_bytes) - break; + tmp = -EAGAIN; } + err: + mutex_unlock(&runtime->oss.params_lock); + if (tmp < 0) + break; if (signal_pending(current)) { tmp = -ERESTARTSYS; - goto err; + break; } + tmp = 0; } - mutex_unlock(&runtime->oss.params_lock); - return xfer; - - err: - mutex_unlock(&runtime->oss.params_lock); return xfer > 0 ? 
(snd_pcm_sframes_t)xfer : tmp; } @@ -1438,8 +1441,11 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use if ((tmp = snd_pcm_oss_make_ready(substream)) < 0) return tmp; - mutex_lock(&runtime->oss.params_lock); while (bytes > 0) { + if (mutex_lock_interruptible(&runtime->oss.params_lock)) { + tmp = -ERESTARTSYS; + break; + } if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) { if (runtime->oss.buffer_used == 0) { tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1); @@ -1470,16 +1476,16 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use bytes -= tmp; xfer += tmp; } + err: + mutex_unlock(&runtime->oss.params_lock); + if (tmp < 0) + break; if (signal_pending(current)) { tmp = -ERESTARTSYS; - goto err; + break; } + tmp = 0; } - mutex_unlock(&runtime->oss.params_lock); - return xfer; - - err: - mutex_unlock(&runtime->oss.params_lock); return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp; } From cd32d7c4d2349ec5c0ffb0898c29bd3f28daf552 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 5 Jan 2018 16:09:47 +0100 Subject: [PATCH 1283/3326] ALSA: aloop: Release cable upon open error path commit 9685347aa0a5c2869058ca6ab79fd8e93084a67f upstream. The aloop runtime object and its assignment in the cable are left even when opening a substream fails. This doesn't mean any memory leak, but it still keeps the invalid pointer that may be referred by the another side of the cable spontaneously, which is a potential Oops cause. Clean up the cable assignment and the empty cable upon the error path properly. Fixes: 597603d615d2 ("ALSA: introduce the snd-aloop module for the PCM loopback") Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/drivers/aloop.c | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 135adb17703cc4..3c09530427eaa9 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -658,12 +658,31 @@ static int rule_channels(struct snd_pcm_hw_params *params, return snd_interval_refine(hw_param_interval(params, rule->var), &t); } +static void free_cable(struct snd_pcm_substream *substream) +{ + struct loopback *loopback = substream->private_data; + int dev = get_cable_index(substream); + struct loopback_cable *cable; + + cable = loopback->cables[substream->number][dev]; + if (!cable) + return; + if (cable->streams[!substream->stream]) { + /* other stream is still alive */ + cable->streams[substream->stream] = NULL; + } else { + /* free the cable */ + loopback->cables[substream->number][dev] = NULL; + kfree(cable); + } +} + static int loopback_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct loopback *loopback = substream->private_data; struct loopback_pcm *dpcm; - struct loopback_cable *cable; + struct loopback_cable *cable = NULL; int err = 0; int dev = get_cable_index(substream); @@ -682,7 +701,6 @@ static int loopback_open(struct snd_pcm_substream *substream) if (!cable) { cable = kzalloc(sizeof(*cable), GFP_KERNEL); if (!cable) { - kfree(dpcm); err = -ENOMEM; goto unlock; } @@ -724,6 +742,10 @@ static int loopback_open(struct snd_pcm_substream *substream) else runtime->hw = cable->hw; unlock: + if (err < 0) { + free_cable(substream); + kfree(dpcm); + } mutex_unlock(&loopback->cable_lock); return err; } @@ -732,20 +754,10 @@ static int loopback_close(struct snd_pcm_substream *substream) { struct 
loopback *loopback = substream->private_data; struct loopback_pcm *dpcm = substream->runtime->private_data; - struct loopback_cable *cable; - int dev = get_cable_index(substream); loopback_timer_stop(dpcm); mutex_lock(&loopback->cable_lock); - cable = loopback->cables[substream->number][dev]; - if (cable->streams[!substream->stream]) { - /* other stream is still alive */ - cable->streams[substream->stream] = NULL; - } else { - /* free the cable */ - loopback->cables[substream->number][dev] = NULL; - kfree(cable); - } + free_cable(substream); mutex_unlock(&loopback->cable_lock); return 0; } From 7ea0bfaa76e90d98ec1bd718c9c03df24881afd5 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 5 Jan 2018 16:15:33 +0100 Subject: [PATCH 1284/3326] ALSA: aloop: Fix inconsistent format due to incomplete rule commit b088b53e20c7d09b5ab84c5688e609f478e5c417 upstream. The extra hw constraint rule for the formats the aloop driver introduced has a slight flaw, where it doesn't return a positive value when the mask got changed. It came from the fact that it's basically a copy&paste from snd_hw_constraint_mask64(). The original code is supposed to be a single-shot and it modifies the mask bits only once and never after, while what we need for aloop is the dynamic hw rule that limits the mask bits. This difference results in the inconsistent state, as the hw_refine doesn't apply the dependencies fully. The worse and surprisingly result is that it causes a crash in OSS emulation when multiple full-duplex reads/writes are performed concurrently (I leave why it triggers Oops to readers as a homework). For fixing this, replace a few open-codes with the standard snd_mask_*() macros. Reported-by: syzbot+3902b5220e8ca27889ca@syzkaller.appspotmail.com Fixes: b1c73fc8e697 ("ALSA: snd-aloop: Fix hw_params restrictions and checking") Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/drivers/aloop.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 3c09530427eaa9..5e28d5e1a131e3 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -622,14 +623,12 @@ static int rule_format(struct snd_pcm_hw_params *params, { struct snd_pcm_hardware *hw = rule->private; - struct snd_mask *maskp = hw_param_mask(params, rule->var); + struct snd_mask m; - maskp->bits[0] &= (u_int32_t)hw->formats; - maskp->bits[1] &= (u_int32_t)(hw->formats >> 32); - memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */ - if (! maskp->bits[0] && ! maskp->bits[1]) - return -EINVAL; - return 0; + snd_mask_none(&m); + m.bits[0] = (u_int32_t)hw->formats; + m.bits[1] = (u_int32_t)(hw->formats >> 32); + return snd_mask_refine(hw_param_mask(params, rule->var), &m); } static int rule_rate(struct snd_pcm_hw_params *params, From d2363bb2b5238dd5f02585b21102eacc5f4789bc Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 4 Jan 2018 17:38:54 +0100 Subject: [PATCH 1285/3326] ALSA: aloop: Fix racy hw constraints adjustment commit 898dfe4687f460ba337a01c11549f87269a13fa2 upstream. The aloop driver tries to update the hw constraints of the connected target on the cable of the opened PCM substream. This is done by adding the extra hw constraints rules referring to the substream runtime->hw fields, while the other substream may update the runtime hw of another side on the fly. 
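The pattern the rules below switch to is the usual one for shared, mutable limits: take the cable lock, copy the current min/max into locals, drop the lock, and refine against the snapshot. A minimal pthread sketch of that pattern, with made-up structure names rather than the driver's types:

#include <pthread.h>
#include <stdio.h>

struct cable_hw {
	unsigned int rate_min, rate_max;
};

static struct cable_hw cable = { 8000, 192000 };
static pthread_mutex_t cable_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer side: the other substream re-parametrizing the cable. */
static void params_change(unsigned int rate)
{
	pthread_mutex_lock(&cable_lock);
	cable.rate_min = rate;
	cable.rate_max = rate;
	pthread_mutex_unlock(&cable_lock);
}

/* Reader side: the hw-constraint rule taking a consistent snapshot. */
static void rule_rate(unsigned int *min, unsigned int *max)
{
	pthread_mutex_lock(&cable_lock);
	*min = cable.rate_min;
	*max = cable.rate_max;
	pthread_mutex_unlock(&cable_lock);
}

int main(void)
{
	unsigned int lo, hi;

	params_change(48000);
	rule_rate(&lo, &hi);
	printf("constraint interval: [%u, %u]\n", lo, hi);
	return 0;
}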
This is, however, racy and may result in the inconsistent values when both PCM streams perform the prepare concurrently. One of the reason is that it overwrites the other's runtime->hw field; which is not only racy but also broken when it's called before the open of another side finishes. And, since the reference to runtime->hw isn't protected, the concurrent write may give the partial value update and become inconsistent. This patch is an attempt to fix and clean up: - The prepare doesn't change the runtime->hw of other side any longer, but only update the cable->hw that is referred commonly. - The extra rules refer to the loopback_pcm object instead of the runtime->hw. The actual hw is deduced from cable->hw. - The extra rules take the cable_lock to protect against the race. Fixes: b1c73fc8e697 ("ALSA: snd-aloop: Fix hw_params restrictions and checking") Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/drivers/aloop.c | 51 ++++++++++++++++++------------------------- 1 file changed, 21 insertions(+), 30 deletions(-) diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 5e28d5e1a131e3..386ee829c6555f 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -306,19 +306,6 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd) return 0; } -static void params_change_substream(struct loopback_pcm *dpcm, - struct snd_pcm_runtime *runtime) -{ - struct snd_pcm_runtime *dst_runtime; - - if (dpcm == NULL || dpcm->substream == NULL) - return; - dst_runtime = dpcm->substream->runtime; - if (dst_runtime == NULL) - return; - dst_runtime->hw = dpcm->cable->hw; -} - static void params_change(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; @@ -330,10 +317,6 @@ static void params_change(struct snd_pcm_substream *substream) cable->hw.rate_max = runtime->rate; cable->hw.channels_min = runtime->channels; cable->hw.channels_max = runtime->channels; - params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK], - runtime); - params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE], - runtime); } static int loopback_prepare(struct snd_pcm_substream *substream) @@ -621,24 +604,29 @@ static unsigned int get_cable_index(struct snd_pcm_substream *substream) static int rule_format(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { - - struct snd_pcm_hardware *hw = rule->private; + struct loopback_pcm *dpcm = rule->private; + struct loopback_cable *cable = dpcm->cable; struct snd_mask m; snd_mask_none(&m); - m.bits[0] = (u_int32_t)hw->formats; - m.bits[1] = (u_int32_t)(hw->formats >> 32); + mutex_lock(&dpcm->loopback->cable_lock); + m.bits[0] = (u_int32_t)cable->hw.formats; + m.bits[1] = (u_int32_t)(cable->hw.formats >> 32); + mutex_unlock(&dpcm->loopback->cable_lock); return snd_mask_refine(hw_param_mask(params, rule->var), &m); } static int rule_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { - struct snd_pcm_hardware *hw = rule->private; + struct loopback_pcm *dpcm = rule->private; + struct loopback_cable *cable = dpcm->cable; struct snd_interval t; - t.min = hw->rate_min; - t.max = hw->rate_max; + mutex_lock(&dpcm->loopback->cable_lock); + t.min = cable->hw.rate_min; + t.max = cable->hw.rate_max; + mutex_unlock(&dpcm->loopback->cable_lock); t.openmin = t.openmax = 0; t.integer = 0; return snd_interval_refine(hw_param_interval(params, rule->var), &t); @@ -647,11 +635,14 @@ static int rule_rate(struct snd_pcm_hw_params *params, static int 
rule_channels(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { - struct snd_pcm_hardware *hw = rule->private; + struct loopback_pcm *dpcm = rule->private; + struct loopback_cable *cable = dpcm->cable; struct snd_interval t; - t.min = hw->channels_min; - t.max = hw->channels_max; + mutex_lock(&dpcm->loopback->cable_lock); + t.min = cable->hw.channels_min; + t.max = cable->hw.channels_max; + mutex_unlock(&dpcm->loopback->cable_lock); t.openmin = t.openmax = 0; t.integer = 0; return snd_interval_refine(hw_param_interval(params, rule->var), &t); @@ -717,19 +708,19 @@ static int loopback_open(struct snd_pcm_substream *substream) /* are cached -> they do not reflect the actual state */ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, - rule_format, &runtime->hw, + rule_format, dpcm, SNDRV_PCM_HW_PARAM_FORMAT, -1); if (err < 0) goto unlock; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, - rule_rate, &runtime->hw, + rule_rate, dpcm, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) goto unlock; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, - rule_channels, &runtime->hw, + rule_channels, dpcm, SNDRV_PCM_HW_PARAM_CHANNELS, -1); if (err < 0) goto unlock; From 0ed11aaf6f2a72c1923c777c4edb670cfa286fcf Mon Sep 17 00:00:00 2001 From: Vikas C Sajjan Date: Thu, 16 Nov 2017 21:43:45 +0530 Subject: [PATCH 1286/3326] x86/acpi: Reduce code duplication in mp_override_legacy_irq() commit 4ee2ec1b122599f7b10c849fa7915cebb37b7edb upstream. The new function mp_register_ioapic_irq() is a subset of the code in mp_override_legacy_irq(). Replace the code duplication by invoking mp_register_ioapic_irq() from mp_override_legacy_irq(). Signed-off-by: Vikas C Sajjan Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Acked-by: Rafael J. Wysocki Cc: linux-pm@vger.kernel.org Cc: kkamagui@gmail.com Cc: linux-acpi@vger.kernel.org Link: https://lkml.kernel.org/r/1510848825-21965-3-git-send-email-vikas.cha.sajjan@hpe.com Cc: Jean Delvare Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/acpi/boot.c | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index d75a7466bc8430..9c2a002d9297cc 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -342,13 +342,12 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e #ifdef CONFIG_X86_IO_APIC #define MP_ISA_BUS 0 +static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, + u8 trigger, u32 gsi); + static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { - int ioapic; - int pin; - struct mpc_intsrc mp_irq; - /* * Check bus_irq boundary. */ @@ -357,14 +356,6 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, return; } - /* - * Convert 'gsi' to 'ioapic.pin'. 
- */ - ioapic = mp_find_ioapic(gsi); - if (ioapic < 0) - return; - pin = mp_find_ioapic_pin(ioapic, gsi); - /* * TBD: This check is for faulty timer entries, where the override * erroneously sets the trigger to level, resulting in a HUGE @@ -373,16 +364,8 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, if ((bus_irq == 0) && (trigger == 3)) trigger = 1; - mp_irq.type = MP_INTSRC; - mp_irq.irqtype = mp_INT; - mp_irq.irqflag = (trigger << 2) | polarity; - mp_irq.srcbus = MP_ISA_BUS; - mp_irq.srcbusirq = bus_irq; /* IRQ */ - mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */ - mp_irq.dstirq = pin; /* INTIN# */ - - mp_save_irq(&mp_irq); - + if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0) + return; /* * Reset default identity mapping if gsi is also an legacy IRQ, * otherwise there will be more than one entry with the same GSI From 58a8bb5eef0e2bedeef8d01f4176e604ff04a07a Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Tue, 9 Jan 2018 13:40:41 -0800 Subject: [PATCH 1287/3326] 8021q: fix a memory leak for VLAN 0 device [ Upstream commit 78bbb15f2239bc8e663aa20bbe1987c91a0b75f6 ] A vlan device with vid 0 is allow to creat by not able to be fully cleaned up by unregister_vlan_dev() which checks for vlan_id!=0. Also, VLAN 0 is probably not a valid number and it is kinda "reserved" for HW accelerating devices, but it is probably too late to reject it from creation even if makes sense. Instead, just remove the check in unregister_vlan_dev(). Reported-by: Dmitry Vyukov Fixes: ad1afb003939 ("vlan_dev: VLAN 0 should be treated as "no vlan tag" (802.1p packet)") Cc: Vlad Yasevich Cc: Ben Hutchings Signed-off-by: Cong Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/8021q/vlan.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 4a72ee4e2ae96b..cf2e70003a534f 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) vlan_gvrp_uninit_applicant(real_dev); } - /* Take it out of our own structures, but be sure to interlock with - * HW accelerating devices or SW vlan input packet processing if - * VLAN is not 0 (leave it there for 802.1p). - */ - if (vlan_id) - vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); + vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); /* Get rid of the vlan's reference to real_dev */ dev_put(real_dev); From b841bff14947b92523a5858db9d401f0e9588ed0 Mon Sep 17 00:00:00 2001 From: Eli Cooper Date: Mon, 25 Dec 2017 10:43:49 +0800 Subject: [PATCH 1288/3326] ip6_tunnel: disable dst caching if tunnel is dual-stack [ Upstream commit 23263ec86a5f44312d2899323872468752324107 ] When an ip6_tunnel is in mode 'any', where the transport layer protocol can be either 4 or 41, dst_cache must be disabled. This is because xfrm policies might apply to only one of the two protocols. Caching dst would cause xfrm policies for one protocol incorrectly used for the other. Signed-off-by: Eli Cooper Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_tunnel.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index ef958d50746b5e..3f46121ad139b7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1081,10 +1081,11 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); neigh_release(neigh); } - } else if (!(t->parms.flags & - (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { - /* enable the cache only only if the routing decision does - * not depend on the current inner header value + } else if (t->parms.proto != 0 && !(t->parms.flags & + (IP6_TNL_F_USE_ORIG_TCLASS | + IP6_TNL_F_USE_ORIG_FWMARK))) { + /* enable the cache only if neither the outer protocol nor the + * routing decision depends on the current inner header value */ use_cache = true; } From ecb89764ef039dfb651d868318e6ee3deb7e172f Mon Sep 17 00:00:00 2001 From: Andrii Vladyka Date: Thu, 4 Jan 2018 13:09:17 +0200 Subject: [PATCH 1289/3326] net: core: fix module type in sock_diag_bind [ Upstream commit b8fd0823e0770c2d5fdbd865bccf0d5e058e5287 ] Use AF_INET6 instead of AF_INET in IPv6-related code path Signed-off-by: Andrii Vladyka Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/sock_diag.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 217f4e3b82f6ed..146b50e30659da 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -288,7 +288,7 @@ static int sock_diag_bind(struct net *net, int group) case SKNLGRP_INET6_UDP_DESTROY: if (!sock_diag_handlers[AF_INET6]) request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, - NETLINK_SOCK_DIAG, AF_INET); + NETLINK_SOCK_DIAG, AF_INET6); break; } return 0; From 5837f963300b63264dd1baf32e96063e807ca28c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 26 Dec 2017 23:15:12 +0000 Subject: [PATCH 1290/3326] phylink: ensure we report link down when LOS asserted [ Upstream commit ac817f5ad066697e4d4d35ec68c974eba2c5f17a ] Although we disable the netdev carrier, we fail to report in the kernel log that the link went down. Fix this. Fixes: 9525ae83959b ("phylink: add phylink infrastructure") Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/phylink.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 4b377b978a0be4..cb85307f125b3c 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1428,9 +1428,8 @@ static void phylink_sfp_link_down(void *upstream) WARN_ON(!lockdep_rtnl_is_held()); set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); + queue_work(system_power_efficient_wq, &pl->resolve); flush_work(&pl->resolve); - - netif_carrier_off(pl->netdev); } static void phylink_sfp_link_up(void *upstream) From 5d127d15ad2f9f33af788f1edf720ba29317f55b Mon Sep 17 00:00:00 2001 From: Mohamed Ghannam Date: Tue, 2 Jan 2018 19:44:34 +0000 Subject: [PATCH 1291/3326] RDS: Heap OOB write in rds_message_alloc_sgs() [ Upstream commit c095508770aebf1b9218e77026e48345d719b17c ] When args->nr_local is 0, nr_pages gets also 0 due some size calculation via rds_rm_size(), which is later used to allocate pages for DMA, this bug produces a heap Out-Of-Bound write access to a specific memory region. 
Signed-off-by: Mohamed Ghannam Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/rds/rdma.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/rds/rdma.c b/net/rds/rdma.c index bc2f1e0977d657..94729d9da43721 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -525,6 +525,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args) local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; + if (args->nr_local == 0) + return -EINVAL; + /* figure out the number of pages in the vector */ for (i = 0; i < args->nr_local; i++) { if (copy_from_user(&vec, &local_vec[i], From 5edbe3c0249f54578636b71377861d579b1781cf Mon Sep 17 00:00:00 2001 From: Mohamed Ghannam Date: Wed, 3 Jan 2018 21:06:06 +0000 Subject: [PATCH 1292/3326] RDS: null pointer dereference in rds_atomic_free_op [ Upstream commit 7d11f77f84b27cef452cee332f4e469503084737 ] set rm->atomic.op_active to 0 when rds_pin_pages() fails or the user supplied address is invalid, this prevents a NULL pointer usage in rds_atomic_free_op() Signed-off-by: Mohamed Ghannam Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/rds/rdma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 94729d9da43721..634cfcb7bba683 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -877,6 +877,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, err: if (page) put_page(page); + rm->atomic.op_active = 0; kfree(rm->atomic.op_notifier); return ret; From 90437795272dfe4b5abaa6dcc612ed0b304b4fb5 Mon Sep 17 00:00:00 2001 From: Fugang Duan Date: Wed, 3 Jan 2018 10:39:29 +0800 Subject: [PATCH 1293/3326] net: fec: restore dev_id in the cases of probe error [ Upstream commit e90f686b4358d7d7e5dbaa48b8e78c9a4e41826e ] The static variable dev_id always plus one before netdev registerred. It should restore the dev_id value in the cases of probe error. Signed-off-by: Fugang Duan Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/freescale/fec_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index faf7cdc97ebfc5..2250639beef040 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3543,6 +3543,7 @@ fec_probe(struct platform_device *pdev) of_node_put(phy_node); failed_ioremap: free_netdev(ndev); + dev_id--; return ret; } From e7e73b10d690c5352cb11b1941a09e4f3dc4c8ce Mon Sep 17 00:00:00 2001 From: Fugang Duan Date: Wed, 3 Jan 2018 10:39:30 +0800 Subject: [PATCH 1294/3326] net: fec: defer probe if regulator is not ready [ Upstream commit 3f38c683033a9a0a2738e7067f449deefabfa3ef ] Defer probe if regulator is not ready. E.g. some regulator is fixed regulator controlled by i2c expander gpio, the i2c device may be probed after the driver, then it should handle the case of defer probe error. Signed-off-by: Fugang Duan Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/freescale/fec_main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2250639beef040..768f031a27d819 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3458,6 +3458,10 @@ fec_probe(struct platform_device *pdev) goto failed_regulator; } } else { + if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto failed_regulator; + } fep->reg_phy = NULL; } From d48c6464c50ef8e7eac7a04bbd2afcc23f020b21 Mon Sep 17 00:00:00 2001 From: Fugang Duan Date: Thu, 4 Jan 2018 10:47:20 +0800 Subject: [PATCH 1295/3326] net: fec: free/restore resource in related probe error pathes [ Upstream commit d1616f07e8f1a4a490d1791316d4a68906b284aa ] Fixes in probe error path: - Restore dev_id before failed_ioremap path. Fixes: ("net: fec: restore dev_id in the cases of probe error") - Call of_node_put(phy_node) before failed_phy path. Fixes: ("net: fec: Support phys probed from devicetree and fixed-link") Signed-off-by: Fugang Duan Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/freescale/fec_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 768f031a27d819..311539c6625f42 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3543,11 +3543,11 @@ fec_probe(struct platform_device *pdev) failed_clk: if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); -failed_phy: of_node_put(phy_node); +failed_phy: + dev_id--; failed_ioremap: free_netdev(ndev); - dev_id--; return ret; } From b4bab9461f89cde8ba11668cbb27a8a79b741416 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Fri, 5 Jan 2018 11:17:17 -0200 Subject: [PATCH 1296/3326] sctp: do not retransmit upon FragNeeded if PMTU discovery is disabled [ Upstream commit cc35c3d1edf7a8373a1a5daa80a912dec96a9cd5 ] Currently, if PMTU discovery is disabled on a given transport, but the configured value is higher than the actual PMTU, it is likely that we will get some icmp Frag Needed. The issue is, if PMTU discovery is disabled, we won't update the information and will issue a retransmission immediately, which may very well trigger another ICMP, and another retransmission, leading to a loop. The fix is to simply not trigger immediate retransmissions if PMTU discovery is disabled on the given transport. Changes from v2: - updated stale comment, noticed by Xin Long Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/input.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/net/sctp/input.c b/net/sctp/input.c index 621b5ca3fd1c17..9320661cc41da0 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -399,20 +399,20 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, return; } - if (t->param_flags & SPP_PMTUD_ENABLE) { - /* Update transports view of the MTU */ - sctp_transport_update_pmtu(t, pmtu); + if (!(t->param_flags & SPP_PMTUD_ENABLE)) + /* We can't allow retransmitting in such case, as the + * retransmission would be sized just as before, and thus we + * would get another icmp, and retransmit again. + */ + return; - /* Update association pmtu. 
*/ - sctp_assoc_sync_pmtu(asoc); - } + /* Update transports view of the MTU */ + sctp_transport_update_pmtu(t, pmtu); - /* Retransmit with the new pmtu setting. - * Normally, if PMTU discovery is disabled, an ICMP Fragmentation - * Needed will never be sent, but if a message was sent before - * PMTU discovery was disabled that was larger than the PMTU, it - * would not be fragmented, so it must be re-transmitted fragmented. - */ + /* Update association pmtu. */ + sctp_assoc_sync_pmtu(asoc); + + /* Retransmit with the new pmtu setting. */ sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD); } From d47da4eb8a60ce99149848ca03ca6c50e1a68bc4 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Fri, 5 Jan 2018 11:17:18 -0200 Subject: [PATCH 1297/3326] sctp: fix the handling of ICMP Frag Needed for too small MTUs [ Upstream commit b6c5734db07079c9410147b32407f2366d584e6c ] syzbot reported a hang involving SCTP, on which it kept flooding dmesg with the message: [ 246.742374] sctp: sctp_transport_update_pmtu: Reported pmtu 508 too low, using default minimum of 512 That happened because whenever SCTP hits an ICMP Frag Needed, it tries to adjust to the new MTU and triggers an immediate retransmission. But it didn't consider the fact that MTUs smaller than the SCTP minimum MTU allowed (512) would not cause the PMTU to change, and issued the retransmission anyway (thus leading to another ICMP Frag Needed, and so on). As IPv4 (ip_rt_min_pmtu=556) and IPv6 (IPV6_MIN_MTU=1280) minimum MTU are higher than that, sctp_transport_update_pmtu() is changed to re-fetch the PMTU that got set after our request, and with that, detect if there was an actual change or not. The fix, thus, skips the immediate retransmission if the received ICMP resulted in no change, in the hope that SCTP will select another path. Note: The value being used for the minimum MTU (512, SCTP_DEFAULT_MINSEGMENT) is not right and instead it should be (576, SCTP_MIN_PMTU), but such change belongs to another patch. Changes from v1: - do not disable PMTU discovery, in the light of commit 06ad391919b2 ("[SCTP] Don't disable PMTU discovery when mtu is small") and as suggested by Xin Long. - changed the way to break the rtx loop by detecting if the icmp resulted in a change or not Changes from v2: none See-also: https://lkml.org/lkml/2017/12/22/811 Reported-by: syzbot Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/net/sctp/structs.h | 2 +- net/sctp/input.c | 8 ++++++-- net/sctp/transport.c | 29 +++++++++++++++++++---------- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 0477945de1a3cf..8e1e1dc490fd9c 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -955,7 +955,7 @@ void sctp_transport_burst_limited(struct sctp_transport *); void sctp_transport_burst_reset(struct sctp_transport *); unsigned long sctp_transport_timeout(struct sctp_transport *); void sctp_transport_reset(struct sctp_transport *t); -void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); +bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); void sctp_transport_immediate_rtx(struct sctp_transport *); void sctp_transport_dst_release(struct sctp_transport *t); void sctp_transport_dst_confirm(struct sctp_transport *t); diff --git a/net/sctp/input.c b/net/sctp/input.c index 9320661cc41da0..141c9c466ec172 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -406,8 +406,12 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, */ return; - /* Update transports view of the MTU */ - sctp_transport_update_pmtu(t, pmtu); + /* Update transports view of the MTU. Return if no update was needed. + * If an update wasn't needed/possible, it also doesn't make sense to + * try to retransmit now. + */ + if (!sctp_transport_update_pmtu(t, pmtu)) + return; /* Update association pmtu. */ sctp_assoc_sync_pmtu(asoc); diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 2d9bd3776bc830..7ef77fd7b52a1e 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -251,28 +251,37 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } -void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) +bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) { struct dst_entry *dst = sctp_transport_dst_check(t); + bool change = true; if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { - pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", - __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); - /* Use default minimum segment size and disable - * pmtu discovery on this transport. 
- */ - t->pathmtu = SCTP_DEFAULT_MINSEGMENT; - } else { - t->pathmtu = pmtu; + pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n", + __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); + /* Use default minimum segment instead */ + pmtu = SCTP_DEFAULT_MINSEGMENT; } + pmtu = SCTP_TRUNC4(pmtu); if (dst) { dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); dst = sctp_transport_dst_check(t); } - if (!dst) + if (!dst) { t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); + dst = t->dst; + } + + if (dst) { + /* Re-fetch, as under layers may have a higher minimum size */ + pmtu = SCTP_TRUNC4(dst_mtu(dst)); + change = t->pathmtu != pmtu; + } + t->pathmtu = pmtu; + + return change; } /* Caches the dst entry and source address for a transport's destination From 47b5886f7310716dfb4b20f4cdc888fb59932b79 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 3 Jan 2018 20:09:49 +0300 Subject: [PATCH 1298/3326] sh_eth: fix TSU resource handling [ Upstream commit dfe8266b8dd10e12a731c985b725fcf7f0e537f0 ] When switching the driver to the managed device API, I managed to break the case of a dual Ether devices sharing a single TSU: the 2nd Ether port wouldn't probe. Iwamatsu-san has tried to fix this but his patch was buggy and he then dropped the ball... The solution is to limit calling devm_request_mem_region() to the first of the two ports sharing the same TSU, so devm_ioremap_resource() can't be used anymore for the TSU resource... Fixes: d5e07e69218f ("sh_eth: use managed device API") Reported-by: Nobuhiro Iwamatsu Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/renesas/sh_eth.c | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d2e88a30f57bb4..f0513fcce13c2f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3212,10 +3212,29 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* ioremap the TSU registers */ if (mdp->cd->tsu) { struct resource *rtsu; + rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); - mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); - if (IS_ERR(mdp->tsu_addr)) { - ret = PTR_ERR(mdp->tsu_addr); + if (!rtsu) { + dev_err(&pdev->dev, "no TSU resource\n"); + ret = -ENODEV; + goto out_release; + } + /* We can only request the TSU region for the first port + * of the two sharing this TSU for the probe to succeed... + */ + if (devno % 2 == 0 && + !devm_request_mem_region(&pdev->dev, rtsu->start, + resource_size(rtsu), + dev_name(&pdev->dev))) { + dev_err(&pdev->dev, "can't request TSU resource.\n"); + ret = -EBUSY; + goto out_release; + } + mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, + resource_size(rtsu)); + if (!mdp->tsu_addr) { + dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); + ret = -ENOMEM; goto out_release; } mdp->port = devno % 2; From db21b74346d7cbbf4cca94fac6b634b3e52e9d56 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Wed, 3 Jan 2018 16:46:29 +0100 Subject: [PATCH 1299/3326] net: stmmac: enable EEE in MII, GMII or RGMII only [ Upstream commit 879626e3a52630316d817cbda7cec9a5446d1d82 ] Note in the databook - Section 4.4 - EEE : " The EEE feature is not supported when the MAC is configured to use the TBI, RTBI, SMII, RMII or SGMII single PHY interface. 
Even if the MAC supports multiple PHY interfaces, you should activate the EEE mode only when the MAC is operating with GMII, MII, or RGMII interface." Applying this restriction solves a stability issue observed on Amlogic gxl platforms operating with RMII interface and the internal PHY. Fixes: 83bf79b6bb64 ("stmmac: disable at run-time the EEE if not supported") Signed-off-by: Jerome Brunet Tested-by: Arnaud Patard Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 28c4d6fa096c95..0ad12c81a9e457 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) bool stmmac_eee_init(struct stmmac_priv *priv) { struct net_device *ndev = priv->dev; + int interface = priv->plat->interface; unsigned long flags; bool ret = false; + if ((interface != PHY_INTERFACE_MODE_MII) && + (interface != PHY_INTERFACE_MODE_GMII) && + !phy_interface_mode_is_rgmii(interface)) + goto out; + /* Using PCS we cannot dial with the phy registers at this stage * so we do not support extra feature like EEE. */ From 705813e942332a4eb63f0cfcb3cd054b4e1d34a7 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Thu, 4 Jan 2018 21:06:49 +0300 Subject: [PATCH 1300/3326] sh_eth: fix SH7757 GEther initialization [ Upstream commit 5133550296d43236439494aa955bfb765a89f615 ] Renesas SH7757 has 2 Fast and 2 Gigabit Ether controllers, while the 'sh_eth' driver can only reset and initialize TSU of the first controller pair. Shimoda-san tried to solve that adding the 'needs_init' member to the 'struct sh_eth_plat_data', however the platform code still never sets this flag. I think that we can infer this information from the 'devno' variable (set to 'platform_device::id') and reset/init the Ether controller pair only for an even 'devno'; therefore 'sh_eth_plat_data::needs_init' can be removed... Fixes: 150647fb2c31 ("net: sh_eth: change the condition of initialization") Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/renesas/sh_eth.c | 4 ++-- include/linux/sh_eth.h | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f0513fcce13c2f..db31963c5d9dd9 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3241,8 +3241,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; } - /* initialize first or needed device */ - if (!devno || pd->needs_init) { + /* Need to init only the first port of the two sharing a TSU */ + if (devno % 2 == 0) { if (mdp->cd->chip_reset) mdp->cd->chip_reset(ndev); diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index ff3642d267f7f3..94081e9a501060 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h @@ -17,7 +17,6 @@ struct sh_eth_plat_data { unsigned char mac_addr[ETH_ALEN]; unsigned no_ether_link:1; unsigned ether_link_active_low:1; - unsigned needs_init:1; }; #endif From d3ccc74c8998de058f40fecff71ada3dc49157b3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 10 Jan 2018 03:45:49 -0800 Subject: [PATCH 1301/3326] ipv6: fix possible mem leaks in ipv6_make_skb() [ Upstream commit 862c03ee1deb7e19e0f9931682e0294ecd1fcaf9 ] ip6_setup_cork() might return an error, while memory allocations have been done and must be rolled back. Fixes: 6422398c2ab0 ("ipv6: introduce ipv6_make_skb") Signed-off-by: Eric Dumazet Cc: Vlad Yasevich Reported-by: Mike Maloney Acked-by: Mike Maloney Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_output.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f7dd51c4231415..688ba5f7516b37 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1735,9 +1735,10 @@ struct sk_buff *ip6_make_skb(struct sock *sk, cork.base.opt = NULL; v6_cork.opt = NULL; err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6); - if (err) + if (err) { + ip6_cork_release(&cork, &v6_cork); return ERR_PTR(err); - + } if (ipc6->dontfrag < 0) ipc6->dontfrag = inet6_sk(sk)->dontfrag; From d7e3ea5326cb668f457624476d6dc924c6585658 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 29 Dec 2017 10:02:52 -0800 Subject: [PATCH 1302/3326] ethtool: do not print warning for applications using legacy API [ Upstream commit 71891e2dab6b55a870f8f7735e44a2963860b5c6 ] In kernel log ths message appears on every boot: "warning: `NetworkChangeNo' uses legacy ethtool link settings API, link modes are only partially reported" When ethtool link settings API changed, it started complaining about usages of old API. Ironically, the original patch was from google but the application using the legacy API is chrome. Linux ABI is fixed as much as possible. The kernel must not break it and should not complain about applications using legacy API's. This patch just removes the warning since using legacy API's in Linux is perfectly acceptable. Fixes: 3f1ac7a700d0 ("net: ethtool: add new ETHTOOL_xLINKSETTINGS API") Signed-off-by: Stephen Hemminger Signed-off-by: David Decotigny Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/core/ethtool.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 9a9a3d77e3274f..d374a904f1b172 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -754,15 +754,6 @@ static int ethtool_set_link_ksettings(struct net_device *dev, return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); } -static void -warn_incomplete_ethtool_legacy_settings_conversion(const char *details) -{ - char name[sizeof(current->comm)]; - - pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n", - get_task_comm(name, current), details); -} - /* Query device for its ethtool_cmd settings. * * Backward compatibility note: for compatibility with legacy ethtool, @@ -789,10 +780,8 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) &link_ksettings); if (err < 0) return err; - if (!convert_link_ksettings_to_legacy_settings(&cmd, - &link_ksettings)) - warn_incomplete_ethtool_legacy_settings_conversion( - "link modes are only partially reported"); + convert_link_ksettings_to_legacy_settings(&cmd, + &link_ksettings); /* send a sensible cmd tag back to user */ cmd.cmd = ETHTOOL_GSET; From 3ef57b767e1b70ca594d5c6f7de687326cedcc7f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 25 Dec 2017 08:57:35 +0100 Subject: [PATCH 1303/3326] mlxsw: spectrum_router: Fix NULL pointer deref [ Upstream commit 8764a8267b128405cf383157d5e9a4a3735d2409 ] When we remove the neighbour associated with a nexthop we should always refuse to write the nexthop to the adjacency table. Regardless if it is already present in the table or not. Otherwise, we risk dereferencing the NULL pointer that was set instead of the neighbour. Fixes: a7ff87acd995 ("mlxsw: spectrum_router: Implement next-hop routing") Signed-off-by: Ido Schimmel Reported-by: Alexander Petrovskiy Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 5189022a1c8c33..c23cc51bb5a5ba 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2536,7 +2536,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, { if (!removing) nh->should_offload = 1; - else if (nh->offloaded) + else nh->should_offload = 0; nh->update = 1; } From 71e7f85e10f189f4aa0162d6eea4a7163a48e3a6 Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Tue, 26 Dec 2017 07:48:51 +0200 Subject: [PATCH 1304/3326] net/sched: Fix update of lastuse in act modules implementing stats_update [ Upstream commit 3bb23421a504f01551b7cb9dff0e41dbf16656b0 ] We need to update lastuse to to the most updated value between what is already set and the new value. If HW matching fails, i.e. because of an issue, the stats are not updated but it could be that software did match and updated lastuse. Fixes: 5712bf9c5c30 ("net/sched: act_mirred: Use passed lastuse argument") Fixes: 9fea47d93bcc ("net/sched: act_gact: Update statistics when offloaded to hardware") Signed-off-by: Roi Dayan Reviewed-by: Paul Blakey Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/sched/act_gact.c | 2 +- net/sched/act_mirred.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index e29a48ef7fc348..a0ac42b3ed0652 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -159,7 +159,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets, if (action == TC_ACT_SHOT) this_cpu_ptr(gact->common.cpu_qstats)->drops += packets; - tm->lastuse = lastuse; + tm->lastuse = max_t(u64, tm->lastuse, lastuse); } static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 416627c66f081f..6ce8de373f8351 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -238,7 +238,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, struct tcf_t *tm = &m->tcf_tm; _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); - tm->lastuse = lastuse; + tm->lastuse = max_t(u64, tm->lastuse, lastuse); } static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, From 0c21439334069b0cf8cac9f9115918b9b3a11287 Mon Sep 17 00:00:00 2001 From: Mathieu Xhonneux Date: Wed, 10 Jan 2018 13:35:49 +0000 Subject: [PATCH 1305/3326] ipv6: sr: fix TLVs not being copied using setsockopt [ Upstream commit ccc12b11c5332c84442ef120dcd631523be75089 ] Function ipv6_push_rthdr4 allows to add an IPv6 Segment Routing Header to a socket through setsockopt, but the current implementation doesn't copy possible TLVs at the end of the SRH received from userspace. Therefore, the execution of the following branch if (sr_has_hmac(sr_phdr)) { ... } will never complete since the len and type fields of a possible HMAC TLV are not copied, hence seg6_get_tlv_hmac will return an error, and the HMAC will not be computed. This commit adds a memcpy in case TLVs have been appended to the SRH. Fixes: a149e7c7ce81 ("ipv6: sr: add support for SRH injection through setsockopt") Acked-by: David Lebrun Signed-off-by: Mathieu Xhonneux Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/exthdrs.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 95516138e861cc..d6189c2a35e4f6 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -884,6 +884,15 @@ static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto, sr_phdr->segments[0] = **addr_p; *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left]; + if (sr_ihdr->hdrlen > hops * 2) { + int tlvs_offset, tlvs_length; + + tlvs_offset = (1 + hops * 2) << 3; + tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3; + memcpy((char *)sr_phdr + tlvs_offset, + (char *)sr_ihdr + tlvs_offset, tlvs_length); + } + #ifdef CONFIG_IPV6_SEG6_HMAC if (sr_has_hmac(sr_phdr)) { struct net *net = NULL; From ce66902de86a310da28f4ea0a5da0c7bd7c6745b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 25 Dec 2017 09:05:33 +0100 Subject: [PATCH 1306/3326] mlxsw: spectrum: Relax sanity checks during enslavement [ Upstream commit 90045fc9c78855bdc625a0ab185d97b72a937613 ] Since commit 25cc72a33835 ("mlxsw: spectrum: Forbid linking to devices that have uppers") the driver forbids enslavement to netdevs that already have uppers of their own, as this can result in various ordering problems. This requirement proved to be too strict for some users who need to be able to enslave ports to a bridge that already has uppers. 
In this case, we can allow the enslavement if the bridge is already known to us, as any configuration performed on top of the bridge was already reflected to the device. Fixes: 25cc72a33835 ("mlxsw: spectrum: Forbid linking to devices that have uppers") Signed-off-by: Ido Schimmel Reported-by: Alexander Petrovskiy Tested-by: Alexander Petrovskiy Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 11 +++++++++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 ++ .../net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 6 ++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3ead7439821c9e..99bd6e88ebc7e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4235,7 +4235,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, return -EINVAL; if (!info->linking) break; - if (netdev_has_any_upper_dev(upper_dev)) + if (netdev_has_any_upper_dev(upper_dev) && + (!netif_is_bridge_master(upper_dev) || + !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, + upper_dev))) return -EINVAL; if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, @@ -4347,6 +4350,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct netdev_notifier_changeupper_info *info = ptr; struct net_device *upper_dev; int err = 0; @@ -4358,7 +4362,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, return -EINVAL; if (!info->linking) break; - if (netdev_has_any_upper_dev(upper_dev)) + if (netdev_has_any_upper_dev(upper_dev) && + (!netif_is_bridge_master(upper_dev) || + !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, + upper_dev))) return -EINVAL; break; case NETDEV_CHANGEUPPER: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 84ce83acdc199b..88892d47acaebf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -326,6 +326,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, struct net_device *br_dev); +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev); /* spectrum.c */ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index d39ffbfcc436fd..f5863e5bec8131 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -134,6 +134,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge, return NULL; } +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev) +{ + return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); +} + static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, struct net_device *br_dev) From 208c17515130e2d97ab81a52ca0afb1c40722f7b Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 26 Dec 
2017 23:15:17 +0000 Subject: [PATCH 1307/3326] sfp: fix sfp-bus oops when removing socket/upstream [ Upstream commit 0b2122e4934c7783d336397864e34ee53aad0965 ] When we remove a socket or upstream, and the other side isn't registered, we dereference a NULL pointer, causing a kernel oops. Fix this. Fixes: ce0aa27ff3f6 ("sfp: add sfp-bus to bridge between network devices and sfp cages") Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/sfp-bus.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 5cb5384697ea75..7ae815bee52d58 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -359,7 +359,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream); void sfp_unregister_upstream(struct sfp_bus *bus) { rtnl_lock(); - sfp_unregister_bus(bus); + if (bus->sfp) + sfp_unregister_bus(bus); bus->upstream = NULL; bus->netdev = NULL; rtnl_unlock(); @@ -464,7 +465,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket); void sfp_unregister_socket(struct sfp_bus *bus) { rtnl_lock(); - sfp_unregister_bus(bus); + if (bus->netdev) + sfp_unregister_bus(bus); bus->sfp_dev = NULL; bus->sfp = NULL; bus->socket_ops = NULL; From d01c7936966982fa6723ec80c018c5e89045fde3 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Fri, 15 Dec 2017 14:23:10 -0500 Subject: [PATCH 1308/3326] membarrier: Disable preemption when calling smp_call_function_many() commit 541676078b52f365f53d46ee5517d305cd1b6350 upstream. smp_call_function_many() requires disabling preemption around the call. Signed-off-by: Mathieu Desnoyers Cc: Andrea Parri Cc: Andrew Hunter Cc: Avi Kivity Cc: Benjamin Herrenschmidt Cc: Boqun Feng Cc: Dave Watson Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Maged Michael Cc: Michael Ellerman Cc: Paul E . McKenney Cc: Paul E. McKenney Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20171215192310.25293-1-mathieu.desnoyers@efficios.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- kernel/sched/membarrier.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c index dd7908743dab69..9bcbacba82a811 100644 --- a/kernel/sched/membarrier.c +++ b/kernel/sched/membarrier.c @@ -89,7 +89,9 @@ static int membarrier_private_expedited(void) rcu_read_unlock(); } if (!fallback) { + preempt_disable(); smp_call_function_many(tmpmask, ipi_mb, NULL, 1); + preempt_enable(); free_cpumask_var(tmpmask); } cpus_read_unlock(); From 3662493dbdad17c3d144cfb73d0c897ccc28cc52 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Dec 2017 14:30:19 -0600 Subject: [PATCH 1309/3326] crypto: algapi - fix NULL dereference in crypto_remove_spawns() commit 9a00674213a3f00394f4e3221b88f2d21fc05789 upstream. syzkaller triggered a NULL pointer dereference in crypto_remove_spawns() via a program that repeatedly and concurrently requests AEADs "authenc(cmac(des3_ede-asm),pcbc-aes-aesni)" and hashes "cmac(des3_ede)" through AF_ALG, where the hashes are requested as "untested" (CRYPTO_ALG_TESTED is set in ->salg_mask but clear in ->salg_feat; this causes the template to be instantiated for every request). 
Although AF_ALG users really shouldn't be able to request an "untested" algorithm, the NULL pointer dereference is actually caused by a longstanding race condition where crypto_remove_spawns() can encounter an instance which has had spawn(s) "grabbed" but hasn't yet been registered, resulting in ->cra_users still being NULL. We probably should properly initialize ->cra_users earlier, but that would require updating many templates individually. For now just fix the bug in a simple way that can easily be backported: make crypto_remove_spawns() treat a NULL ->cra_users list as empty. Reported-by: syzbot Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/algapi.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crypto/algapi.c b/crypto/algapi.c index aa699ff6c87656..50eb828db767e8 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -167,6 +167,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, spawn->alg = NULL; spawns = &inst->alg.cra_users; + + /* + * We may encounter an unregistered instance here, since + * an instance's spawns are set up prior to the instance + * being registered. An unregistered instance will have + * NULL ->cra_users.next, since ->cra_users isn't + * properly initialized until registration. But an + * unregistered instance cannot have any users, so treat + * it the same as ->cra_users being empty. + */ + if (spawns->next == NULL) + break; } } while ((spawns = crypto_more_spawns(alg, &stack, &top, &secondary_spawns))); From 961becd84df940157cced59d2d3b6ab8be70fa9a Mon Sep 17 00:00:00 2001 From: Masaharu Hayakawa Date: Wed, 13 Dec 2017 11:33:00 +0900 Subject: [PATCH 1310/3326] mmc: renesas_sdhi: Add MODULE_LICENSE commit 967a6a07e95c58eb9c1581d22a1d9c2d1929843f upstream. The following error occurs when loading renesas_sdhi_core.c module, so add MODULE_LICENSE("GPL v2"). renesas_sdhi_core: module license 'unspecified' taints kernel. Signed-off-by: Masaharu Hayakawa Fixes: 9d08428afb72 ("mmc: renesas-sdhi: make renesas_sdhi_sys_dmac main module file") [Shimoda: Added Fixes tag and Cc to the stable ML] Signed-off-by: Yoshihiro Shimoda Reviewed-by: Simon Horman Acked-by: Wolfram Sang Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/renesas_sdhi_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index fcf7235d5742ae..157e1d9e7725a0 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_device *pdev) return 0; } EXPORT_SYMBOL_GPL(renesas_sdhi_remove); + +MODULE_LICENSE("GPL v2"); From e068cdee6347eda64a2ecf60c14382bb465175a2 Mon Sep 17 00:00:00 2001 From: Florian Margaine Date: Wed, 13 Dec 2017 16:43:59 +0100 Subject: [PATCH 1311/3326] rbd: reacquire lock should update lock owner client id commit edd8ca8015800b354453b891d38960f3a474b7e4 upstream. Otherwise, future operations on this RBD using exclusive-lock are going to require the lock from a non-existent client id. 
Fixes: 14bb211d324d ("rbd: support updating the lock cookie without releasing the lock") Link: http://tracker.ceph.com/issues/19929 Signed-off-by: Florian Margaine [idryomov@gmail.com: rbd_set_owner_cid() call, __rbd_lock() helper] Signed-off-by: Ilya Dryomov Signed-off-by: Greg Kroah-Hartman --- drivers/block/rbd.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index adc877dfef5c2c..b0b6f52e030742 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3074,13 +3074,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf) mutex_unlock(&rbd_dev->watch_mutex); } +static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie) +{ + struct rbd_client_id cid = rbd_get_cid(rbd_dev); + + strcpy(rbd_dev->lock_cookie, cookie); + rbd_set_owner_cid(rbd_dev, &cid); + queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); +} + /* * lock_rwsem must be held for write */ static int rbd_lock(struct rbd_device *rbd_dev) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; - struct rbd_client_id cid = rbd_get_cid(rbd_dev); char cookie[32]; int ret; @@ -3095,9 +3103,7 @@ static int rbd_lock(struct rbd_device *rbd_dev) return ret; rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; - strcpy(rbd_dev->lock_cookie, cookie); - rbd_set_owner_cid(rbd_dev, &cid); - queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); + __rbd_lock(rbd_dev, cookie); return 0; } @@ -3883,7 +3889,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev) queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); } else { - strcpy(rbd_dev->lock_cookie, cookie); + __rbd_lock(rbd_dev, cookie); } } From 0b5e3dbf479614c79e03cc2ba361d261e8b0f340 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Thu, 21 Dec 2017 15:35:11 +0100 Subject: [PATCH 1312/3326] rbd: set max_segments to USHRT_MAX commit 21acdf45f4958135940f0b4767185cf911d4b010 upstream. Commit d3834fefcfe5 ("rbd: bump queue_max_segments") bumped max_segments (unsigned short) to max_hw_sectors (unsigned int). max_hw_sectors is set to the number of 512-byte sectors in an object and overflows unsigned short for 32M (largest possible) objects, making the block layer resort to handing us single segment (i.e. single page or even smaller) bios in that case. Fixes: d3834fefcfe5 ("rbd: bump queue_max_segments") Signed-off-by: Ilya Dryomov Reviewed-by: Alex Elder Signed-off-by: Greg Kroah-Hartman --- drivers/block/rbd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b0b6f52e030742..609227211295a4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4421,7 +4421,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) segment_size = rbd_obj_bytes(&rbd_dev->header); blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); q->limits.max_sectors = queue_max_hw_sectors(q); - blk_queue_max_segments(q, segment_size / SECTOR_SIZE); + blk_queue_max_segments(q, USHRT_MAX); blk_queue_max_segment_size(q, segment_size); blk_queue_io_min(q, segment_size); blk_queue_io_opt(q, segment_size); From 8ec160c5f0a7fbf153a4c0dd8cd520f476b46aa0 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 4 Jan 2018 09:19:13 +0200 Subject: [PATCH 1313/3326] iwlwifi: pcie: fix DMA memory mapping / unmapping commit 943309d4aad6732b905f3f500e6e17e33c211494 upstream. 22000 devices (previously referenced as A000) can support short transmit queues. 
This means that we have less DMA descriptors (TFD) for those shorter queues. Previous devices must still have 256 TFDs for each queue even if those 256 TFDs point to fewer buffers. When I introduced support for the short queues for 22000 I broke older devices by assuming that they can also have less TFDs in their queues. This led to several problems: 1) the payload of the commands weren't unmapped properly which caused the SWIOTLB to complain at some point. 2) the hardware could get confused and we get hardware crashes. The corresponding bugzilla entries are: https://bugzilla.kernel.org/show_bug.cgi?id=198201 https://bugzilla.kernel.org/show_bug.cgi?id=198265 Fixes: 4ecab5616023 ("iwlwifi: pcie: support short Tx queues for A000 device family") Reviewed-by: Sharon, Sara Signed-off-by: Emmanuel Grumbach Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 10 +++++++--- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 11 +++-------- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 8 ++++---- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 4fb7647995c396..9875ab5ce18c3f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -666,11 +666,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index) return index & (q->n_window - 1); } -static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, +static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans, struct iwl_txq *txq, int idx) { - return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, - idx); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans->cfg->use_tfh) + idx = iwl_pcie_get_cmd_index(txq, idx); + + return txq->tfds + trans_pcie->tfd_size * idx; } static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index d74613fcb756c8..6f45c8148b2793 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans, static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ @@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) lockdep_assert_held(&txq->lock); iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_pcie_get_tfd(trans_pcie, txq, idx)); + iwl_pcie_get_tfd(trans, txq, idx)); /* free SKB */ if (txq->entries) { @@ -367,11 +365,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *out_meta) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = - iwl_pcie_get_tfd(trans_pcie, txq, idx); + struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); dma_addr_t tb_phys; bool amsdu; int i, len, tb1_len, tb2_len, hdr_len; @@ -568,8 +564,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, u8 
group_id = iwl_cmd_groupid(cmd->id); const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - struct iwl_tfh_tfd *tfd = - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); memset(tfd, 0, sizeof(*tfd)); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index c645d10d37072c..4704137a26e0ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i, num_tbs; - void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); + void *tfd = iwl_pcie_get_tfd(trans, txq, index); /* Sanity check on number of chunks */ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); @@ -1999,7 +1999,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), + iwl_pcie_get_tfd(trans, txq, txq->write_ptr), trans_pcie->tfd_size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -2073,7 +2073,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, IEEE80211_CCMP_HDR_LEN : 0; trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), + iwl_pcie_get_tfd(trans, txq, txq->write_ptr), trans_pcie->tfd_size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); @@ -2406,7 +2406,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); - tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), iwl_pcie_tfd_get_num_tbs(trans, tfd)); From d98309da07fdce96f13534494f4c8210325f48fd Mon Sep 17 00:00:00 2001 From: Jia Zhang Date: Mon, 1 Jan 2018 10:04:47 +0800 Subject: [PATCH 1314/3326] x86/microcode/intel: Extend BDW late-loading with a revision check commit b94b7373317164402ff7728d10f7023127a02b60 upstream. Instead of blacklisting all model 79 CPUs when attempting a late microcode loading, limit that only to CPUs with microcode revisions < 0x0b000021 because only on those late loading may cause a system hang. For such processors either: a) a BIOS update which might contain a newer microcode revision or b) the early microcode loading method should be considered. Processors with revisions 0x0b000021 or higher will not experience such hangs. For more details, see erratum BDF90 in document #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family Specification Update) from September 2017. [ bp: Heavily massage commit message and pr_* statements. 
] Fixes: 723f2828a98c ("x86/microcode/intel: Disable late loading on model 79") Signed-off-by: Jia Zhang Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Acked-by: Tony Luck Cc: x86-ml Link: http://lkml.kernel.org/r/1514772287-92959-1-git-send-email-qianyue.zj@alibaba-inc.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/microcode/intel.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 8ccdca6d3f9e9b..d9e460fc7a3b30 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -910,8 +910,17 @@ static bool is_blacklisted(unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); - if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) { - pr_err_once("late loading on model 79 is disabled.\n"); + /* + * Late loading on model 79 with microcode revision less than 0x0b000021 + * may result in a system hang. This behavior is documented in item + * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family). + */ + if (c->x86 == 6 && + c->x86_model == INTEL_FAM6_BROADWELL_X && + c->x86_mask == 0x01 && + c->microcode < 0x0b000021) { + pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); + pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); return true; } From 679090724f774c9865b10ebc2a4b6945ebf3cb1d Mon Sep 17 00:00:00 2001 From: Andrew Honig Date: Wed, 10 Jan 2018 10:12:03 -0800 Subject: [PATCH 1315/3326] KVM: x86: Add memory barrier on vmcs field lookup commit 75f139aaf896d6fdeec2e468ddfa4b2fe469bf40 upstream. This adds a memory barrier when performing a lookup into the vmcs_field_to_offset_table. This is related to CVE-2017-5753. Signed-off-by: Andrew Honig Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 93de00832a8254..e6f291d88f3e37 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -888,8 +888,16 @@ static inline short vmcs_field_to_offset(unsigned long field) { BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); - if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || - vmcs_field_to_offset_table[field] == 0) + if (field >= ARRAY_SIZE(vmcs_field_to_offset_table)) + return -ENOENT; + + /* + * FIXME: Mitigation for CVE-2017-5753. To be replaced with a + * generic mechanism. + */ + asm("lfence"); + + if (vmcs_field_to_offset_table[field] == 0) return -ENOENT; return vmcs_field_to_offset_table[field]; From fead4387523df571c01b5cb9f88609f0e49c3feb Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 22 Nov 2017 14:42:21 +1100 Subject: [PATCH 1316/3326] KVM: PPC: Book3S PR: Fix WIMG handling under pHyp commit 6c7d47c33ed323f14f2a3b8de925e831dbaa4e69 upstream. Commit 96df226 ("KVM: PPC: Book3S PR: Preserve storage control bits") added code to preserve WIMG bits but it missed 2 special cases: - a magic page in kvmppc_mmu_book3s_64_xlate() and - guest real mode in kvmppc_handle_pagefault(). For these ptes, WIMG was 0 and pHyp failed on these causing a guest to stop in the very beginning at NIP=0x100 (due to bd9166ffe "KVM: PPC: Book3S PR: Exit KVM on failed mapping"). 
According to LoPAPR v1.1 14.5.4.1.2 H_ENTER: The hypervisor checks that the WIMG bits within the PTE are appropriate for the physical page number else H_Parameter return. (For System Memory pages WIMG=0010, or, 1110 if the SAO option is enabled, and for IO pages WIMG=01**.) This hence initializes WIMG to non-zero value HPTE_R_M (0x10), as expected by pHyp. [paulus@ozlabs.org - fix compile for 32-bit] Fixes: 96df226 "KVM: PPC: Book3S PR: Preserve storage control bits" Signed-off-by: Alexey Kardashevskiy Tested-by: Ruediger Oertel Reviewed-by: Greg Kurz Tested-by: Greg Kurz Signed-off-by: Paul Mackerras Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_64_mmu.c | 1 + arch/powerpc/kvm/book3s_pr.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 29ebe2fd58674c..a93d719edc9068 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, gpte->may_read = true; gpte->may_write = true; gpte->page_size = MMU_PAGE_4K; + gpte->wimg = HPTE_R_M; return 0; } diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 69a09444d46e66..e2ef1619845613 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); #define MSR_USER32 MSR_USER #define MSR_USER64 MSR_USER #define HW_PAGE_SIZE PAGE_SIZE +#define HPTE_R_M _PAGE_COHERENT #endif static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) @@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, pte.eaddr = eaddr; pte.vpage = eaddr >> 12; pte.page_size = MMU_PAGE_64K; + pte.wimg = HPTE_R_M; } switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { From 5584082e854f3ac81bb9ad7ee663a4594ca21bdd Mon Sep 17 00:00:00 2001 From: Serhii Popovych Date: Mon, 4 Dec 2017 09:36:41 -0500 Subject: [PATCH 1317/3326] KVM: PPC: Book3S HV: Drop prepare_done from struct kvm_resize_hpt commit 3073774e638ef18d222465fe92bfc8fccb90d288 upstream. Currently the kvm_resize_hpt structure has two fields relevant to the state of an ongoing resize: 'prepare_done', which indicates whether the worker thread has completed or not, and 'error' which indicates whether it was successful or not. Since the success/failure isn't known until completion, this is confusingly redundant. This patch consolidates the information into just the 'error' value: -EBUSY indicates the worked is still in progress, other negative values indicate (completed) failure, 0 indicates successful completion. As a bonus this reduces size of struct kvm_resize_hpt by __alignof__(struct kvm_hpt_info) and saves few bytes of code. While there correct comment in struct kvm_resize_hpt which references a non-existent semaphore (leftover from an early draft). Assert with WARN_ON() in case of HPT allocation thread work runs more than once for resize request or resize_hpt_allocate() returns -EBUSY that is treated specially. Change comparison against zero to make checkpatch.pl happy. 
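As an illustrative aside, the consolidated encoding is easy to read off in a few lines of C. This is a hedged userspace summary of the semantics described above, not kernel code, and the helper name is invented for the example:

    #include <errno.h>

    /* resize->error now carries the whole lifecycle that prepare_done
     * used to track separately:
     *   -EBUSY          worker thread is still allocating the new HPT
     *   other negative  allocation finished and failed
     *   0               allocation finished successfully
     */
    static const char *resize_hpt_state(int error)
    {
            if (error == -EBUSY)
                    return "in progress";
            return error ? "failed" : "ready";
    }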
Signed-off-by: Serhii Popovych [dwg: Changed BUG_ON()s to WARN_ON()s and altered commit message for clarity] Signed-off-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 44 ++++++++++++++++++----------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 59247af5fd4507..c0fe9322309eee 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -65,11 +65,17 @@ struct kvm_resize_hpt { u32 order; /* These fields protected by kvm->lock */ + + /* Possible values and their usage: + * <0 an error occurred during allocation, + * -EBUSY allocation is in the progress, + * 0 allocation made successfuly. + */ int error; - bool prepare_done; - /* Private to the work thread, until prepare_done is true, - * then protected by kvm->resize_hpt_sem */ + /* Private to the work thread, until error != -EBUSY, + * then protected by kvm->lock. + */ struct kvm_hpt_info hpt; }; @@ -1444,15 +1450,23 @@ static void resize_hpt_prepare_work(struct work_struct *work) struct kvm *kvm = resize->kvm; int err; + if (WARN_ON(resize->error != -EBUSY)) + return; + resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", resize->order); err = resize_hpt_allocate(resize); + /* We have strict assumption about -EBUSY + * when preparing for HPT resize. + */ + if (WARN_ON(err == -EBUSY)) + err = -EINPROGRESS; + mutex_lock(&kvm->lock); resize->error = err; - resize->prepare_done = true; mutex_unlock(&kvm->lock); } @@ -1477,14 +1491,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, if (resize) { if (resize->order == shift) { - /* Suitable resize in progress */ - if (resize->prepare_done) { - ret = resize->error; - if (ret != 0) - resize_hpt_release(kvm, resize); - } else { + /* Suitable resize in progress? */ + ret = resize->error; + if (ret == -EBUSY) ret = 100; /* estimated time in ms */ - } + else if (ret) + resize_hpt_release(kvm, resize); goto out; } @@ -1504,6 +1516,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, ret = -ENOMEM; goto out; } + + resize->error = -EBUSY; resize->order = shift; resize->kvm = kvm; INIT_WORK(&resize->work, resize_hpt_prepare_work); @@ -1558,16 +1572,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, if (!resize || (resize->order != shift)) goto out; - ret = -EBUSY; - if (!resize->prepare_done) - goto out; - ret = resize->error; - if (ret != 0) + if (ret) goto out; ret = resize_hpt_rehash(resize); - if (ret != 0) + if (ret) goto out; resize_hpt_pivot(resize); From c8e754fe3b8e7961cd6127793ffbb38108b29629 Mon Sep 17 00:00:00 2001 From: Serhii Popovych Date: Mon, 4 Dec 2017 09:36:42 -0500 Subject: [PATCH 1318/3326] KVM: PPC: Book3S HV: Fix use after free in case of multiple resize requests commit 4ed11aeefda439c76ddae3ceebcfa4fad111f149 upstream. When serving multiple resize requests following could happen: CPU0 CPU1 ---- ---- kvm_vm_ioctl_resize_hpt_prepare(1); -> schedule_work() /* system_rq might be busy: delay */ kvm_vm_ioctl_resize_hpt_prepare(2); mutex_lock(); if (resize) { ... release_hpt_resize(); } ... resize_hpt_prepare_work() -> schedule_work() { mutex_unlock() /* resize->kvm could be wrong */ struct kvm *kvm = resize->kvm; mutex_lock(&kvm->lock); <<<< UAF ... } i.e. 
a second resize request with different order could be started by kvm_vm_ioctl_resize_hpt_prepare(), causing the previous request to be free()d when there's still an active worker thread which will try to access it. This leads to a use after free in point marked with UAF on the diagram above. To prevent this from happening, instead of unconditionally releasing a pre-existing resize structure from the prepare ioctl(), we check if the existing structure has an in-progress worker. We do that by checking if the resize->error == -EBUSY, which is safe because the resize->error field is protected by the kvm->lock. If there is an active worker, instead of releasing, we mark the structure as stale by unlinking it from kvm_struct. In the worker thread we check for a stale structure (with kvm->lock held), and in that case abort, releasing the stale structure ourself. We make the check both before and the actual allocation. Strictly, only the check afterwards is needed, the check before is an optimization: if the structure happens to become stale before the worker thread is dispatched, rather than during the allocation, it means we can avoid allocating then immediately freeing a potentially substantial amount of memory. This fixes following or similar host kernel crash message: [ 635.277361] Unable to handle kernel paging request for data at address 0x00000000 [ 635.277438] Faulting instruction address: 0xc00000000052f568 [ 635.277446] Oops: Kernel access of bad area, sig: 11 [#1] [ 635.277451] SMP NR_CPUS=2048 NUMA PowerNV [ 635.277470] Modules linked in: xt_CHECKSUM iptable_mangle ipt_MASQUERADE nf_nat_masquerade_ipv4 iptable_nat nf_nat_ipv4 nf_nat nf_conntrack_ipv4 nf_defrag_ipv4 xt_conntrack nf_conntrack ipt_REJECT nf_reject_ipv4 tun bridge stp llc ebtable_filter ebtables ip6table_filter ip6_tables iptable_filter nfsv3 nfs_acl nfs lockd grace fscache kvm_hv kvm rpcrdma sunrpc ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod ext4 ib_srp scsi_transport_srp ib_ipoib mbcache jbd2 rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ocrdma(T) ib_core ses enclosure scsi_transport_sas sg shpchp leds_powernv ibmpowernv i2c_opal i2c_core powernv_rng ipmi_powernv ipmi_devintf ipmi_msghandler ip_tables xfs libcrc32c sr_mod sd_mod cdrom lpfc nvme_fc(T) nvme_fabrics nvme_core ipr nvmet_fc(T) tg3 nvmet libata be2net crc_t10dif crct10dif_generic scsi_transport_fc ptp scsi_tgt pps_core crct10dif_common dm_mirror dm_region_hash dm_log dm_mod [ 635.278687] CPU: 40 PID: 749 Comm: kworker/40:1 Tainted: G ------------ T 3.10.0.bz1510771+ #1 [ 635.278782] Workqueue: events resize_hpt_prepare_work [kvm_hv] [ 635.278851] task: c0000007e6840000 ti: c0000007e9180000 task.ti: c0000007e9180000 [ 635.278919] NIP: c00000000052f568 LR: c0000000009ea310 CTR: c0000000009ea4f0 [ 635.278988] REGS: c0000007e91837f0 TRAP: 0300 Tainted: G ------------ T (3.10.0.bz1510771+) [ 635.279077] MSR: 9000000100009033 CR: 24002022 XER: 00000000 [ 635.279248] CFAR: c000000000009368 DAR: 0000000000000000 DSISR: 40000000 SOFTE: 1 GPR00: c0000000009ea310 c0000007e9183a70 c000000001250b00 c0000007e9183b10 GPR04: 0000000000000000 0000000000000000 c0000007e9183650 0000000000000000 GPR08: c0000007ffff7b80 00000000ffffffff 0000000080000028 d00000000d2529a0 GPR12: 0000000000002200 c000000007b56800 c000000000120028 c0000007f135bb40 GPR16: 0000000000000000 c000000005c1e018 c000000005c1e018 0000000000000000 GPR20: 0000000000000001 c0000000011bf778 0000000000000001 fffffffffffffef7 GPR24: 0000000000000000 
c000000f1e262e50 0000000000000002 c0000007e9180000 GPR28: c000000f1e262e4c c000000f1e262e50 0000000000000000 c0000007e9183b10 [ 635.280149] NIP [c00000000052f568] __list_add+0x38/0x110 [ 635.280197] LR [c0000000009ea310] __mutex_lock_slowpath+0xe0/0x2c0 [ 635.280253] Call Trace: [ 635.280277] [c0000007e9183af0] [c0000000009ea310] __mutex_lock_slowpath+0xe0/0x2c0 [ 635.280356] [c0000007e9183b70] [c0000000009ea554] mutex_lock+0x64/0x70 [ 635.280426] [c0000007e9183ba0] [d00000000d24da04] resize_hpt_prepare_work+0xe4/0x1c0 [kvm_hv] [ 635.280507] [c0000007e9183c40] [c000000000113c0c] process_one_work+0x1dc/0x680 [ 635.280587] [c0000007e9183ce0] [c000000000114250] worker_thread+0x1a0/0x520 [ 635.280655] [c0000007e9183d80] [c00000000012010c] kthread+0xec/0x100 [ 635.280724] [c0000007e9183e30] [c00000000000a4b8] ret_from_kernel_thread+0x5c/0xa4 [ 635.280814] Instruction dump: [ 635.280880] 7c0802a6 fba1ffe8 fbc1fff0 7cbd2b78 fbe1fff8 7c9e2378 7c7f1b78 f8010010 [ 635.281099] f821ff81 e8a50008 7fa52040 40de00b8 7fbd2840 40de008c 7fbff040 [ 635.281324] ---[ end trace b628b73449719b9d ]--- Fixes: b5baa6877315 ("KVM: PPC: Book3S HV: KVM-HV HPT resizing implementation") Signed-off-by: Serhii Popovych [dwg: Replaced BUG_ON()s with WARN_ONs() and reworded commit message for clarity] Signed-off-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 50 ++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index c0fe9322309eee..b2e775d84c573d 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -1430,16 +1430,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize) static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) { - BUG_ON(kvm->arch.resize_hpt != resize); + if (WARN_ON(!mutex_is_locked(&kvm->lock))) + return; if (!resize) return; - if (resize->hpt.virt) - kvmppc_free_hpt(&resize->hpt); + if (resize->error != -EBUSY) { + if (resize->hpt.virt) + kvmppc_free_hpt(&resize->hpt); + kfree(resize); + } - kvm->arch.resize_hpt = NULL; - kfree(resize); + if (kvm->arch.resize_hpt == resize) + kvm->arch.resize_hpt = NULL; } static void resize_hpt_prepare_work(struct work_struct *work) @@ -1448,26 +1452,42 @@ static void resize_hpt_prepare_work(struct work_struct *work) struct kvm_resize_hpt, work); struct kvm *kvm = resize->kvm; - int err; + int err = 0; if (WARN_ON(resize->error != -EBUSY)) return; - resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", - resize->order); + mutex_lock(&kvm->lock); - err = resize_hpt_allocate(resize); + /* Request is still current? */ + if (kvm->arch.resize_hpt == resize) { + /* We may request large allocations here: + * do not sleep with kvm->lock held for a while. + */ + mutex_unlock(&kvm->lock); - /* We have strict assumption about -EBUSY - * when preparing for HPT resize. - */ - if (WARN_ON(err == -EBUSY)) - err = -EINPROGRESS; + resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", + resize->order); - mutex_lock(&kvm->lock); + err = resize_hpt_allocate(resize); + + /* We have strict assumption about -EBUSY + * when preparing for HPT resize. + */ + if (WARN_ON(err == -EBUSY)) + err = -EINPROGRESS; + + mutex_lock(&kvm->lock); + /* It is possible that kvm->arch.resize_hpt != resize + * after we grab kvm->lock again. 
+ */ + } resize->error = err; + if (kvm->arch.resize_hpt != resize) + resize_hpt_release(kvm, resize); + mutex_unlock(&kvm->lock); } From 02453a0f8f54a00aff82daa287627c328f121a18 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Wed, 10 Jan 2018 17:04:39 +1100 Subject: [PATCH 1319/3326] KVM: PPC: Book3S HV: Always flush TLB in kvmppc_alloc_reset_hpt() commit ecba8297aafd50db6ae867e90844eead1611ef1c upstream. The KVM_PPC_ALLOCATE_HTAB ioctl(), implemented by kvmppc_alloc_reset_hpt() is supposed to completely clear and reset a guest's Hashed Page Table (HPT) allocating or re-allocating it if necessary. In the case where an HPT of the right size already exists and it just zeroes it, it forces a TLB flush on all guest CPUs, to remove any stale TLB entries loaded from the old HPT. However, that situation can arise when the HPT is resizing as well - or even when switching from an RPT to HPT - so those cases need a TLB flush as well. So, move the TLB flush to trigger in all cases except for errors. Fixes: f98a8bf9ee20 ("KVM: PPC: Book3S HV: Allow KVM_PPC_ALLOCATE_HTAB ioctl() to change HPT size") Signed-off-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index b2e775d84c573d..2645d484e945be 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -165,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) * Reset all the reverse-mapping chains for all memslots */ kvmppc_rmap_reset(kvm); - /* Ensure that each vcpu will flush its TLB on next entry. */ - cpumask_setall(&kvm->arch.need_tlb_flush); err = 0; goto out; } @@ -182,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) kvmppc_set_hpt(kvm, &info); out: + if (err == 0) + /* Ensure that each vcpu will flush its TLB on next entry. */ + cpumask_setall(&kvm->arch.need_tlb_flush); + mutex_unlock(&kvm->lock); return err; } From 2f6ff75e484bca49202cdc132787bc5d29d8a139 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 9 Jan 2018 15:33:42 +0100 Subject: [PATCH 1320/3326] drm/vmwgfx: Don't cache framebuffer maps commit 98648ae6ef6bdcdcb88c46cad963906ab452e96d upstream. Buffer objects need to be either pinned or reserved while a map is active, that's not the case here, so avoid caching the framebuffer map. This will cause increasing mapping activity mainly when we don't do page flipping. This fixes occasional garbage filled screens when the framebuffer has been evicted after the map. Since in-kernel mapping of whole buffer objects is error-prone on 32-bit architectures and also quite inefficient, we will revisit this later. 
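The replacement pattern is the usual map-for-the-duration-of-the-access idiom. A minimal sketch follows; the buffer and destination names are assumptions for illustration and the surrounding reservation/error handling is omitted, but the TTM calls are the same ones the diff uses:

    struct ttm_bo_kmap_obj guest_map;
    bool is_iomem;
    u8 *dst;
    int ret;

    /* Map only while the object is known to be resident/reserved. */
    ret = ttm_bo_kmap(&buf->base, 0, buf->base.num_pages, &guest_map);
    if (ret)
            return ret;

    dst = ttm_kmap_obj_virtual(&guest_map, &is_iomem);
    /* ... CPU blit into dst ... */

    /* Drop the mapping before the object can be evicted again. */
    ttm_bo_kunmap(&guest_map);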
Signed-off-by: Thomas Hellstrom Reviewed-by: Sinclair Yeh Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6 ---- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 41 ++++++++-------------------- 3 files changed, 13 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b850562fbdd657..62c2f4be8012be 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane) vps->pinned = 0; /* Mapping is managed by prepare_fb/cleanup_fb */ - memset(&vps->guest_map, 0, sizeof(vps->guest_map)); memset(&vps->host_map, 0, sizeof(vps->host_map)); vps->cpp = 0; @@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane, /* Should have been freed by cleanup_fb */ - if (vps->guest_map.virtual) { - DRM_ERROR("Guest mapping not freed\n"); - ttm_bo_kunmap(&vps->guest_map); - } - if (vps->host_map.virtual) { DRM_ERROR("Host mapping not freed\n"); ttm_bo_kunmap(&vps->host_map); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index ff9c8389ff21c3..cd9da2dd79af1a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -175,7 +175,7 @@ struct vmw_plane_state { int pinned; /* For CPU Blit */ - struct ttm_bo_kmap_obj host_map, guest_map; + struct ttm_bo_kmap_obj host_map; unsigned int cpp; }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index ca3afae2db1f13..4dee05b15552bf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit { bool defined; /* For CPU Blit */ - struct ttm_bo_kmap_obj host_map, guest_map; + struct ttm_bo_kmap_obj host_map; unsigned int cpp; }; @@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) s32 src_pitch, dst_pitch; u8 *src, *dst; bool not_used; - + struct ttm_bo_kmap_obj guest_map; + int ret; if (!dirty->num_hits) return; @@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) if (width == 0 || height == 0) return; + ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages, + &guest_map); + if (ret) { + DRM_ERROR("Failed mapping framebuffer for blit: %d\n", + ret); + goto out_cleanup; + } /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ src_pitch = stdu->display_srf->base_size.width * stdu->cpp; @@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) src += ddirty->top * src_pitch + ddirty->left * stdu->cpp; dst_pitch = ddirty->pitch; - dst = ttm_kmap_obj_virtual(&stdu->guest_map, ¬_used); + dst = ttm_kmap_obj_virtual(&guest_map, ¬_used); dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; @@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) vmw_fifo_commit(dev_priv, sizeof(*cmd)); } + ttm_bo_kunmap(&guest_map); out_cleanup: ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; ddirty->right = ddirty->bottom = S32_MIN; @@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane, { struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); - if (vps->guest_map.virtual) - ttm_bo_kunmap(&vps->guest_map); - if (vps->host_map.virtual) ttm_bo_kunmap(&vps->host_map); @@ -1277,33 +1283,11 @@ 
vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, */ if (vps->content_fb_type == SEPARATE_DMA && !(dev_priv->capabilities & SVGA_CAP_3D)) { - - struct vmw_framebuffer_dmabuf *new_vfbd; - - new_vfbd = vmw_framebuffer_to_vfbd(new_fb); - - ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false, - NULL); - if (ret) - goto out_srf_unpin; - - ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0, - new_vfbd->buffer->base.num_pages, - &vps->guest_map); - - ttm_bo_unreserve(&new_vfbd->buffer->base); - - if (ret) { - DRM_ERROR("Failed to map content buffer to CPU\n"); - goto out_srf_unpin; - } - ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0, vps->surf->res.backup->base.num_pages, &vps->host_map); if (ret) { DRM_ERROR("Failed to map display buffer to CPU\n"); - ttm_bo_kunmap(&vps->guest_map); goto out_srf_unpin; } @@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, stdu->display_srf = vps->surf; stdu->content_fb_type = vps->content_fb_type; stdu->cpp = vps->cpp; - memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map)); memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map)); if (!stdu->defined) From 9fc16b259ac276da269b97f66ad72a5b182c523a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 10 Jan 2018 12:40:04 +0300 Subject: [PATCH 1321/3326] drm/vmwgfx: Potential off by one in vmw_view_add() commit 0d9cac0ca0429830c40fe1a4e50e60f6221fd7b6 upstream. The vmw_view_cmd_to_type() function returns vmw_view_max (3) on error. It's one element beyond the end of the vmw_view_cotables[] table. My read on this is that it's possible to hit this failure. header->id comes from vmw_cmd_check() and it's a user controlled number between 1040 and 1225 so we can hit that error. But I don't have the hardware to test this code. Fixes: d80efd5cb3de ("drm/vmwgfx: Initial DX support") Signed-off-by: Dan Carpenter Reviewed-by: Thomas Hellstrom Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 21c62a34e5580a..87e8af5776a389 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, } view_type = vmw_view_cmd_to_type(header->id); + if (view_type == vmw_view_max) + return -EINVAL; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, From 4675bdd85ca4c9b90f7f051c72856c857b4df43f Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Fri, 29 Dec 2017 02:50:08 +0800 Subject: [PATCH 1322/3326] drm/i915/gvt: Clear the shadow page table entry after post-sync commit 121d760d0788f95619049c63449d977065cab69d upstream. A shadow page table entry needs to be cleared after being set as post-sync. This patch fixes the recent error reported in Win7-32 test. 
Fixes: 2707e4446688 ("drm/i915/gvt: vGPU graphics memory virtualization") Signed-off-by: Zhi Wang Signed-off-by: Zhenyu Wang Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/gvt/gtt.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index a385838e2919e3..dadacbe558abed 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp, return ret; } else { if (!test_bit(index, spt->post_shadow_bitmap)) { + int type = spt->shadow_page.type; + ppgtt_get_shadow_entry(spt, &se, index); ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); if (ret) return ret; + ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); + ppgtt_set_shadow_entry(spt, &se, index); } - ppgtt_set_post_shadow(spt, index); } From 08a56f347a28d697d2f3cec14bce5ec8ad290bd7 Mon Sep 17 00:00:00 2001 From: Kenneth Graunke Date: Fri, 5 Jan 2018 00:59:05 -0800 Subject: [PATCH 1323/3326] drm/i915: Whitelist SLICE_COMMON_ECO_CHICKEN1 on Geminilake. commit 4636bda86aa1f34f45c629477476a0dcfa04e597 upstream. Geminilake requires the 3D driver to select whether barriers are intended for compute shaders, or tessellation control shaders, by whacking a "Barrier Mode" bit in SLICE_COMMON_ECO_CHICKEN1 when switching pipelines. Failure to do this properly can result in GPU hangs. Unfortunately, this means it needs to switch mid-batch, so only userspace can properly set it. To facilitate this, the kernel needs to whitelist the register. The workarounds page currently tags this as applying to Broxton only, but that doesn't make sense. The documentation for the register it references says the bit userspace is supposed to toggle only exists on Geminilake. Empirically, the Mesa patch to toggle this bit appears to fix intermittent GPU hangs in tessellation control shader barrier tests on Geminilake; we haven't seen those hangs on Broxton. v2: Mention WA #0862 in the comment (it doesn't have a name). Signed-off-by: Kenneth Graunke Acked-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180105085905.9298-1-kenneth@whitecape.org (cherry picked from commit ab062639edb0412daf6de540725276b9a5d217f9) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/i915_reg.h | 2 ++ drivers/gpu/drm/i915/intel_engine_cs.c | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ce2ed16f2a309c..920c8914cec173 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6987,6 +6987,8 @@ enum { #define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) #define DISABLE_PIXEL_MASK_CAMMING (1<<14) +#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) + #define GEN7_L3SQCREG1 _MMIO(0xB010) #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 3c2d9cf22ed5a5..b6a7e492c1a314 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1125,6 +1125,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine) if (ret) return ret; + /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. 
*/ + ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1); + if (ret) + return ret; + /* WaToEnableHwFixForPushConstHWBug:glk */ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); From 37928ffde45d9a638c6e8a04966e07f4ee845d1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 8 Nov 2017 15:35:55 +0200 Subject: [PATCH 1324/3326] drm/i915: Move init_clock_gating() back to where it was MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 6ac43272768ca901daac4076a66c2c4e3c7b9321 upstream. Apparently setting up a bunch of GT registers before we've properly initialized the rest of the GT hardware leads to these setting being lost. So looks like I broke HSW with commit b7048ea12fbb ("drm/i915: Do .init_clock_gating() earlier to avoid it clobbering watermarks") by doing init_clock_gating() too early. This should actually affect other platforms as well, but apparently not to such a great degree. What I was ultimately after in that commit was to move the ilk_init_lp_watermarks() call earlier. So let's undo the damage and move init_clock_gating() back to where it was, and call ilk_init_lp_watermarks() just before the watermark state readout. This highlights how fragile and messed up our init order really is. I wonder why we even initialize the display before gem. The opposite order would make much more sense to me... v2: Keep WaRsPkgCStateDisplayPMReq:hsw early as it really must be done before all planes might get disabled. Cc: Chris Wilson Cc: Mark Janes Cc: Maarten Lankhorst Cc: Daniel Vetter Cc: Joonas Lahtinen Cc: Oscar Mateo Cc: Mika Kuoppala Reported-by: Mark Janes Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103549 Fixes: b7048ea12fbb ("drm/i915: Do .init_clock_gating() earlier to avoid it clobbering watermarks") References: https://lists.freedesktop.org/archives/intel-gfx/2017-November/145432.html Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171108133555.14091-1-ville.syrjala@linux.intel.com Tested-by: Chris Wilson Reviewed-by: Chris Wilson (cherry picked from commit f72b84c677d61f201b869223a8d6e389c7bb7d3d) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_display.c | 14 +++++++-- drivers/gpu/drm/i915/intel_pm.c | 44 ++++++++++++---------------- 2 files changed, 30 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1c73d55426819f..095a2240af4f42 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3800,6 +3800,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_pps_unlock_regs_wa(dev_priv); intel_modeset_init_hw(dev); + intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) @@ -14406,8 +14407,6 @@ void intel_modeset_init_hw(struct drm_device *dev) intel_update_cdclk(dev_priv); dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw; - - intel_init_clock_gating(dev_priv); } /* @@ -15124,6 +15123,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct intel_encoder *encoder; int i; + if (IS_HASWELL(dev_priv)) { + /* + * WaRsPkgCStateDisplayPMReq:hsw + * System hang if this isn't done before disabling all planes! 
+ */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); + } + intel_modeset_readout_hw_state(dev); /* HW state is read out, now we need to sanitize this mess. */ @@ -15220,6 +15228,8 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_init_gt_powersave(dev_priv); + intel_init_clock_gating(dev_priv); + intel_setup_overlay(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cb950752c34695..014e5c08571a4d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5669,12 +5669,30 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->wm.wm_mutex); } +/* + * FIXME should probably kill this and improve + * the real watermark readout/sanitation instead + */ +static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) +{ + I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); + I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); + I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); + + /* + * Don't touch WM1S_LP_EN here. + * Doing so could cause underruns. + */ +} + void ilk_wm_get_hw_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; struct drm_crtc *crtc; + ilk_init_lp_watermarks(dev_priv); + for_each_crtc(dev, crtc) ilk_pipe_wm_get_hw_state(crtc); @@ -7959,18 +7977,6 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) } } -static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) -{ - I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); - I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); - I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); - - /* - * Don't touch WM1S_LP_EN here. - * Doing so could cause underruns. - */ -} - static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv) { uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; @@ -8004,8 +8010,6 @@ static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv) (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); - ilk_init_lp_watermarks(dev_priv); - /* * Based on the document from hardware guys the following bits * should be set unconditionally in order to enable FBC. @@ -8118,8 +8122,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_GT_MODE, _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); - ilk_init_lp_watermarks(dev_priv); - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); @@ -8293,8 +8295,6 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv) { enum pipe pipe; - ilk_init_lp_watermarks(dev_priv); - /* WaSwitchSolVfFArbitrationPriority:bdw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -8349,8 +8349,6 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv) static void haswell_init_clock_gating(struct drm_i915_private *dev_priv) { - ilk_init_lp_watermarks(dev_priv); - /* L3 caching of data atomics doesn't work -- disable it. 
*/ I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); I915_WRITE(HSW_ROW_CHICKEN3, @@ -8394,10 +8392,6 @@ static void haswell_init_clock_gating(struct drm_i915_private *dev_priv) /* WaSwitchSolVfFArbitrationPriority:hsw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); - /* WaRsPkgCStateDisplayPMReq:hsw */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); - lpt_init_clock_gating(dev_priv); } @@ -8405,8 +8399,6 @@ static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv) { uint32_t snpcr; - ilk_init_lp_watermarks(dev_priv); - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); /* WaDisableEarlyCull:ivb */ From b00cb5a35082c1b8b0616283a2a8ac834989f0cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 16 Nov 2017 18:02:15 +0200 Subject: [PATCH 1325/3326] drm/i915: Fix init_clock_gating for resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 3572f04c69ed4369da5d3c65d84fb18774aa60b6 upstream. Moving the init_clock_gating() call from intel_modeset_init_hw() to intel_modeset_gem_init() had an unintended effect of not applying some workarounds on resume. This, for example, cause some kind of corruption to appear at the top of my IVB Thinkpad X1 Carbon LVDS screen after hibernation. Fix the problem by explicitly calling init_clock_gating() from the resume path. I really hope this doesn't break something else again. At least the problems reported at https://bugs.freedesktop.org/show_bug.cgi?id=103549 didn't make a comeback, even after a hibernate cycle. v2: Reorder the init_clock_gating vs. modeset_init_hw to match the display reset path (Rodrigo) Cc: Chris Wilson Cc: Rodrigo Vivi Fixes: 6ac43272768c ("drm/i915: Move init_clock_gating() back to where it was") Reviewed-by: Rodrigo Vivi Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20171116160215.25715-1-ville.syrjala@linux.intel.com Signed-off-by: Ville Syrjälä (cherry picked from commit 675f7ff35bd256e65d3d0f52718d8babf5d1002a) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/i915_drv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 82498f8232eb04..5c5cb2ceee49ae 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1693,6 +1693,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_guc_resume(dev_priv); intel_modeset_init_hw(dev); + intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) From a5dbaf87684c81693854a87b4a6c46460fc7a731 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Sun, 7 Jan 2018 17:33:02 -0800 Subject: [PATCH 1326/3326] bpf: prevent out-of-bounds speculation commit b2157399cc9898260d6031c5bfe45fe137c1fbe7 upstream. Under speculation, CPUs may mis-predict branches in bounds checks. Thus, memory accesses under a bounds check may be speculated even if the bounds check fails, providing a primitive for building a side channel. To avoid leaking kernel data round up array-based maps and mask the index after bounds check, so speculated load with out of bounds index will load either valid value from the array or zero from the padded area. Unconditionally mask index for all array types even when max_entries are not rounded to power of 2 for root user. 
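As an illustrative aside, the defence amounts to the following C-level pattern; the struct and names here are invented for the example and are not the kernel's struct bpf_array:

    #include <stddef.h>
    #include <stdint.h>

    struct array_model {
            uint32_t max_entries;
            uint32_t index_mask;    /* roundup_pow_of_two(max_entries) - 1 */
            uint32_t elem_size;
            char *value;
    };

    static void *lookup(const struct array_model *a, uint32_t index)
    {
            if (index >= a->max_entries)
                    return NULL;            /* architectural bounds check   */
            index &= a->index_mask;         /* bounds any speculated access */
            return a->value + (size_t)a->elem_size * index;
    }

Even if the branch is mis-predicted, the AND keeps the speculated load inside the power-of-two sized allocation, so no out-of-bounds bytes can be leaked through the cache.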
When map is created by unpriv user generate a sequence of bpf insns that includes AND operation to make sure that JITed code includes the same 'index & index_mask' operation. If prog_array map is created by unpriv user replace bpf_tail_call(ctx, map, index); with if (index >= max_entries) { index &= map->index_mask; bpf_tail_call(ctx, map, index); } (along with roundup to power 2) to prevent out-of-bounds speculation. There is secondary redundant 'if (index >= max_entries)' in the interpreter and in all JITs, but they can be optimized later if necessary. Other array-like maps (cpumap, devmap, sockmap, perf_event_array, cgroup_array) cannot be used by unpriv, so no changes there. That fixes bpf side of "Variant 1: bounds check bypass (CVE-2017-5753)" on all architectures with and without JIT. v2->v3: Daniel noticed that attack potentially can be crafted via syscall commands without loading the program, so add masking to those paths as well. Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend Signed-off-by: Daniel Borkmann Cc: Jiri Slaby Signed-off-by: Greg Kroah-Hartman --- include/linux/bpf.h | 2 ++ kernel/bpf/arraymap.c | 47 +++++++++++++++++++++++++++++++++---------- kernel/bpf/verifier.c | 36 +++++++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 11 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f1af7d63d67866..0bcf803f20de07 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -51,6 +51,7 @@ struct bpf_map { u32 pages; u32 id; int numa_node; + bool unpriv_array; struct user_struct *user; const struct bpf_map_ops *ops; struct work_struct work; @@ -195,6 +196,7 @@ struct bpf_prog_aux { struct bpf_array { struct bpf_map map; u32 elem_size; + u32 index_mask; /* 'ownership' of prog_array is claimed by the first program that * is going to use this map or by the first program which FD is stored * in the map to make sure that all callers and callees have the same diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index e2636737b69bd8..42a1227bf7ef59 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -50,9 +50,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) { bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; int numa_node = bpf_map_attr_numa_node(attr); + u32 elem_size, index_mask, max_entries; + bool unpriv = !capable(CAP_SYS_ADMIN); struct bpf_array *array; u64 array_size; - u32 elem_size; /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || @@ -68,11 +69,20 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) elem_size = round_up(attr->value_size, 8); + max_entries = attr->max_entries; + index_mask = roundup_pow_of_two(max_entries) - 1; + + if (unpriv) + /* round up array size to nearest power of 2, + * since cpu will speculate within index_mask limits + */ + max_entries = index_mask + 1; + array_size = sizeof(*array); if (percpu) - array_size += (u64) attr->max_entries * sizeof(void *); + array_size += (u64) max_entries * sizeof(void *); else - array_size += (u64) attr->max_entries * elem_size; + array_size += (u64) max_entries * elem_size; /* make sure there is no u32 overflow later in round_up() */ if (array_size >= U32_MAX - PAGE_SIZE) @@ -82,6 +92,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) array = bpf_map_area_alloc(array_size, numa_node); if (!array) return ERR_PTR(-ENOMEM); + array->index_mask = index_mask; + array->map.unpriv_array = unpriv; /* copy mandatory map attributes */ array->map.map_type = 
attr->map_type; @@ -117,12 +129,13 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key) if (unlikely(index >= array->map.max_entries)) return NULL; - return array->value + array->elem_size * index; + return array->value + array->elem_size * (index & array->index_mask); } /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { + struct bpf_array *array = container_of(map, struct bpf_array, map); struct bpf_insn *insn = insn_buf; u32 elem_size = round_up(map->value_size, 8); const int ret = BPF_REG_0; @@ -131,7 +144,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); - *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); + if (map->unpriv_array) { + *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); + *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); + } else { + *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); + } if (is_power_of_2(elem_size)) { *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); @@ -153,7 +171,7 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) if (unlikely(index >= array->map.max_entries)) return NULL; - return this_cpu_ptr(array->pptrs[index]); + return this_cpu_ptr(array->pptrs[index & array->index_mask]); } int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) @@ -173,7 +191,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) */ size = round_up(map->value_size, 8); rcu_read_lock(); - pptr = array->pptrs[index]; + pptr = array->pptrs[index & array->index_mask]; for_each_possible_cpu(cpu) { bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size); off += size; @@ -221,10 +239,11 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value, return -EEXIST; if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) - memcpy(this_cpu_ptr(array->pptrs[index]), + memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]), value, map->value_size); else - memcpy(array->value + array->elem_size * index, + memcpy(array->value + + array->elem_size * (index & array->index_mask), value, map->value_size); return 0; } @@ -258,7 +277,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, */ size = round_up(map->value_size, 8); rcu_read_lock(); - pptr = array->pptrs[index]; + pptr = array->pptrs[index & array->index_mask]; for_each_possible_cpu(cpu) { bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size); off += size; @@ -609,6 +628,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) static u32 array_of_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { + struct bpf_array *array = container_of(map, struct bpf_array, map); u32 elem_size = round_up(map->value_size, 8); struct bpf_insn *insn = insn_buf; const int ret = BPF_REG_0; @@ -617,7 +637,12 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map, *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); - *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); + if (map->unpriv_array) { + *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); + *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); + } else { + *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); + } if (is_power_of_2(elem_size)) 
*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); else diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c5ff809e86d0d3..228c57996c2009 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1701,6 +1701,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; + if (func_id == BPF_FUNC_tail_call) { + if (meta.map_ptr == NULL) { + verbose("verifier bug\n"); + return -EINVAL; + } + env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr; + } err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; @@ -4315,6 +4322,35 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) */ insn->imm = 0; insn->code = BPF_JMP | BPF_TAIL_CALL; + + /* instead of changing every JIT dealing with tail_call + * emit two extra insns: + * if (index >= max_entries) goto out; + * index &= array->index_mask; + * to avoid out-of-bounds cpu speculation + */ + map_ptr = env->insn_aux_data[i + delta].map_ptr; + if (map_ptr == BPF_MAP_PTR_POISON) { + verbose("tail_call obusing map_ptr\n"); + return -EINVAL; + } + if (!map_ptr->unpriv_array) + continue; + insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, + map_ptr->max_entries, 2); + insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, + container_of(map_ptr, + struct bpf_array, + map)->index_mask); + insn_buf[2] = *insn; + cnt = 3; + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; continue; } From 67c05d9414512e1f9040d29e37e3d5533d8c51dd Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 10 Jan 2018 23:25:05 +0100 Subject: [PATCH 1327/3326] bpf, array: fix overflow in max_entries and undefined behavior in index_mask commit bbeb6e4323dad9b5e0ee9f60c223dd532e2403b1 upstream. syzkaller tried to alloc a map with 0xfffffffd entries out of a userns, and thus unprivileged. With the recently added logic in b2157399cc98 ("bpf: prevent out-of-bounds speculation") we round this up to the next power of two value for max_entries for unprivileged such that we can apply proper masking into potentially zeroed out map slots. However, this will generate an index_mask of 0xffffffff, and therefore a + 1 will let this overflow into new max_entries of 0. This will pass allocation, etc, and later on map access we still enforce on the original attr->max_entries value which was 0xfffffffd, therefore triggering GPF all over the place. Thus bail out on overflow in such case. Moreover, on 32 bit archs roundup_pow_of_two() can also not be used, since fls_long(max_entries - 1) can result in 32 and 1UL << 32 in 32 bit space is undefined. Therefore, do this by hand in a 64 bit variable. This fixes all the issues triggered by syzkaller's reproducers. 
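To see concretely why the mask has to be built in 64-bit space, here is a small self-contained userspace model of the computation (illustrative only: the function and variable names are made up, fls_long() is replaced by a plain loop, and max_entries != 0 is assumed since the kernel rejects that earlier):

    #include <stdint.h>
    #include <stdio.h>

    static int compute_index_mask(uint32_t max_entries, uint32_t *index_mask)
    {
            uint64_t mask64;
            uint32_t n = max_entries - 1;
            int bits = 0;

            while (n) {             /* stands in for fls_long(max_entries - 1) */
                    bits++;
                    n >>= 1;
            }
            mask64 = (1ULL << bits) - 1;    /* never evaluates 1U << 32 */
            *index_mask = (uint32_t)mask64;

            /* rounded-up unprivileged size wrapped to 0: reject (-E2BIG) */
            if ((uint32_t)(mask64 + 1) < max_entries)
                    return -1;
            return 0;
    }

    int main(void)
    {
            uint32_t mask = 0;

            printf("1000       -> %s, mask 0x%x\n",
                   compute_index_mask(1000, &mask) ? "reject" : "ok", mask);
            printf("0xfffffffd -> %s\n",
                   compute_index_mask(0xfffffffdu, &mask) ? "reject" : "ok");
            return 0;
    }

With 0xfffffffd entries the mask becomes 0xffffffff and the rounded-up size wraps to zero, which is exactly the overflow the patch now rejects.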
Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation") Reported-by: syzbot+b0efb8e572d01bce1ae0@syzkaller.appspotmail.com Reported-by: syzbot+6c15e9744f75f2364773@syzkaller.appspotmail.com Reported-by: syzbot+d2f5524fb46fd3b312ee@syzkaller.appspotmail.com Reported-by: syzbot+61d23c95395cc90dbc2b@syzkaller.appspotmail.com Reported-by: syzbot+0d363c942452cca68c01@syzkaller.appspotmail.com Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Cc: Jiri Slaby Cc: Eric Dumazet Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/arraymap.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 42a1227bf7ef59..a4ae1ca44a5765 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -53,7 +53,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) u32 elem_size, index_mask, max_entries; bool unpriv = !capable(CAP_SYS_ADMIN); struct bpf_array *array; - u64 array_size; + u64 array_size, mask64; /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || @@ -70,13 +70,25 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) elem_size = round_up(attr->value_size, 8); max_entries = attr->max_entries; - index_mask = roundup_pow_of_two(max_entries) - 1; - if (unpriv) + /* On 32 bit archs roundup_pow_of_two() with max_entries that has + * upper most bit set in u32 space is undefined behavior due to + * resulting 1U << 32, so do it manually here in u64 space. + */ + mask64 = fls_long(max_entries - 1); + mask64 = 1ULL << mask64; + mask64 -= 1; + + index_mask = mask64; + if (unpriv) { /* round up array size to nearest power of 2, * since cpu will speculate within index_mask limits */ max_entries = index_mask + 1; + /* Check for overflows. */ + if (max_entries < attr->max_entries) + return ERR_PTR(-E2BIG); + } array_size = sizeof(*array); if (percpu) From a2e0b5db9b581f78155bd4999b5bb2d0d8037eda Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 10 Jan 2018 20:04:37 +0100 Subject: [PATCH 1328/3326] bpf: arsh is not supported in 32 bit alu thus reject it commit 7891a87efc7116590eaba57acc3c422487802c6f upstream. The following snippet was throwing an 'unknown opcode cc' warning in BPF interpreter: 0: (18) r0 = 0x0 2: (7b) *(u64 *)(r10 -16) = r0 3: (cc) (u32) r0 s>>= (u32) r0 4: (95) exit Although a number of JITs do support BPF_ALU | BPF_ARSH | BPF_{K,X} generation, not all of them do and interpreter does neither. We can leave existing ones and implement it later in bpf-next for the remaining ones, but reject this properly in verifier for the time being. 
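For readers unfamiliar with the opcode, BPF_ARSH is an arithmetic (sign-extending) right shift, in contrast to the logical BPF_RSH. The snippet below is only an illustration in C of the two behaviours, not BPF code; note that shifting a negative signed value right is implementation-defined in C, though arithmetic on the compilers the kernel targets:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t  a64 = -16;                 /* arithmetic shift keeps the sign */
            uint64_t l64 = (uint64_t)-16;       /* logical shift fills with zeroes */
            int32_t  a32 = -16;                 /* the rejected 32-bit ARSH case   */

            printf("arsh64(-16, 2) = %lld\n", (long long)(a64 >> 2));   /* -4 */
            printf("rsh64 (-16, 2) = 0x%llx\n", (unsigned long long)(l64 >> 2));
            printf("arsh32(-16, 2) = %d\n", a32 >> 2);                  /* -4 */
            return 0;
    }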
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)") Reported-by: syzbot+93c4904c5c70348a6890@syzkaller.appspotmail.com Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 5 +++ tools/testing/selftests/bpf/test_verifier.c | 40 +++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 228c57996c2009..75a5c3312f4603 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2493,6 +2493,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EINVAL; } + if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) { + verbose("BPF_ARSH not supported for 32 bit ALU\n"); + return -EINVAL; + } + if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 7a2d221c4702a7..1241487de93f9e 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -272,6 +272,46 @@ static struct bpf_test tests[] = { .errstr = "invalid bpf_ld_imm64 insn", .result = REJECT, }, + { + "arsh32 on imm", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "BPF_ARSH not supported for 32 bit ALU", + }, + { + "arsh32 on reg", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 5), + BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "BPF_ARSH not supported for 32 bit ALU", + }, + { + "arsh64 on imm", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "arsh64 on reg", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 5), + BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, { "no bpf_exit", .insns = { From fbe0d2251b3896dc927cc980e6bfb4e73814d748 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Diego=20Elio=20Petten=C3=B2?= Date: Fri, 29 Dec 2017 09:54:25 +0000 Subject: [PATCH 1329/3326] USB: serial: cp210x: add IDs for LifeScan OneTouch Verio IQ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 4307413256ac1e09b8f53e8715af3df9e49beec3 upstream. Add IDs for the OneTouch Verio IQ that comes with an embedded USB-to-serial converter. 
Signed-off-by: Diego Elio Pettenò Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/cp210x.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 412f812522ee05..3d4776a6dea1e5 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -127,6 +127,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ + { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ From d5f322fea60a96812ba3ab079bf03e27be4fc225 Mon Sep 17 00:00:00 2001 From: Christian Holl Date: Wed, 3 Jan 2018 19:53:02 +0100 Subject: [PATCH 1330/3326] USB: serial: cp210x: add new device ID ELV ALC 8xxx commit d14ac576d10f865970bb1324d337e5e24d79aaf4 upstream. This adds the ELV ALC 8xxx Battery Charging device to the list of USB IDs of drivers/usb/serial/cp210x.c Signed-off-by: Christian Holl Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/cp210x.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 3d4776a6dea1e5..aed182d24d2346 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -178,6 +178,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ + { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */ { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ From f562a823dd2564e8af78424e9580031387809bfd Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Thu, 11 Jan 2018 14:47:40 +0100 Subject: [PATCH 1331/3326] usb: misc: usb3503: make sure reset is low for at least 100us commit b8626f1dc29d3eee444bfaa92146ec7b291ef41c upstream. When using a GPIO which is high by default and initializing the driver in USB Hub mode, initialization fails with: [ 111.757794] usb3503 0-0008: SP_ILOCK failed (-5) The reason seems to be that the chip is not properly reset. Probe does initialize reset low; however, a few lines later the code already sets it back high, which is not long enough. Make sure reset is asserted for at least 100us by inserting a delay after initializing the reset pin during probe.
Signed-off-by: Stefan Agner Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/usb3503.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c index 8e7737d7ac0a3e..03be5d574f2342 100644 --- a/drivers/usb/misc/usb3503.c +++ b/drivers/usb/misc/usb3503.c @@ -292,6 +292,8 @@ static int usb3503_probe(struct usb3503 *hub) if (gpio_is_valid(hub->gpio_reset)) { err = devm_gpio_request_one(dev, hub->gpio_reset, GPIOF_OUT_INIT_LOW, "usb3503 reset"); + /* Datasheet defines a hardware reset to be at least 100us */ + usleep_range(100, 10000); if (err) { dev_err(dev, "unable to request GPIO %d as reset pin (%d)\n", From 204d8fb1b417732359338ec0eae6eb16ff854bcc Mon Sep 17 00:00:00 2001 From: Pete Zaitcev Date: Mon, 8 Jan 2018 15:46:41 -0600 Subject: [PATCH 1332/3326] USB: fix usbmon BUG trigger commit 46eb14a6e1585d99c1b9f58d0e7389082a5f466b upstream. Automated tests triggered this by opening usbmon and accessing the mmap while simultaneously resizing the buffers. This bug was with us since 2006, because typically applications only size the buffers once and thus avoid racing. Reported by Kirill A. Shutemov. Reported-by: Signed-off-by: Pete Zaitcev Signed-off-by: Greg Kroah-Hartman --- drivers/usb/mon/mon_bin.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index f6ae753ab99b09..f932f40302df94 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg break; case MON_IOCQ_RING_SIZE: + mutex_lock(&rp->fetch_lock); ret = rp->b_size; + mutex_unlock(&rp->fetch_lock); break; case MON_IOCT_RING_SIZE: @@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf) unsigned long offset, chunk_idx; struct page *pageptr; + mutex_lock(&rp->fetch_lock); offset = vmf->pgoff << PAGE_SHIFT; - if (offset >= rp->b_size) + if (offset >= rp->b_size) { + mutex_unlock(&rp->fetch_lock); return VM_FAULT_SIGBUS; + } chunk_idx = offset / CHUNK_SIZE; pageptr = rp->b_vec[chunk_idx].pg; get_page(pageptr); + mutex_unlock(&rp->fetch_lock); vmf->page = pageptr; return 0; } From 2817c4c8fc1feab053b2e21f15bc040c3828d304 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 3 Jan 2018 12:51:51 -0500 Subject: [PATCH 1333/3326] USB: UDC core: fix double-free in usb_add_gadget_udc_release commit 7ae2c3c280db183ca9ada2675c34ec2f7378abfa upstream. The error-handling pathways in usb_add_gadget_udc_release() are messed up. Aside from the uninformative statement labels, they can deallocate the udc structure after calling put_device(), which is a double-free. This was observed by KASAN in automatic testing. This patch cleans up the routine. It preserves the requirement that when any failure occurs, we call put_device(&gadget->dev). 
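The cleanup follows the usual kernel shape for error paths: each failure point jumps to a label named after the undo action it needs, and the labels unwind in strict reverse order of acquisition, so nothing is released twice. A rough userspace analogue of that shape (illustrative only; plain malloc()/free() stand in for the device refcounting, whereas the real fix relies on put_device() so the release callback, not an explicit kfree(), frees the udc):

    #include <stdlib.h>

    static int setup_pair(char **a_out, char **b_out)
    {
        char *a, *b;

        a = malloc(16);
        if (!a)
            goto err_out;

        b = malloc(16);
        if (!b)
            goto err_free_a;

        *a_out = a;
        *b_out = b;
        return 0;

    err_free_a:
        free(a);    /* undo exactly the steps that succeeded, in reverse */
    err_out:
        return -1;
    }

    int main(void)
    {
        char *a, *b;

        if (setup_pair(&a, &b))
            return 1;
        free(b);
        free(a);
        return 0;
    }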
Signed-off-by: Alan Stern Reported-by: Fengguang Wu Reviewed-by: Peter Chen Acked-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/core.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index def1b05ffca0b1..284bd1a7b57032 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1158,11 +1158,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, udc = kzalloc(sizeof(*udc), GFP_KERNEL); if (!udc) - goto err1; - - ret = device_add(&gadget->dev); - if (ret) - goto err2; + goto err_put_gadget; device_initialize(&udc->dev); udc->dev.release = usb_udc_release; @@ -1171,7 +1167,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, udc->dev.parent = parent; ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj)); if (ret) - goto err3; + goto err_put_udc; + + ret = device_add(&gadget->dev); + if (ret) + goto err_put_udc; udc->gadget = gadget; gadget->udc = udc; @@ -1181,7 +1181,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, ret = device_add(&udc->dev); if (ret) - goto err4; + goto err_unlist_udc; usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); udc->vbus = true; @@ -1189,27 +1189,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, /* pick up one of pending gadget drivers */ ret = check_pending_gadget_drivers(udc); if (ret) - goto err5; + goto err_del_udc; mutex_unlock(&udc_lock); return 0; -err5: + err_del_udc: device_del(&udc->dev); -err4: + err_unlist_udc: list_del(&udc->list); mutex_unlock(&udc_lock); -err3: - put_device(&udc->dev); device_del(&gadget->dev); -err2: - kfree(udc); + err_put_udc: + put_device(&udc->dev); -err1: + err_put_gadget: put_device(&gadget->dev); return ret; } From 34e5d097bc36f9f62db3b08b570abcd91800d00d Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Fri, 22 Dec 2017 17:00:06 -0700 Subject: [PATCH 1334/3326] usbip: remove kernel addresses from usb device and urb debug msgs commit e1346fd87c71a1f61de1fe476ec8df1425ac931c upstream. usbip_dump_usb_device() and usbip_dump_urb() print kernel addresses. Remove kernel addresses from usb device and urb debug msgs and improve the message content. Instead of printing parent device and bus addresses, print parent device and bus names. 
Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/usbip_common.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 17b599b923f367..7f0d2213112111 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -105,7 +105,7 @@ static void usbip_dump_usb_device(struct usb_device *udev) dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", udev->devnum, udev->devpath, usb_speed_string(udev->speed)); - pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport); + pr_debug("tt hub ttport %d\n", udev->ttport); dev_dbg(dev, " "); for (i = 0; i < 16; i++) @@ -138,12 +138,8 @@ static void usbip_dump_usb_device(struct usb_device *udev) } pr_debug("\n"); - dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus); - - dev_dbg(dev, - "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n", - &udev->descriptor, udev->config, - udev->actconfig, udev->rawdescriptors); + dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev), + udev->bus->bus_name); dev_dbg(dev, "have_langid %d, string_langid %d\n", udev->have_langid, udev->string_langid); @@ -251,9 +247,6 @@ void usbip_dump_urb(struct urb *urb) dev = &urb->dev->dev; - dev_dbg(dev, " urb :%p\n", urb); - dev_dbg(dev, " dev :%p\n", urb->dev); - usbip_dump_usb_device(urb->dev); dev_dbg(dev, " pipe :%08x ", urb->pipe); @@ -262,11 +255,9 @@ void usbip_dump_urb(struct urb *urb) dev_dbg(dev, " status :%d\n", urb->status); dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); - dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer); dev_dbg(dev, " transfer_buffer_length:%d\n", urb->transfer_buffer_length); dev_dbg(dev, " actual_length :%d\n", urb->actual_length); - dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet); if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) usbip_dump_usb_ctrlrequest( @@ -276,8 +267,6 @@ void usbip_dump_urb(struct urb *urb) dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); dev_dbg(dev, " interval :%d\n", urb->interval); dev_dbg(dev, " error_count :%d\n", urb->error_count); - dev_dbg(dev, " context :%p\n", urb->context); - dev_dbg(dev, " complete :%p\n", urb->complete); } EXPORT_SYMBOL_GPL(usbip_dump_urb); From e56d7cd5f31ef90b1c4ecb9a0f771bad46938a01 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Fri, 22 Dec 2017 19:23:46 -0700 Subject: [PATCH 1335/3326] usbip: fix vudc_rx: harden CMD_SUBMIT path to handle malicious input commit b78d830f0049ef1966dc1e0ebd1ec2a594e2cf25 upstream. Harden CMD_SUBMIT path to handle malicious input that could trigger large memory allocations. Add checks to validate transfer_buffer_length and number_of_packets to protect against bad input requesting for unbounded memory allocations. 
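The added check bounds number_of_packets by how many packets of the endpoint's (maxp * mult) size could possibly fit in the advertised buffer length. A stand-alone sketch of that arithmetic (illustrative only; isoc_packets_sane() is a made-up name, and the real code rejects a bad request with -EMSGSIZE):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static int isoc_packets_sane(int number_of_packets,
                                 unsigned int transfer_buffer_length,
                                 unsigned int maxp, unsigned int mult)
    {
        unsigned int max_packets = DIV_ROUND_UP(transfer_buffer_length,
                                                maxp * mult);

        return number_of_packets >= 0 &&
               (unsigned int)number_of_packets <= max_packets;
    }

    int main(void)
    {
        /* 1024-byte packets, no high-bandwidth multiplier, 8 KiB buffer */
        printf("%d\n", isoc_packets_sane(8, 8192, 1024, 1));      /* 1: ok */
        printf("%d\n", isoc_packets_sane(100000, 8192, 1024, 1)); /* 0: reject */
        return 0;
    }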
Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/vudc_rx.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c index e429b59f6f8a63..d020e72b312259 100644 --- a/drivers/usb/usbip/vudc_rx.c +++ b/drivers/usb/usbip/vudc_rx.c @@ -132,6 +132,25 @@ static int v_recv_cmd_submit(struct vudc *udc, urb_p->new = 1; urb_p->seqnum = pdu->base.seqnum; + if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) { + /* validate packet size and number of packets */ + unsigned int maxp, packets, bytes; + + maxp = usb_endpoint_maxp(urb_p->ep->desc); + maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc); + bytes = pdu->u.cmd_submit.transfer_buffer_length; + packets = DIV_ROUND_UP(bytes, maxp); + + if (pdu->u.cmd_submit.number_of_packets < 0 || + pdu->u.cmd_submit.number_of_packets > packets) { + dev_err(&udc->gadget.dev, + "CMD_SUBMIT: isoc invalid num packets %d\n", + pdu->u.cmd_submit.number_of_packets); + ret = -EMSGSIZE; + goto free_urbp; + } + } + ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type); if (ret) { usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC); From 0382becc1d24e1cd00dc44e1fa5f7655441c258c Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Fri, 22 Dec 2017 19:23:47 -0700 Subject: [PATCH 1336/3326] usbip: vudc_tx: fix v_send_ret_submit() vulnerability to null xfer buffer commit 5fd77a3a0e408c23ab4002a57db980e46bc16e72 upstream. v_send_ret_submit() handles urb with a null transfer_buffer, when it replays a packet with potential malicious data that could contain a null buffer. Add a check for the condition when actual_length > 0 and transfer_buffer is null. Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/vudc_tx.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c index 234661782fa07e..3ab4c86486a740 100644 --- a/drivers/usb/usbip/vudc_tx.c +++ b/drivers/usb/usbip/vudc_tx.c @@ -97,6 +97,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p) memset(&pdu_header, 0, sizeof(pdu_header)); memset(&msg, 0, sizeof(msg)); + if (urb->actual_length > 0 && !urb->transfer_buffer) { + dev_err(&udc->gadget.dev, + "urb: actual_length %d transfer_buffer null\n", + urb->actual_length); + return -1; + } + if (urb_p->type == USB_ENDPOINT_XFER_ISOC) iovnum = 2 + urb->number_of_packets; else @@ -112,8 +119,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p) /* 1. setup usbip_header */ setup_ret_submit_pdu(&pdu_header, urb_p); - usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", - pdu_header.base.seqnum, urb); + usbip_dbg_stub_tx("setup txdata seqnum: %d\n", + pdu_header.base.seqnum); usbip_header_correct_endian(&pdu_header, 1); iov[iovnum].iov_base = &pdu_header; From 242e20a5b3cc2b99ace977546d3cffc6a2a35991 Mon Sep 17 00:00:00 2001 From: Viktor Slavkovic Date: Mon, 8 Jan 2018 10:43:03 -0800 Subject: [PATCH 1337/3326] staging: android: ashmem: fix a race condition in ASHMEM_SET_SIZE ioctl commit 443064cb0b1fb4569fe0a71209da7625129fb760 upstream. A lock-unlock is missing in ASHMEM_SET_SIZE ioctl which can result in a race condition when mmap is called. After the !asma->file check, before setting asma->size, asma->file can be set in mmap. That would result in having different asma->size than the mapped memory size. Combined with ASHMEM_UNPIN ioctl and shrinker invocation, this can result in memory corruption. 
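The fix addresses a classic check-then-act race: the !asma->file test and the asma->size update must be atomic with respect to the attach that mmap performs, which means holding the same mutex around both. A userspace analogue of the locked version (illustrative only; the names below are made up and the "file" is reduced to a flag):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int file_attached;
    static long size;

    /* The size may only change while nothing is attached yet. */
    static int set_size(long new_size)
    {
        int ret = -1;

        pthread_mutex_lock(&lock);
        if (!file_attached) {
            size = new_size;
            ret = 0;
        }
        pthread_mutex_unlock(&lock);
        return ret;
    }

    /* The attach path takes the same lock, so it cannot slip in between. */
    static void attach_file(void)
    {
        pthread_mutex_lock(&lock);
        file_attached = 1;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        printf("%d\n", set_size(4096)); /* 0: allowed, nothing attached yet */
        attach_file();
        printf("%d\n", set_size(8192)); /* -1: rejected once attached */
        return 0;
    }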
Signed-off-by: Viktor Slavkovic Signed-off-by: Greg Kroah-Hartman --- drivers/staging/android/ashmem.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 0f695df14c9d8f..372ce9913e6dea 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -765,10 +765,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; case ASHMEM_SET_SIZE: ret = -EINVAL; + mutex_lock(&ashmem_mutex); if (!asma->file) { ret = 0; asma->size = (size_t)arg; } + mutex_unlock(&ashmem_mutex); break; case ASHMEM_GET_SIZE: ret = asma->size; From 02462928e2234eea95dd1ce05ca1f2c02088dc13 Mon Sep 17 00:00:00 2001 From: Ben Seri Date: Fri, 8 Dec 2017 15:14:47 +0100 Subject: [PATCH 1338/3326] Bluetooth: Prevent stack info leak from the EFS element. commit 06e7e776ca4d36547e503279aeff996cbb292c16 upstream. In the function l2cap_parse_conf_rsp and in the function l2cap_parse_conf_req the following variable is declared without initialization: struct l2cap_conf_efs efs; In addition, when parsing input configuration parameters in both of these functions, the switch case for handling EFS elements may skip the memcpy call that will write to the efs variable: ... case L2CAP_CONF_EFS: if (olen == sizeof(efs)) memcpy(&efs, (void *)val, olen); ... The olen in the above if is attacker controlled, and regardless of that if, in both of these functions the efs variable would eventually be added to the outgoing configuration request that is being built: l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), (unsigned long) &efs); So by sending a configuration request, or response, that contains an L2CAP_CONF_EFS element, but with an element length that is not sizeof(efs) - the memcpy to the uninitialized efs variable can be avoided, and the uninitialized variable would be returned to the attacker (16 bytes). 
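The underlying rule of the fix: a configuration option may only be treated as present, and only echoed back to the peer, when its advertised length matches the structure it is parsed into; otherwise the uninitialized stack local must never be read. A compact sketch of that rule (illustrative only; the efs_opt layout below is invented and does not match struct l2cap_conf_efs):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct efs_opt {            /* made-up layout for illustration */
        uint8_t  id;
        uint8_t  stype;
        uint16_t msdu;
        uint32_t sdu_itime;
        uint32_t acc_lat;
        uint32_t flush_to;
    };

    /* Only report the option as usable when its length is exactly right. */
    static int parse_efs(const uint8_t *val, size_t olen, struct efs_opt *out)
    {
        if (olen != sizeof(*out))
            return 0;           /* ignore malformed/short option */
        memcpy(out, val, olen);
        return 1;               /* caller may now use *out */
    }

    int main(void)
    {
        uint8_t wire[sizeof(struct efs_opt)] = { 0 };
        struct efs_opt efs;     /* deliberately uninitialized */

        printf("%d\n", parse_efs(wire, 3, &efs));            /* 0: skipped */
        printf("%d\n", parse_efs(wire, sizeof(wire), &efs)); /* 1: usable */
        return 0;
    }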
This issue has been assigned CVE-2017-1000410 Cc: Marcel Holtmann Cc: Gustavo Padovan Cc: Johan Hedberg Signed-off-by: Ben Seri Signed-off-by: Greg Kroah-Hartman --- net/bluetooth/l2cap_core.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 43ba91c440bcd6..fc6615d5916524 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -3363,9 +3363,10 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data break; case L2CAP_CONF_EFS: - remote_efs = 1; - if (olen == sizeof(efs)) + if (olen == sizeof(efs)) { + remote_efs = 1; memcpy(&efs, (void *) val, olen); + } break; case L2CAP_CONF_EWS: @@ -3584,16 +3585,17 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, break; case L2CAP_CONF_EFS: - if (olen == sizeof(efs)) + if (olen == sizeof(efs)) { memcpy(&efs, (void *)val, olen); - if (chan->local_stype != L2CAP_SERV_NOTRAFIC && - efs.stype != L2CAP_SERV_NOTRAFIC && - efs.stype != chan->local_stype) - return -ECONNREFUSED; + if (chan->local_stype != L2CAP_SERV_NOTRAFIC && + efs.stype != L2CAP_SERV_NOTRAFIC && + efs.stype != chan->local_stype) + return -ECONNREFUSED; - l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), - (unsigned long) &efs, endptr - ptr); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), + (unsigned long) &efs, endptr - ptr); + } break; case L2CAP_CONF_FCS: From 8e13548c7f50cba68a0a122218431bec741938f9 Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Sat, 6 Jan 2018 00:56:44 +0800 Subject: [PATCH 1339/3326] uas: ignore UAS for Norelsys NS1068(X) chips commit 928afc85270753657b5543e052cc270c279a3fe9 upstream. The UAS mode of Norelsys NS1068(X) is reported to fail to work on several platforms with the following error message: xhci-hcd xhci-hcd.0.auto: ERROR Transfer event for unknown stream ring slot 1 ep 8 xhci-hcd xhci-hcd.0.auto: @00000000bf04a400 00000000 00000000 1b000000 01098001 And when trying to mount a partition on the disk the disk will disconnect from the USB controller, then after re-connecting the device will be offlined and not working at all. Falling back to USB mass storage can solve this problem, so ignore UAS function of this chip. Signed-off-by: Icenowy Zheng Acked-by: Hans de Goede Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/unusual_uas.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 9f356f7cf7d500..719ec68ae3099e 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -156,6 +156,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* Reported-by: Icenowy Zheng */ +UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999, + "Norelsys", + "NS1068X", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: Takeo Nakayama */ UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, "JMicron", From 45a061d6b4e0e671cc3aa1bed8bf47078eade69e Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 29 Dec 2017 00:22:54 +0100 Subject: [PATCH 1340/3326] mux: core: fix double get_device() commit aa1f10e85b0ab53dee85d8e293c8159d18d293a8 upstream. class_find_device already does a get_device on the returned device. So the device returned by of_find_mux_chip_by_node is already referenced and we should not reference it again (and unref it on error). 
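The ownership rule being fixed: a lookup built on class_find_device() hands back an object that is already referenced, so the caller must not take a second reference, and must drop exactly one reference on every error path. A toy userspace model of that contract (illustrative only; obj_get()/obj_put() and find() are made-up stand-ins for get_device()/put_device() and the lookup):

    #include <stdio.h>

    struct obj {
        int refcount;
    };

    static void obj_get(struct obj *o) { o->refcount++; }
    static void obj_put(struct obj *o) { o->refcount--; }

    /* Like class_find_device(): the object comes back already referenced. */
    static struct obj *find(struct obj *o)
    {
        obj_get(o);
        return o;
    }

    int main(void)
    {
        struct obj chip = { .refcount = 1 };
        struct obj *found = find(&chip);

        /* error path: drop exactly the reference find() took, nothing more */
        obj_put(found);

        printf("refcount=%d\n", chip.refcount); /* back to 1 */
        return 0;
    }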
Signed-off-by: Hans de Goede Signed-off-by: Peter Rosin Signed-off-by: Greg Kroah-Hartman --- drivers/mux/core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/mux/core.c b/drivers/mux/core.c index 2260063b0ea83b..6e5cf9d9cd9927 100644 --- a/drivers/mux/core.c +++ b/drivers/mux/core.c @@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data) return dev->of_node == data; } +/* Note this function returns a reference to the mux_chip dev. */ static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) { struct device *dev; @@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) (!args.args_count && (mux_chip->controllers > 1))) { dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n", np, args.np); + put_device(&mux_chip->dev); return ERR_PTR(-EINVAL); } @@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) if (controller >= mux_chip->controllers) { dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n", np, controller, args.np); + put_device(&mux_chip->dev); return ERR_PTR(-EINVAL); } - get_device(&mux_chip->dev); return &mux_chip->mux[controller]; } EXPORT_SYMBOL_GPL(mux_control_get); From a4ae05b7bfcdd72f135099b848e1777e486efb19 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 12 Jan 2018 16:53:14 -0800 Subject: [PATCH 1341/3326] kdump: write correct address of mem_section into vmcoreinfo commit a0b1280368d1e91ab72f849ef095b4f07a39bbf1 upstream. Depending on configuration mem_section can now be an array or a pointer to an array allocated dynamically. In most cases, we can continue to refer to it as 'mem_section' regardless of what it is. But there's one exception: '&mem_section' means "address of the array" if mem_section is an array, but if mem_section is a pointer, it would mean "address of the pointer". We've stepped onto this in kdump code. VMCOREINFO_SYMBOL(mem_section) writes down address of pointer into vmcoreinfo, not array as we wanted. Let's introduce VMCOREINFO_SYMBOL_ARRAY() that would handle the situation correctly for both cases. Link: http://lkml.kernel.org/r/20180112162532.35896-1-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Fixes: 83e3c48729d9 ("mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y") Acked-by: Baoquan He Acked-by: Dave Young Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. 
Peter Anvin" Cc: Greg Kroah-Hartman Cc: Dave Young Cc: Baoquan He Cc: Vivek Goyal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/crash_core.h | 2 ++ kernel/crash_core.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 06097ef304491d..b511f6d24b42b0 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -42,6 +42,8 @@ phys_addr_t paddr_vmcoreinfo_note(void); vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name) +#define VMCOREINFO_SYMBOL_ARRAY(name) \ + vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name) #define VMCOREINFO_SIZE(name) \ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ (unsigned long)sizeof(name)) diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 6db80fc0810b92..2d90996dbe7715 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -409,7 +409,7 @@ static int __init crash_save_vmcoreinfo_init(void) VMCOREINFO_SYMBOL(contig_page_data); #endif #ifdef CONFIG_SPARSEMEM - VMCOREINFO_SYMBOL(mem_section); + VMCOREINFO_SYMBOL_ARRAY(mem_section); VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); VMCOREINFO_STRUCT_SIZE(mem_section); VMCOREINFO_OFFSET(mem_section, section_mem_map); From 233363fd02c5fffdfd0be305b70adbbe661d22cf Mon Sep 17 00:00:00 2001 From: John Johansen Date: Fri, 8 Dec 2017 17:43:18 -0800 Subject: [PATCH 1342/3326] apparmor: fix ptrace label match when matching stacked labels commit 0dda0b3fb255048a221f736c8a2a24c674da8bf3 upstream. Given a label with a profile stack of A//&B or A//&C ... A ptrace rule should be able to specify a generic trace pattern with a rule like ptrace trace A//&**, however this is failing because while the correct label match routine is called, it is being done post label decomposition so it is always being done against a profile instead of the stacked label. To fix this refactor the cross check to pass the full peer label in to the label_match. Fixes: 290f458a4f16 ("apparmor: allow ptrace checks to be finer grained than just capability") Reported-by: Matthew Garrett Tested-by: Matthew Garrett Signed-off-by: John Johansen Signed-off-by: Greg Kroah-Hartman --- security/apparmor/include/perms.h | 3 ++ security/apparmor/ipc.c | 53 +++++++++++++++++++------------ 2 files changed, 35 insertions(+), 21 deletions(-) diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h index 2b27bb79aec442..d7b7e711516010 100644 --- a/security/apparmor/include/perms.h +++ b/security/apparmor/include/perms.h @@ -133,6 +133,9 @@ extern struct aa_perms allperms; #define xcheck_labels_profiles(L1, L2, FN, args...) 
\ xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args) +#define xcheck_labels(L1, L2, P, FN1, FN2) \ + xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2))) + void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask); diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c index 7ca0032e7ba96e..b40678f3c1d5a4 100644 --- a/security/apparmor/ipc.c +++ b/security/apparmor/ipc.c @@ -64,40 +64,48 @@ static void audit_ptrace_cb(struct audit_buffer *ab, void *va) FLAGS_NONE, GFP_ATOMIC); } +/* assumes check for PROFILE_MEDIATES is already done */ /* TODO: conditionals */ static int profile_ptrace_perm(struct aa_profile *profile, - struct aa_profile *peer, u32 request, - struct common_audit_data *sa) + struct aa_label *peer, u32 request, + struct common_audit_data *sa) { struct aa_perms perms = { }; - /* need because of peer in cross check */ - if (profile_unconfined(profile) || - !PROFILE_MEDIATES(profile, AA_CLASS_PTRACE)) - return 0; - - aad(sa)->peer = &peer->label; - aa_profile_match_label(profile, &peer->label, AA_CLASS_PTRACE, request, + aad(sa)->peer = peer; + aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request, &perms); aa_apply_modes_to_perms(profile, &perms); return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb); } -static int cross_ptrace_perm(struct aa_profile *tracer, - struct aa_profile *tracee, u32 request, - struct common_audit_data *sa) +static int profile_tracee_perm(struct aa_profile *tracee, + struct aa_label *tracer, u32 request, + struct common_audit_data *sa) { + if (profile_unconfined(tracee) || unconfined(tracer) || + !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE)) + return 0; + + return profile_ptrace_perm(tracee, tracer, request, sa); +} + +static int profile_tracer_perm(struct aa_profile *tracer, + struct aa_label *tracee, u32 request, + struct common_audit_data *sa) +{ + if (profile_unconfined(tracer)) + return 0; + if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE)) - return xcheck(profile_ptrace_perm(tracer, tracee, request, sa), - profile_ptrace_perm(tracee, tracer, - request << PTRACE_PERM_SHIFT, - sa)); - /* policy uses the old style capability check for ptrace */ - if (profile_unconfined(tracer) || tracer == tracee) + return profile_ptrace_perm(tracer, tracee, request, sa); + + /* profile uses the old style capability check for ptrace */ + if (&tracer->label == tracee) return 0; aad(sa)->label = &tracer->label; - aad(sa)->peer = &tracee->label; + aad(sa)->peer = tracee; aad(sa)->request = 0; aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1); @@ -115,10 +123,13 @@ static int cross_ptrace_perm(struct aa_profile *tracer, int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee, u32 request) { + struct aa_profile *profile; + u32 xrequest = request << PTRACE_PERM_SHIFT; DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE); - return xcheck_labels_profiles(tracer, tracee, cross_ptrace_perm, - request, &sa); + return xcheck_labels(tracer, tracee, profile, + profile_tracer_perm(profile, tracee, request, &sa), + profile_tracee_perm(profile, tracer, xrequest, &sa)); } From fbbc6c4eeb0a55935e0d4d89a46f3783cf170b84 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Mon, 11 Dec 2017 16:26:40 +0900 Subject: [PATCH 1343/3326] e1000e: Fix e1000_check_for_copper_link_ich8lan return value. commit 4110e02eb45ea447ec6f5459c9934de0a273fb91 upstream. 
e1000e_check_for_copper_link() and e1000_check_for_copper_link_ich8lan() are the two functions that may be assigned to mac.ops.check_for_link when phy.media_type == e1000_media_type_copper. Commit 19110cfbb34d ("e1000e: Separate signaling for link check/link up") changed the meaning of the return value of check_for_link for copper media but only adjusted the first function. This patch adjusts the second function likewise. Reported-by: Christian Hesse Reported-by: Gabriel C Link: https://bugzilla.kernel.org/show_bug.cgi?id=198047 Fixes: 19110cfbb34d ("e1000e: Separate signaling for link check/link up") Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Tested-by: Christian Hesse Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/e1000e/ich8lan.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index d6d4ed7acf0311..31277d3bb7dc12 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1367,6 +1367,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). **/ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { @@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 0; + return 1; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * different link partner. */ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) + if (ret_val) { e_dbg("Error configuring flow control\n"); + return ret_val; + } - return ret_val; + return 1; } static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) From 58168505a9f13a60b3465fbdd85c70a560640a41 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 5 Jan 2018 22:35:41 +0100 Subject: [PATCH 1344/3326] x86/pti: Unbreak EFI old_memmap commit de53c3786a3ce162a1c815d0c04c766c23ec9c0a upstream. EFI_OLD_MEMMAP's efi_call_phys_prolog() calls set_pgd() with swapper PGD that has PAGE_USER set, which makes PTI set NX on it, and therefore EFI can't execute it's code. Fix that by forcefully clearing _PAGE_NX from the PGD (this can't be done by the pgprot API). _PAGE_NX will be automatically reintroduced in efi_call_phys_epilog(), as _set_pgd() will again notice that this is _PAGE_USER, and set _PAGE_NX on it. 
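The fix operates on raw page-table entry bits: on x86-64 the execute-disable bit is the top bit of the entry, and clearing it from the PGD value is all that is needed (and all that can be done here, since the pgprot helpers will not express it). A trivial sketch of the bit manipulation (illustrative only; the example entry value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_NX (1ULL << 63)  /* x86-64 execute-disable bit in a page-table entry */

    int main(void)
    {
        uint64_t pgd = PAGE_NX | 0x67; /* arbitrary example: NX plus some low flags */

        pgd &= ~PAGE_NX;               /* the clearing the patch adds in the prolog */
        printf("0x%016llx\n", (unsigned long long)pgd); /* 0x0000000000000067 */
        return 0;
    }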
Tested-by: Dimitri Sivanich Signed-off-by: Jiri Kosina Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner Acked-by: Dave Hansen Cc: Andrea Arcangeli Cc: Ard Biesheuvel Cc: Linus Torvalds Cc: Matt Fleming Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/nycvar.YFH.7.76.1801052215460.11852@cbobk.fhfr.pm Signed-off-by: Greg Kroah-Hartman --- arch/x86/platform/efi/efi_64.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 39c4b35ac7a4a9..61975b6bcb1a0a 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -134,7 +134,9 @@ pgd_t * __init efi_call_phys_prolog(void) pud[j] = *pud_offset(p4d_k, vaddr); } } + pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX; } + out: __flush_tlb_all(); From af17c6526b60d52a35033fede9461bd9fbfa5a70 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 5 Jan 2018 09:44:36 -0800 Subject: [PATCH 1345/3326] x86/Documentation: Add PTI description commit 01c9b17bf673b05bb401b76ec763e9730ccf1376 upstream. Add some details about how PTI works, what some of the downsides are, and how to debug it when things go wrong. Also document the kernel parameter: 'pti/nopti'. Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Reviewed-by: Randy Dunlap Reviewed-by: Kees Cook Cc: Moritz Lipp Cc: Daniel Gruss Cc: Michael Schwarz Cc: Richard Fellner Cc: Andy Lutomirski Cc: Linus Torvalds Cc: Hugh Dickins Cc: Andi Lutomirsky Link: https://lkml.kernel.org/r/20180105174436.1BC6FA2B@viggo.jf.intel.com Signed-off-by: Greg Kroah-Hartman --- .../admin-guide/kernel-parameters.txt | 21 +- Documentation/x86/pti.txt | 186 ++++++++++++++++++ 2 files changed, 200 insertions(+), 7 deletions(-) create mode 100644 Documentation/x86/pti.txt diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 520fdec15bbb5b..905991745d261d 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2685,8 +2685,6 @@ steal time is computed, but won't influence scheduler behaviour - nopti [X86-64] Disable kernel page table isolation - nolapic [X86-32,APIC] Do not enable or use the local APIC. nolapic_timer [X86-32,APIC] Do not use the local APIC timer. @@ -3255,11 +3253,20 @@ pt. [PARIDE] See Documentation/blockdev/paride.txt. - pti= [X86_64] - Control user/kernel address space isolation: - on - enable - off - disable - auto - default setting + pti= [X86_64] Control Page Table Isolation of user and + kernel address spaces. Disabling this feature + removes hardening, but improves performance of + system calls and interrupts. + + on - unconditionally enable + off - unconditionally disable + auto - kernel detects whether your CPU model is + vulnerable to issues that PTI mitigates + + Not specifying this option is equivalent to pti=auto. + + nopti [X86_64] + Equivalent to pti=off pty.legacy_count= [KNL] Number of legacy pty's. Overwrites compiled-in diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt new file mode 100644 index 00000000000000..d11eff61fc9add --- /dev/null +++ b/Documentation/x86/pti.txt @@ -0,0 +1,186 @@ +Overview +======== + +Page Table Isolation (pti, previously known as KAISER[1]) is a +countermeasure against attacks on the shared user/kernel address +space such as the "Meltdown" approach[2]. 
+ +To mitigate this class of attacks, we create an independent set of +page tables for use only when running userspace applications. When +the kernel is entered via syscalls, interrupts or exceptions, the +page tables are switched to the full "kernel" copy. When the system +switches back to user mode, the user copy is used again. + +The userspace page tables contain only a minimal amount of kernel +data: only what is needed to enter/exit the kernel such as the +entry/exit functions themselves and the interrupt descriptor table +(IDT). There are a few strictly unnecessary things that get mapped +such as the first C function when entering an interrupt (see +comments in pti.c). + +This approach helps to ensure that side-channel attacks leveraging +the paging structures do not function when PTI is enabled. It can be +enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time. +Once enabled at compile-time, it can be disabled at boot with the +'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt). + +Page Table Management +===================== + +When PTI is enabled, the kernel manages two sets of page tables. +The first set is very similar to the single set which is present in +kernels without PTI. This includes a complete mapping of userspace +that the kernel can use for things like copy_to_user(). + +Although _complete_, the user portion of the kernel page tables is +crippled by setting the NX bit in the top level. This ensures +that any missed kernel->user CR3 switch will immediately crash +userspace upon executing its first instruction. + +The userspace page tables map only the kernel data needed to enter +and exit the kernel. This data is entirely contained in the 'struct +cpu_entry_area' structure which is placed in the fixmap which gives +each CPU's copy of the area a compile-time-fixed virtual address. + +For new userspace mappings, the kernel makes the entries in its +page tables like normal. The only difference is when the kernel +makes entries in the top (PGD) level. In addition to setting the +entry in the main kernel PGD, a copy of the entry is made in the +userspace page tables' PGD. + +This sharing at the PGD level also inherently shares all the lower +layers of the page tables. This leaves a single, shared set of +userspace page tables to manage. One PTE to lock, one set of +accessed bits, dirty bits, etc... + +Overhead +======== + +Protection against side-channel attacks is important. But, +this protection comes at a cost: + +1. Increased Memory Use + a. Each process now needs an order-1 PGD instead of order-0. + (Consumes an additional 4k per process). + b. The 'cpu_entry_area' structure must be 2MB in size and 2MB + aligned so that it can be mapped by setting a single PMD + entry. This consumes nearly 2MB of RAM once the kernel + is decompressed, but no space in the kernel image itself. + +2. Runtime Cost + a. CR3 manipulation to switch between the page table copies + must be done at interrupt, syscall, and exception entry + and exit (it can be skipped when the kernel is interrupted, + though.) Moves to CR3 are on the order of a hundred + cycles, and are required at every entry and exit. + b. A "trampoline" must be used for SYSCALL entry. This + trampoline depends on a smaller set of resources than the + non-PTI SYSCALL entry code, so requires mapping fewer + things into the userspace page tables. The downside is + that stacks must be switched at entry time. + d. 
Global pages are disabled for all kernel structures not + mapped into both kernel and userspace page tables. This + feature of the MMU allows different processes to share TLB + entries mapping the kernel. Losing the feature means more + TLB misses after a context switch. The actual loss of + performance is very small, however, never exceeding 1%. + d. Process Context IDentifiers (PCID) is a CPU feature that + allows us to skip flushing the entire TLB when switching page + tables by setting a special bit in CR3 when the page tables + are changed. This makes switching the page tables (at context + switch, or kernel entry/exit) cheaper. But, on systems with + PCID support, the context switch code must flush both the user + and kernel entries out of the TLB. The user PCID TLB flush is + deferred until the exit to userspace, minimizing the cost. + See intel.com/sdm for the gory PCID/INVPCID details. + e. The userspace page tables must be populated for each new + process. Even without PTI, the shared kernel mappings + are created by copying top-level (PGD) entries into each + new process. But, with PTI, there are now *two* kernel + mappings: one in the kernel page tables that maps everything + and one for the entry/exit structures. At fork(), we need to + copy both. + f. In addition to the fork()-time copying, there must also + be an update to the userspace PGD any time a set_pgd() is done + on a PGD used to map userspace. This ensures that the kernel + and userspace copies always map the same userspace + memory. + g. On systems without PCID support, each CR3 write flushes + the entire TLB. That means that each syscall, interrupt + or exception flushes the TLB. + h. INVPCID is a TLB-flushing instruction which allows flushing + of TLB entries for non-current PCIDs. Some systems support + PCIDs, but do not support INVPCID. On these systems, addresses + can only be flushed from the TLB for the current PCID. When + flushing a kernel address, we need to flush all PCIDs, so a + single kernel address flush will require a TLB-flushing CR3 + write upon the next use of every PCID. + +Possible Future Work +==================== +1. We can be more careful about not actually writing to CR3 + unless its value is actually changed. +2. Allow PTI to be enabled/disabled at runtime in addition to the + boot-time switching. + +Testing +======== + +To test stability of PTI, the following test procedure is recommended, +ideally doing all of these in parallel: + +1. Set CONFIG_DEBUG_ENTRY=y +2. Run several copies of all of the tools/testing/selftests/x86/ tests + (excluding MPX and protection_keys) in a loop on multiple CPUs for + several minutes. These tests frequently uncover corner cases in the + kernel entry code. In general, old kernels might cause these tests + themselves to crash, but they should never crash the kernel. +3. Run the 'perf' tool in a mode (top or record) that generates many + frequent performance monitoring non-maskable interrupts (see "NMI" + in /proc/interrupts). This exercises the NMI entry/exit code which + is known to trigger bugs in code paths that did not expect to be + interrupted, including nested NMIs. Using "-c" boosts the rate of + NMIs, and using two -c with separate counters encourages nested NMIs + and less deterministic behavior. + + while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done + +4. Launch a KVM virtual machine. +5. Run 32-bit binaries on systems supporting the SYSCALL instruction. + This has been a lightly-tested code path and needs extra scrutiny. 
+ +Debugging +========= + +Bugs in PTI cause a few different signatures of crashes +that are worth noting here. + + * Failures of the selftests/x86 code. Usually a bug in one of the + more obscure corners of entry_64.S + * Crashes in early boot, especially around CPU bringup. Bugs + in the trampoline code or mappings cause these. + * Crashes at the first interrupt. Caused by bugs in entry_64.S, + like screwing up a page table switch. Also caused by + incorrectly mapping the IRQ handler entry code. + * Crashes at the first NMI. The NMI code is separate from main + interrupt handlers and can have bugs that do not affect + normal interrupts. Also caused by incorrectly mapping NMI + code. NMIs that interrupt the entry code must be very + careful and can be the cause of crashes that show up when + running perf. + * Kernel crashes at the first exit to userspace. entry_64.S + bugs, or failing to map some of the exit code. + * Crashes at first interrupt that interrupts userspace. The paths + in entry_64.S that return to userspace are sometimes separate + from the ones that return to the kernel. + * Double faults: overflowing the kernel stack because of page + faults upon page faults. Caused by touching non-pti-mapped + data in the entry code, or forgetting to switch to kernel + CR3 before calling into C functions which are not pti-mapped. + * Userspace segfaults early in boot, sometimes manifesting + as mount(8) failing to mount the rootfs. These have + tended to be TLB invalidation issues. Usually invalidating + the wrong PCID, or otherwise missing an invalidation. + +1. https://gruss.cc/files/kaiser.pdf +2. https://meltdownattack.com/meltdown.pdf From 9298e868dddd820829f814cd25a0f28c92036af7 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 6 Jan 2018 11:49:23 +0000 Subject: [PATCH 1346/3326] x86/cpufeatures: Add X86_BUG_SPECTRE_V[12] commit 99c6fa2511d8a683e61468be91b83f85452115fa upstream. Add the bug bits for spectre v1/2 and force them unconditionally for all cpus. 
Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515239374-23361-2-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 2 ++ arch/x86/kernel/cpu/common.c | 3 +++ 2 files changed, 5 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 21ac898df2d8e4..1641c2f9636347 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -342,5 +342,7 @@ #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ +#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ +#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 2d3bd2215e5b47..372ba3fb400fad 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -902,6 +902,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) if (c->x86_vendor != X86_VENDOR_AMD) setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + fpu__init_system(c); #ifdef CONFIG_X86_32 From 5a3e4b399ebc994516f7b6bd8ae0584027e72418 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 7 Jan 2018 22:48:00 +0100 Subject: [PATCH 1347/3326] sysfs/cpu: Add vulnerability folder commit 87590ce6e373d1a5401f6539f0c59ef92dd924a9 upstream. As the meltdown/spectre problem affects several CPU architectures, it makes sense to have common way to express whether a system is affected by a particular vulnerability or not. If affected the way to express the mitigation should be common as well. Create /sys/devices/system/cpu/vulnerabilities folder and files for meltdown, spectre_v1 and spectre_v2. Allow architectures to override the show function. Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Reviewed-by: Konrad Rzeszutek Wilk Cc: Peter Zijlstra Cc: Will Deacon Cc: Dave Hansen Cc: Linus Torvalds Cc: Borislav Petkov Cc: David Woodhouse Link: https://lkml.kernel.org/r/20180107214913.096657732@linutronix.de Signed-off-by: Greg Kroah-Hartman --- .../ABI/testing/sysfs-devices-system-cpu | 16 +++++++ drivers/base/Kconfig | 3 ++ drivers/base/cpu.c | 48 +++++++++++++++++++ include/linux/cpu.h | 7 +++ 4 files changed, 74 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index f3d5817c4ef0fa..bd3a88e16d8b45 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -373,3 +373,19 @@ Contact: Linux kernel mailing list Description: information about CPUs heterogeneity. cpu_capacity: capacity of cpu#. 
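Once the folder exists, the state can be queried with nothing more than reads of the three files; the strings follow the "Not affected" / "Vulnerable" / "Mitigation: ..." convention described in the ABI document. A small reader (illustrative only; on kernels without this patch the files are simply absent):

    #include <stdio.h>

    int main(void)
    {
        static const char *files[] = {
            "/sys/devices/system/cpu/vulnerabilities/meltdown",
            "/sys/devices/system/cpu/vulnerabilities/spectre_v1",
            "/sys/devices/system/cpu/vulnerabilities/spectre_v2",
        };
        char line[256];

        for (unsigned int i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
            FILE *f = fopen(files[i], "r");

            if (!f) {
                printf("%s: not available\n", files[i]);
                continue;
            }
            if (fgets(line, sizeof(line), f))
                printf("%s: %s", files[i], line);
            fclose(f);
        }
        return 0;
    }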
+ +What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/meltdown + /sys/devices/system/cpu/vulnerabilities/spectre_v1 + /sys/devices/system/cpu/vulnerabilities/spectre_v2 +Date: Januar 2018 +Contact: Linux kernel mailing list +Description: Information about CPU vulnerabilities + + The files are named after the code names of CPU + vulnerabilities. The output of those files reflects the + state of the CPUs in the system. Possible output values: + + "Not affected" CPU is not affected by the vulnerability + "Vulnerable" CPU is affected and no mitigation in effect + "Mitigation: $M" CPU is affetcted and mitigation $M is in effect diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index bdc87907d6a1b2..2415ad9f6dd4f1 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -236,6 +236,9 @@ config GENERIC_CPU_DEVICES config GENERIC_CPU_AUTOPROBE bool +config GENERIC_CPU_VULNERABILITIES + bool + config SOC_BUS bool select GLOB diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 321cd7b4d817fd..825964efda1dc3 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -501,10 +501,58 @@ static void __init cpu_dev_register_generic(void) #endif } +#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES + +ssize_t __weak cpu_show_meltdown(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); +static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); +static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); + +static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, + &dev_attr_spectre_v1.attr, + &dev_attr_spectre_v2.attr, + NULL +}; + +static const struct attribute_group cpu_root_vulnerabilities_group = { + .name = "vulnerabilities", + .attrs = cpu_root_vulnerabilities_attrs, +}; + +static void __init cpu_register_vulnerabilities(void) +{ + if (sysfs_create_group(&cpu_subsys.dev_root->kobj, + &cpu_root_vulnerabilities_group)) + pr_err("Unable to register CPU vulnerabilities\n"); +} + +#else +static inline void cpu_register_vulnerabilities(void) { } +#endif + void __init cpu_dev_init(void) { if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) panic("Failed to register CPU subsystem"); cpu_dev_register_generic(); + cpu_register_vulnerabilities(); } diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 938ea8ae0ba422..c816e6f2730cd6 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr); extern int cpu_add_dev_attr_group(struct attribute_group *attrs); extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); +extern ssize_t cpu_show_meltdown(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf); + extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, const struct attribute_group **groups, From e0d753568c16f123e4805b7136734f3a83403fb6 Mon Sep 17 
00:00:00 2001 From: Thomas Gleixner Date: Sun, 7 Jan 2018 22:48:01 +0100 Subject: [PATCH 1348/3326] x86/cpu: Implement CPU vulnerabilites sysfs functions commit 61dc0f555b5c761cdafb0ba5bd41ecf22d68a4c4 upstream. Implement the CPU vulnerabilty show functions for meltdown, spectre_v1 and spectre_v2. Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Reviewed-by: Konrad Rzeszutek Wilk Cc: Peter Zijlstra Cc: Will Deacon Cc: Dave Hansen Cc: Linus Torvalds Cc: Borislav Petkov Cc: David Woodhouse Link: https://lkml.kernel.org/r/20180107214913.177414879@linutronix.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/Kconfig | 1 + arch/x86/kernel/cpu/bugs.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 592c974d455887..7184ebb0cd7846 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -89,6 +89,7 @@ config X86 select GENERIC_CLOCKEVENTS_MIN_ADJUST select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE + select GENERIC_CPU_VULNERABILITIES select GENERIC_EARLY_IOREMAP select GENERIC_FIND_FIRST_BIT select GENERIC_IOMAP diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index ba0b2424c9b050..76ad6cb44b4026 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -10,6 +10,7 @@ */ #include #include +#include #include #include #include @@ -60,3 +61,31 @@ void __init check_bugs(void) set_memory_4k((unsigned long)__va(0), 1); #endif } + +#ifdef CONFIG_SYSFS +ssize_t cpu_show_meltdown(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + return sprintf(buf, "Not affected\n"); + if (boot_cpu_has(X86_FEATURE_PTI)) + return sprintf(buf, "Mitigation: PTI\n"); + return sprintf(buf, "Vulnerable\n"); +} + +ssize_t cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) + return sprintf(buf, "Not affected\n"); + return sprintf(buf, "Vulnerable\n"); +} + +ssize_t cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return sprintf(buf, "Not affected\n"); + return sprintf(buf, "Vulnerable\n"); +} +#endif From 968a3bb156424a9e4ea7aa1eb2779b6e86b5a63e Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Sat, 6 Jan 2018 18:41:14 +0100 Subject: [PATCH 1349/3326] x86/tboot: Unbreak tboot with PTI enabled commit 262b6b30087246abf09d6275eb0c0dc421bcbe38 upstream. This is another case similar to what EFI does: create a new set of page tables, map some code at a low address, and jump to it. PTI mistakes this low address for userspace and mistakenly marks it non-executable in an effort to make it unusable for userspace. Undo the poison to allow execution. 
Fixes: 385ce0ea4c07 ("x86/mm/pti: Add Kconfig") Signed-off-by: Dave Hansen Signed-off-by: Andrea Arcangeli Signed-off-by: Thomas Gleixner Cc: Alan Cox Cc: Tim Chen Cc: Jon Masters Cc: Dave Hansen Cc: Andi Kleen Cc: Jeff Law Cc: Paolo Bonzini Cc: Linus Torvalds Cc: Greg Kroah-Hartman Cc: David Cc: Nick Clifton Link: https://lkml.kernel.org/r/20180108102805.GK25546@redhat.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/tboot.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index a4eb27918cebf9..75869a4b6c4188 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -127,6 +127,7 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn, p4d = p4d_alloc(&tboot_mm, pgd, vaddr); if (!p4d) return -1; + pgd->pgd &= ~_PAGE_NX; pud = pud_alloc(&tboot_mm, p4d, vaddr); if (!pud) return -1; From c0090f9f280d576a3b0cab9fb5eec9fb0de988d0 Mon Sep 17 00:00:00 2001 From: Jike Song Date: Tue, 9 Jan 2018 00:03:41 +0800 Subject: [PATCH 1350/3326] x86/mm/pti: Remove dead logic in pti_user_pagetable_walk*() commit 8d56eff266f3e41a6c39926269c4c3f58f881a8e upstream. The following code contains dead logic: 162 if (pgd_none(*pgd)) { 163 unsigned long new_p4d_page = __get_free_page(gfp); 164 if (!new_p4d_page) 165 return NULL; 166 167 if (pgd_none(*pgd)) { 168 set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); 169 new_p4d_page = 0; 170 } 171 if (new_p4d_page) 172 free_page(new_p4d_page); 173 } There can't be any difference between two pgd_none(*pgd) at L162 and L167, so it's always false at L171. Dave Hansen explained: Yes, the double-test was part of an optimization where we attempted to avoid using a global spinlock in the fork() path. We would check for unallocated mid-level page tables without the lock. The lock was only taken when we needed to *make* an entry to avoid collisions. Now that it is all single-threaded, there is no chance of a collision, no need for a lock, and no need for the re-check. As all these functions are only called during init, mark them __init as well. Fixes: 03f4424f348e ("x86/mm/pti: Add functions to clone kernel PMDs") Signed-off-by: Jike Song Signed-off-by: Thomas Gleixner Cc: Alan Cox Cc: Andi Kleen Cc: Tom Lendacky Cc: Peter Zijlstra Cc: Tim Chen Cc: Jiri Koshina Cc: Dave Hansen Cc: Borislav Petkov Cc: Kees Cook Cc: Andi Lutomirski Cc: Linus Torvalds Cc: Greg KH Cc: David Woodhouse Cc: Paul Turner Link: https://lkml.kernel.org/r/20180108160341.3461-1-albcamus@gmail.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/pti.c | 32 ++++++-------------------------- 1 file changed, 6 insertions(+), 26 deletions(-) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 43d4a4a29037ed..ce38f165489b5a 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -149,7 +149,7 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) * * Returns a pointer to a P4D on success, or NULL on failure. 
*/ -static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) +static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) { pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); @@ -164,12 +164,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) if (!new_p4d_page) return NULL; - if (pgd_none(*pgd)) { - set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); - new_p4d_page = 0; - } - if (new_p4d_page) - free_page(new_p4d_page); + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); } BUILD_BUG_ON(pgd_large(*pgd) != 0); @@ -182,7 +177,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) * * Returns a pointer to a PMD on success, or NULL on failure. */ -static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) +static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) { gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); p4d_t *p4d = pti_user_pagetable_walk_p4d(address); @@ -194,12 +189,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) if (!new_pud_page) return NULL; - if (p4d_none(*p4d)) { - set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page))); - new_pud_page = 0; - } - if (new_pud_page) - free_page(new_pud_page); + set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page))); } pud = pud_offset(p4d, address); @@ -213,12 +203,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) if (!new_pmd_page) return NULL; - if (pud_none(*pud)) { - set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page))); - new_pmd_page = 0; - } - if (new_pmd_page) - free_page(new_pmd_page); + set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page))); } return pmd_offset(pud, address); @@ -251,12 +236,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) if (!new_pte_page) return NULL; - if (pmd_none(*pmd)) { - set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page))); - new_pte_page = 0; - } - if (new_pte_page) - free_page(new_pte_page); + set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page))); } pte = pte_offset_kernel(pmd, address); From 649c1de819afc90cb6699dc04f5340009dd3783c Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 8 Jan 2018 16:09:21 -0600 Subject: [PATCH 1351/3326] x86/cpu/AMD: Make LFENCE a serializing instruction commit e4d0e84e490790798691aaa0f2e598637f1867ec upstream. To aid in speculation control, make LFENCE a serializing instruction since it has less overhead than MFENCE. This is done by setting bit 1 of MSR 0xc0011029 (DE_CFG). Some families that support LFENCE do not have this MSR. For these families, the LFENCE instruction is already serializing. 
Signed-off-by: Tom Lendacky Signed-off-by: Thomas Gleixner Reviewed-by: Reviewed-by: Borislav Petkov Cc: Peter Zijlstra Cc: Tim Chen Cc: Dave Hansen Cc: Borislav Petkov Cc: Dan Williams Cc: Linus Torvalds Cc: Greg Kroah-Hartman Cc: David Woodhouse Cc: Paul Turner Link: https://lkml.kernel.org/r/20180108220921.12580.71694.stgit@tlendack-t1.amdoffice.net Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/msr-index.h | 2 ++ arch/x86/kernel/cpu/amd.c | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index ab022618a50af9..1e7d710fef4306 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -352,6 +352,8 @@ #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL #define FAM10H_MMIO_CONF_BASE_SHIFT 20 #define MSR_FAM10H_NODE_ID 0xc001100c +#define MSR_F10H_DECFG 0xc0011029 +#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 /* K8 MSRs */ #define MSR_K8_TOP_MEM1 0xc001001a diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index bcb75dc97d4407..5b438d81beb227 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -829,6 +829,16 @@ static void init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_K8); if (cpu_has(c, X86_FEATURE_XMM2)) { + /* + * A serializing LFENCE has less overhead than MFENCE, so + * use it for execution serialization. On families which + * don't have that MSR, LFENCE is already serializing. + * msr_set_bit() uses the safe accessors, too, even if the MSR + * is not present. + */ + msr_set_bit(MSR_F10H_DECFG, + MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); + /* MFENCE stops RDTSC speculation */ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); } From 268114cdb3086ef54652b214f4f1e5d0a961c5f8 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 8 Jan 2018 16:09:32 -0600 Subject: [PATCH 1352/3326] x86/cpu/AMD: Use LFENCE_RDTSC in preference to MFENCE_RDTSC commit 9c6a73c75864ad9fa49e5fa6513e4c4071c0e29f upstream. With LFENCE now a serializing instruction, use LFENCE_RDTSC in preference to MFENCE_RDTSC. However, since the kernel could be running under a hypervisor that does not support writing that MSR, read the MSR back and verify that the bit has been set successfully. If the MSR can be read and the bit is set, then set the LFENCE_RDTSC feature, otherwise set the MFENCE_RDTSC feature. 
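For illustration only (editor's sketch, not the hunk applied below): the verification logic in outline. The helper name is made up; rdmsrl_safe(), the DE_CFG bit and the feature flags are the ones used by the real change.

/*
 * Sketch: under a hypervisor the DE_CFG write may be silently dropped,
 * so rely on LFENCE for RDTSC ordering only if the bit reads back set;
 * otherwise keep using MFENCE.
 */
static void amd_select_rdtsc_barrier(struct cpuinfo_x86 *c)
{
	u64 val;

	if (!rdmsrl_safe(MSR_F10H_DECFG, &val) &&
	    (val & MSR_F10H_DECFG_LFENCE_SERIALIZE))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	else
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
}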
Signed-off-by: Tom Lendacky Signed-off-by: Thomas Gleixner Reviewed-by: Reviewed-by: Borislav Petkov Cc: Peter Zijlstra Cc: Tim Chen Cc: Dave Hansen Cc: Borislav Petkov Cc: Dan Williams Cc: Linus Torvalds Cc: Greg Kroah-Hartman Cc: David Woodhouse Cc: Paul Turner Link: https://lkml.kernel.org/r/20180108220932.12580.52458.stgit@tlendack-t1.amdoffice.net Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/msr-index.h | 1 + arch/x86/kernel/cpu/amd.c | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 1e7d710fef4306..fa11fb1fa570e9 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -354,6 +354,7 @@ #define MSR_FAM10H_NODE_ID 0xc001100c #define MSR_F10H_DECFG 0xc0011029 #define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 +#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) /* K8 MSRs */ #define MSR_K8_TOP_MEM1 0xc001001a diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 5b438d81beb227..ea831c85819583 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -829,6 +829,9 @@ static void init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_K8); if (cpu_has(c, X86_FEATURE_XMM2)) { + unsigned long long val; + int ret; + /* * A serializing LFENCE has less overhead than MFENCE, so * use it for execution serialization. On families which @@ -839,8 +842,19 @@ static void init_amd(struct cpuinfo_x86 *c) msr_set_bit(MSR_F10H_DECFG, MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); - /* MFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); + /* + * Verify that the MSR write was successful (could be running + * under a hypervisor) and only then assume that LFENCE is + * serializing. + */ + ret = rdmsrl_safe(MSR_F10H_DECFG, &val); + if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { + /* A serializing LFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); + } else { + /* MFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); + } } /* From 31431b7b4665cae7bc197980e42a5f3728258688 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 9 Jan 2018 15:02:51 +0000 Subject: [PATCH 1353/3326] sysfs/cpu: Fix typos in vulnerability documentation commit 9ecccfaa7cb5249bd31bdceb93fcf5bedb8a24d8 upstream. 
Fixes: 87590ce6e ("sysfs/cpu: Add vulnerability folder") Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Signed-off-by: Greg Kroah-Hartman --- Documentation/ABI/testing/sysfs-devices-system-cpu | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index bd3a88e16d8b45..258902db14bf18 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -378,7 +378,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/meltdown /sys/devices/system/cpu/vulnerabilities/spectre_v1 /sys/devices/system/cpu/vulnerabilities/spectre_v2 -Date: Januar 2018 +Date: January 2018 Contact: Linux kernel mailing list Description: Information about CPU vulnerabilities @@ -388,4 +388,4 @@ Description: Information about CPU vulnerabilities "Not affected" CPU is not affected by the vulnerability "Vulnerable" CPU is affected and no mitigation in effect - "Mitigation: $M" CPU is affetcted and mitigation $M is in effect + "Mitigation: $M" CPU is affected and mitigation $M is in effect From b50f563f7c1a4dc307ba2d7f3f9b8ef2f98a2aa2 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 10 Jan 2018 12:28:16 +0100 Subject: [PATCH 1354/3326] x86/alternatives: Fix optimize_nops() checking commit 612e8e9350fd19cae6900cf36ea0c6892d1a0dca upstream. The alternatives code checks only the first byte whether it is a NOP, but with NOPs in front of the payload and having actual instructions after it breaks the "optimized' test. Make sure to scan all bytes before deciding to optimize the NOPs in there. Reported-by: David Woodhouse Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Tom Lendacky Cc: Andi Kleen Cc: Tim Chen Cc: Peter Zijlstra Cc: Jiri Kosina Cc: Dave Hansen Cc: Andi Kleen Cc: Andrew Lutomirski Cc: Linus Torvalds Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/20180110112815.mgciyf5acwacphkq@pd.tnic Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/alternative.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 3344d3382e9139..e0b97e4d1db556 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -344,9 +344,12 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf) static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) { unsigned long flags; + int i; - if (instr[0] != 0x90) - return; + for (i = 0; i < a->padlen; i++) { + if (instr[i] != 0x90) + return; + } local_irq_save(flags); add_nops(instr + (a->instrlen - a->padlen), a->padlen); From 22e64ef9cdc3eee09e230855e01f1a3826494ce7 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Wed, 10 Jan 2018 14:49:39 -0800 Subject: [PATCH 1355/3326] x86/pti: Make unpoison of pgd for trusted boot work for real commit 445b69e3b75e42362a5bdc13c8b8f61599e2228a upstream. The inital fix for trusted boot and PTI potentially misses the pgd clearing if pud_alloc() sets a PGD. It probably works in *practice* because for two adjacent calls to map_tboot_page() that share a PGD entry, the first will clear NX, *then* allocate and set the PGD (without NX clear). The second call will *not* allocate but will clear the NX bit. Defer the NX clearing to a point after it is known that all top-level allocations have occurred. Add a comment to clarify why. 
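For illustration only (editor's sketch, not the applied hunk): the ordering the changelog describes. Names follow the real map_tboot_page(), but the pmd/pte handling in the middle is elided; the point is only that the NX clear has to come after every allocation that may write the PGD entry, otherwise an early clear is lost when p4d_alloc() or pud_alloc() populates the entry with _PAGE_NX still set.

/* Sketch: defer the unpoisoning until all top-level tables exist. */
static int map_tboot_page_sketch(unsigned long vaddr, unsigned long pfn,
				 pgprot_t prot)
{
	pgd_t *pgd = pgd_offset(&tboot_mm, vaddr);
	p4d_t *p4d = p4d_alloc(&tboot_mm, pgd, vaddr);
	pud_t *pud;

	if (!p4d)
		return -1;
	pud = pud_alloc(&tboot_mm, p4d, vaddr);
	if (!pud)
		return -1;
	/* ... pmd/pte allocation and set_pte_at(), as in the real code ... */

	/* Only now is it safe to clear the PTI poison on this entry. */
	pgd->pgd &= ~_PAGE_NX;
	return 0;
}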
[ tglx: Massaged changelog ] Fixes: 262b6b30087 ("x86/tboot: Unbreak tboot with PTI enabled") Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Reviewed-by: Andrea Arcangeli Cc: Jon Masters Cc: "Tim Chen" Cc: gnomes@lxorguk.ukuu.org.uk Cc: peterz@infradead.org Cc: ning.sun@intel.com Cc: tboot-devel@lists.sourceforge.net Cc: andi@firstfloor.org Cc: luto@kernel.org Cc: law@redhat.com Cc: pbonzini@redhat.com Cc: torvalds@linux-foundation.org Cc: gregkh@linux-foundation.org Cc: dwmw@amazon.co.uk Cc: nickc@redhat.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180110224939.2695CD47@viggo.jf.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/tboot.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 75869a4b6c4188..a2486f44407347 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -127,7 +127,6 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn, p4d = p4d_alloc(&tboot_mm, pgd, vaddr); if (!p4d) return -1; - pgd->pgd &= ~_PAGE_NX; pud = pud_alloc(&tboot_mm, p4d, vaddr); if (!pud) return -1; @@ -139,6 +138,17 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn, return -1; set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); pte_unmap(pte); + + /* + * PTI poisons low addresses in the kernel page tables in the + * name of making them unusable for userspace. To execute + * code at such a low address, the poison must be cleared. + * + * Note: 'pgd' actually gets set in p4d_alloc() _or_ + * pud_alloc() depending on 4/5-level paging. + */ + pgd->pgd &= ~_PAGE_NX; + return 0; } From 6a4d11820d12836785f64a609eecbcadc0d526d0 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 11 Jan 2018 21:46:23 +0000 Subject: [PATCH 1356/3326] objtool: Detect jumps to retpoline thunks commit 39b735332cb8b33a27c28592d969e4016c86c3ea upstream. A direct jump to a retpoline thunk is really an indirect jump in disguise. Change the objtool instruction type accordingly. Objtool needs to know where indirect branches are so it can detect switch statement jump tables. This fixes a bunch of warnings with CONFIG_RETPOLINE like: arch/x86/events/intel/uncore_nhmex.o: warning: objtool: nhmex_rbox_msr_enable_event()+0x44: sibling call from callable instruction with modified stack frame kernel/signal.o: warning: objtool: copy_siginfo_to_user()+0x91: sibling call from callable instruction with modified stack frame ... Signed-off-by: Josh Poimboeuf Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-2-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- tools/objtool/check.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 9b341584eb1b56..de053fb7049b04 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -456,6 +456,13 @@ static int add_jump_destinations(struct objtool_file *file) } else if (rela->sym->sec->idx) { dest_sec = rela->sym->sec; dest_off = rela->sym->sym.st_value + rela->addend + 4; + } else if (strstr(rela->sym->name, "_indirect_thunk_")) { + /* + * Retpoline jumps are really dynamic jumps in + * disguise, so convert them accordingly. 
+ */ + insn->type = INSN_JUMP_DYNAMIC; + continue; } else { /* sibling call */ insn->jump_dest = 0; From 6b95f61a41349ef4a115520ac4868bf52555eab9 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 11 Jan 2018 21:46:24 +0000 Subject: [PATCH 1357/3326] objtool: Allow alternatives to be ignored commit 258c76059cece01bebae098e81bacb1af2edad17 upstream. Getting objtool to understand retpolines is going to be a bit of a challenge. For now, take advantage of the fact that retpolines are patched in with alternatives. Just read the original (sane) non-alternative instruction, and ignore the patched-in retpoline. This allows objtool to understand the control flow *around* the retpoline, even if it can't yet follow what's inside. This means the ORC unwinder will fail to unwind from inside a retpoline, but will work fine otherwise. Signed-off-by: Josh Poimboeuf Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-3-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- tools/objtool/check.c | 62 ++++++++++++++++++++++++++++++++++++++----- tools/objtool/check.h | 2 +- 2 files changed, 57 insertions(+), 7 deletions(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index de053fb7049b04..f40d46e24bcce3 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -427,6 +427,40 @@ static void add_ignores(struct objtool_file *file) } } +/* + * FIXME: For now, just ignore any alternatives which add retpolines. This is + * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline. + * But it at least allows objtool to understand the control flow *around* the + * retpoline. + */ +static int add_nospec_ignores(struct objtool_file *file) +{ + struct section *sec; + struct rela *rela; + struct instruction *insn; + + sec = find_section_by_name(file->elf, ".rela.discard.nospec"); + if (!sec) + return 0; + + list_for_each_entry(rela, &sec->rela_list, list) { + if (rela->sym->type != STT_SECTION) { + WARN("unexpected relocation symbol type in %s", sec->name); + return -1; + } + + insn = find_insn(file, rela->sym->sec, rela->addend); + if (!insn) { + WARN("bad .discard.nospec entry"); + return -1; + } + + insn->ignore_alts = true; + } + + return 0; +} + /* * Find the destination instructions for all jumps. */ @@ -509,11 +543,18 @@ static int add_call_destinations(struct objtool_file *file) dest_off = insn->offset + insn->len + insn->immediate; insn->call_dest = find_symbol_by_offset(insn->sec, dest_off); + /* + * FIXME: Thanks to retpolines, it's now considered + * normal for a function to call within itself. So + * disable this warning for now. 
+ */ +#if 0 if (!insn->call_dest) { WARN_FUNC("can't find call dest symbol at offset 0x%lx", insn->sec, insn->offset, dest_off); return -1; } +#endif } else if (rela->sym->type == STT_SECTION) { insn->call_dest = find_symbol_by_offset(rela->sym->sec, rela->addend+4); @@ -678,12 +719,6 @@ static int add_special_section_alts(struct objtool_file *file) return ret; list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { - alt = malloc(sizeof(*alt)); - if (!alt) { - WARN("malloc failed"); - ret = -1; - goto out; - } orig_insn = find_insn(file, special_alt->orig_sec, special_alt->orig_off); @@ -694,6 +729,10 @@ static int add_special_section_alts(struct objtool_file *file) goto out; } + /* Ignore retpoline alternatives. */ + if (orig_insn->ignore_alts) + continue; + new_insn = NULL; if (!special_alt->group || special_alt->new_len) { new_insn = find_insn(file, special_alt->new_sec, @@ -719,6 +758,13 @@ static int add_special_section_alts(struct objtool_file *file) goto out; } + alt = malloc(sizeof(*alt)); + if (!alt) { + WARN("malloc failed"); + ret = -1; + goto out; + } + alt->insn = new_insn; list_add_tail(&alt->list, &orig_insn->alts); @@ -1035,6 +1081,10 @@ static int decode_sections(struct objtool_file *file) add_ignores(file); + ret = add_nospec_ignores(file); + if (ret) + return ret; + ret = add_jump_destinations(file); if (ret) return ret; diff --git a/tools/objtool/check.h b/tools/objtool/check.h index 47d9ea70a83d9f..dbadb304a410af 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -44,7 +44,7 @@ struct instruction { unsigned int len; unsigned char type; unsigned long immediate; - bool alt_group, visited, dead_end, ignore, hint, save, restore; + bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; struct symbol *call_dest; struct instruction *jump_dest; struct list_head alts; From 3a72bd4b60da338e66922e4f9eded174b3ad147d Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 11 Jan 2018 21:46:25 +0000 Subject: [PATCH 1358/3326] x86/retpoline: Add initial retpoline support commit 76b043848fd22dbf7f8bf3a1452f8c70d557b860 upstream. Enable the use of -mindirect-branch=thunk-extern in newer GCC, and provide the corresponding thunks. Provide assembler macros for invoking the thunks in the same way that GCC does, from native and inline assembler. This adds X86_FEATURE_RETPOLINE and sets it by default on all CPUs. In some circumstances, IBRS microcode features may be used instead, and the retpoline can be disabled. On AMD CPUs if lfence is serialising, the retpoline can be dramatically simplified to a simple "lfence; jmp *\reg". A future patch, after it has been verified that lfence really is serialising in all circumstances, can enable this by setting the X86_FEATURE_RETPOLINE_AMD feature bit in addition to X86_FEATURE_RETPOLINE. Do not align the retpoline in the altinstr section, because there is no guarantee that it stays aligned when it's copied over the oldinstr during alternative patching. 
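For orientation only (editor's example, not from the patch): what the compiler half of this change does to ordinary C. The function below is hypothetical; with CONFIG_RETPOLINE and a compiler that accepts -mindirect-branch=thunk-extern, its indirect call is emitted as a call to one of the __x86_indirect_thunk_<reg> symbols added in arch/x86/lib/retpoline.S.

/*
 * Hypothetical example. Without retpolines this compiles to something
 * like "call *%rax". With -mindirect-branch=thunk-extern it becomes
 * "call __x86_indirect_thunk_rax", and the out-of-line thunk runs the
 * RSB-capturing call/pause/jmp sequence, or a plain "lfence; jmp *%rax"
 * once the X86_FEATURE_RETPOLINE_AMD alternative is patched in.
 */
static int dispatch(int (*handler)(int arg), int arg)
{
	return handler(arg);
}

Because the thunks are out of line (thunk-extern rather than thunk-inline), each register has a single copy that the alternatives machinery can later rewrite, which is how the simpler AMD form can replace the full retpoline at boot.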
[ Andi Kleen: Rename the macros, add CONFIG_RETPOLINE option, export thunks] [ tglx: Put actual function CALL/JMP in front of the macros, convert to symbolic labels ] [ dwmw2: Convert back to numeric labels, merge objtool fixes ] Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Acked-by: Arjan van de Ven Acked-by: Ingo Molnar Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-4-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/Kconfig | 13 +++ arch/x86/Makefile | 10 ++ arch/x86/include/asm/asm-prototypes.h | 25 +++++ arch/x86/include/asm/cpufeatures.h | 2 + arch/x86/include/asm/nospec-branch.h | 128 ++++++++++++++++++++++++++ arch/x86/kernel/cpu/common.c | 4 + arch/x86/lib/Makefile | 1 + arch/x86/lib/retpoline.S | 48 ++++++++++ 8 files changed, 231 insertions(+) create mode 100644 arch/x86/include/asm/nospec-branch.h create mode 100644 arch/x86/lib/retpoline.S diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7184ebb0cd7846..17de6acc0eab62 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -429,6 +429,19 @@ config GOLDFISH def_bool y depends on X86_GOLDFISH +config RETPOLINE + bool "Avoid speculative indirect branches in kernel" + default y + help + Compile kernel with the retpoline compiler options to guard against + kernel-to-user data leaks by avoiding speculative indirect + branches. Requires a compiler with -mindirect-branch=thunk-extern + support for full protection. The kernel may run slower. + + Without compiler support, at least indirect branches in assembler + code are eliminated. Since this includes the syscall entry path, + it is not entirely pointless. + config INTEL_RDT bool "Intel Resource Director Technology support" default n diff --git a/arch/x86/Makefile b/arch/x86/Makefile index a20eacd9c7e9a7..974c61864978c4 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -235,6 +235,16 @@ KBUILD_CFLAGS += -Wno-sign-compare # KBUILD_CFLAGS += -fno-asynchronous-unwind-tables +# Avoid indirect branches in kernel to deal with Spectre +ifdef CONFIG_RETPOLINE + RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) + ifneq ($(RETPOLINE_CFLAGS),) + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE + else + $(warning CONFIG_RETPOLINE=y, but not supported by the compiler. Toolchain update recommended.) 
+ endif +endif + archscripts: scripts_basic $(Q)$(MAKE) $(build)=arch/x86/tools relocs diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index ff700d81e91efc..0927cdc4f94601 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -11,7 +11,32 @@ #include #include #include +#include #ifndef CONFIG_X86_CMPXCHG64 extern void cmpxchg8b_emu(void); #endif + +#ifdef CONFIG_RETPOLINE +#ifdef CONFIG_X86_32 +#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void); +#else +#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void); +INDIRECT_THUNK(8) +INDIRECT_THUNK(9) +INDIRECT_THUNK(10) +INDIRECT_THUNK(11) +INDIRECT_THUNK(12) +INDIRECT_THUNK(13) +INDIRECT_THUNK(14) +INDIRECT_THUNK(15) +#endif +INDIRECT_THUNK(ax) +INDIRECT_THUNK(bx) +INDIRECT_THUNK(cx) +INDIRECT_THUNK(dx) +INDIRECT_THUNK(si) +INDIRECT_THUNK(di) +INDIRECT_THUNK(bp) +INDIRECT_THUNK(sp) +#endif /* CONFIG_RETPOLINE */ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 1641c2f9636347..f275447862f458 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -203,6 +203,8 @@ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ +#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h new file mode 100644 index 00000000000000..e20e92ef2ca8ba --- /dev/null +++ b/arch/x86/include/asm/nospec-branch.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __NOSPEC_BRANCH_H__ +#define __NOSPEC_BRANCH_H__ + +#include +#include +#include + +#ifdef __ASSEMBLY__ + +/* + * This should be used immediately before a retpoline alternative. It tells + * objtool where the retpolines are so that it can make sense of the control + * flow by just reading the original instruction(s) and ignoring the + * alternatives. + */ +.macro ANNOTATE_NOSPEC_ALTERNATIVE + .Lannotate_\@: + .pushsection .discard.nospec + .long .Lannotate_\@ - . + .popsection +.endm + +/* + * These are the bare retpoline primitives for indirect jmp and call. + * Do not use these directly; they only exist to make the ALTERNATIVE + * invocation below less ugly. + */ +.macro RETPOLINE_JMP reg:req + call .Ldo_rop_\@ +.Lspec_trap_\@: + pause + jmp .Lspec_trap_\@ +.Ldo_rop_\@: + mov \reg, (%_ASM_SP) + ret +.endm + +/* + * This is a wrapper around RETPOLINE_JMP so the called function in reg + * returns to the instruction after the macro. + */ +.macro RETPOLINE_CALL reg:req + jmp .Ldo_call_\@ +.Ldo_retpoline_jmp_\@: + RETPOLINE_JMP \reg +.Ldo_call_\@: + call .Ldo_retpoline_jmp_\@ +.endm + +/* + * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple + * indirect jmp/call which may be susceptible to the Spectre variant 2 + * attack. 
+ */ +.macro JMP_NOSPEC reg:req +#ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE_2 __stringify(jmp *\reg), \ + __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ + __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD +#else + jmp *\reg +#endif +.endm + +.macro CALL_NOSPEC reg:req +#ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE_2 __stringify(call *\reg), \ + __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ + __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD +#else + call *\reg +#endif +.endm + +#else /* __ASSEMBLY__ */ + +#define ANNOTATE_NOSPEC_ALTERNATIVE \ + "999:\n\t" \ + ".pushsection .discard.nospec\n\t" \ + ".long 999b - .\n\t" \ + ".popsection\n\t" + +#if defined(CONFIG_X86_64) && defined(RETPOLINE) + +/* + * Since the inline asm uses the %V modifier which is only in newer GCC, + * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. + */ +# define CALL_NOSPEC \ + ANNOTATE_NOSPEC_ALTERNATIVE \ + ALTERNATIVE( \ + "call *%[thunk_target]\n", \ + "call __x86_indirect_thunk_%V[thunk_target]\n", \ + X86_FEATURE_RETPOLINE) +# define THUNK_TARGET(addr) [thunk_target] "r" (addr) + +#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) +/* + * For i386 we use the original ret-equivalent retpoline, because + * otherwise we'll run out of registers. We don't care about CET + * here, anyway. + */ +# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \ + " jmp 904f;\n" \ + " .align 16\n" \ + "901: call 903f;\n" \ + "902: pause;\n" \ + " jmp 902b;\n" \ + " .align 16\n" \ + "903: addl $4, %%esp;\n" \ + " pushl %[thunk_target];\n" \ + " ret;\n" \ + " .align 16\n" \ + "904: call 901b;\n", \ + X86_FEATURE_RETPOLINE) + +# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) +#else /* No retpoline */ +# define CALL_NOSPEC "call *%[thunk_target]\n" +# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* __NOSPEC_BRANCH_H__ */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 372ba3fb400fad..7a671d1ae3cba1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -905,6 +905,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) setup_force_cpu_bug(X86_BUG_SPECTRE_V1); setup_force_cpu_bug(X86_BUG_SPECTRE_V2); +#ifdef CONFIG_RETPOLINE + setup_force_cpu_cap(X86_FEATURE_RETPOLINE); +#endif + fpu__init_system(c); #ifdef CONFIG_X86_32 diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 457f681ef37921..d435c89875c148 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -26,6 +26,7 @@ lib-y += memcpy_$(BITS).o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o +lib-$(CONFIG_RETPOLINE) += retpoline.o obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S new file mode 100644 index 00000000000000..cb45c6cb465f4b --- /dev/null +++ b/arch/x86/lib/retpoline.S @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include +#include +#include +#include +#include + +.macro THUNK reg + .section .text.__x86.indirect_thunk.\reg + +ENTRY(__x86_indirect_thunk_\reg) + CFI_STARTPROC + JMP_NOSPEC %\reg + CFI_ENDPROC +ENDPROC(__x86_indirect_thunk_\reg) +.endm + +/* + * Despite being an assembler file we can't just use .irp here + * because __KSYM_DEPS__ only uses the C preprocessor and would + * only see one instance of 
"__x86_indirect_thunk_\reg" rather + * than one per register with the correct names. So we do it + * the simple and nasty way... + */ +#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg) +#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg) + +GENERATE_THUNK(_ASM_AX) +GENERATE_THUNK(_ASM_BX) +GENERATE_THUNK(_ASM_CX) +GENERATE_THUNK(_ASM_DX) +GENERATE_THUNK(_ASM_SI) +GENERATE_THUNK(_ASM_DI) +GENERATE_THUNK(_ASM_BP) +GENERATE_THUNK(_ASM_SP) +#ifdef CONFIG_64BIT +GENERATE_THUNK(r8) +GENERATE_THUNK(r9) +GENERATE_THUNK(r10) +GENERATE_THUNK(r11) +GENERATE_THUNK(r12) +GENERATE_THUNK(r13) +GENERATE_THUNK(r14) +GENERATE_THUNK(r15) +#endif From dcd4311d0e646ae9e739f7238002b664613f2355 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 11 Jan 2018 21:46:26 +0000 Subject: [PATCH 1359/3326] x86/spectre: Add boot time option to select Spectre v2 mitigation commit da285121560e769cc31797bba6422eea71d473e0 upstream. Add a spectre_v2= option to select the mitigation used for the indirect branch speculation vulnerability. Currently, the only option available is retpoline, in its various forms. This will be expanded to cover the new IBRS/IBPB microcode features. The RETPOLINE_AMD feature relies on a serializing LFENCE for speculation control. For AMD hardware, only set RETPOLINE_AMD if LFENCE is a serializing instruction, which is indicated by the LFENCE_RDTSC feature. [ tglx: Folded back the LFENCE/AMD fixes and reworked it so IBRS integration becomes simple ] Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-5-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- .../admin-guide/kernel-parameters.txt | 28 ++++ arch/x86/include/asm/nospec-branch.h | 10 ++ arch/x86/kernel/cpu/bugs.c | 158 +++++++++++++++++- arch/x86/kernel/cpu/common.c | 4 - 4 files changed, 195 insertions(+), 5 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 905991745d261d..8122b5f98ea1eb 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2599,6 +2599,11 @@ nosmt [KNL,S390] Disable symmetric multithreading (SMT). Equivalent to smt=1. + nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 + (indirect branch prediction) vulnerability. System may + allow data leaks with this option, which is equivalent + to spectre_v2=off. + noxsave [BUGS=X86] Disables x86 extended register state save and restore using xsave. The kernel will fallback to enabling legacy floating-point and sse state. @@ -3908,6 +3913,29 @@ sonypi.*= [HW] Sony Programmable I/O Control Device driver See Documentation/laptops/sonypi.txt + spectre_v2= [X86] Control mitigation of Spectre variant 2 + (indirect branch speculation) vulnerability. + + on - unconditionally enable + off - unconditionally disable + auto - kernel detects whether your CPU model is + vulnerable + + Selecting 'on' will, and 'auto' may, choose a + mitigation method at run time according to the + CPU, the available microcode, the setting of the + CONFIG_RETPOLINE configuration option, and the + compiler with which the kernel was built. 
+ + Specific mitigations can also be selected manually: + + retpoline - replace indirect branches + retpoline,generic - google's original retpoline + retpoline,amd - AMD-specific minimal thunk + + Not specifying this option is equivalent to + spectre_v2=auto. + spia_io_base= [HW,MTD] spia_fio_base= spia_pedr= diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index e20e92ef2ca8ba..ea034fa6e261e7 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -124,5 +124,15 @@ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) #endif +/* The Spectre V2 mitigation variants */ +enum spectre_v2_mitigation { + SPECTRE_V2_NONE, + SPECTRE_V2_RETPOLINE_MINIMAL, + SPECTRE_V2_RETPOLINE_MINIMAL_AMD, + SPECTRE_V2_RETPOLINE_GENERIC, + SPECTRE_V2_RETPOLINE_AMD, + SPECTRE_V2_IBRS, +}; + #endif /* __ASSEMBLY__ */ #endif /* __NOSPEC_BRANCH_H__ */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 76ad6cb44b4026..e4dc26185aa70b 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -11,6 +11,9 @@ #include #include #include + +#include +#include #include #include #include @@ -21,6 +24,8 @@ #include #include +static void __init spectre_v2_select_mitigation(void); + void __init check_bugs(void) { identify_boot_cpu(); @@ -30,6 +35,9 @@ void __init check_bugs(void) print_cpu_info(&boot_cpu_data); } + /* Select the proper spectre mitigation before patching alternatives */ + spectre_v2_select_mitigation(); + #ifdef CONFIG_X86_32 /* * Check whether we are able to run this kernel safely on SMP. @@ -62,6 +70,153 @@ void __init check_bugs(void) #endif } +/* The kernel command line selection */ +enum spectre_v2_mitigation_cmd { + SPECTRE_V2_CMD_NONE, + SPECTRE_V2_CMD_AUTO, + SPECTRE_V2_CMD_FORCE, + SPECTRE_V2_CMD_RETPOLINE, + SPECTRE_V2_CMD_RETPOLINE_GENERIC, + SPECTRE_V2_CMD_RETPOLINE_AMD, +}; + +static const char *spectre_v2_strings[] = { + [SPECTRE_V2_NONE] = "Vulnerable", + [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", + [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", + [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", + [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", +}; + +#undef pr_fmt +#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt + +static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; + +static void __init spec2_print_if_insecure(const char *reason) +{ + if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + pr_info("%s\n", reason); +} + +static void __init spec2_print_if_secure(const char *reason) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + pr_info("%s\n", reason); +} + +static inline bool retp_compiler(void) +{ + return __is_defined(RETPOLINE); +} + +static inline bool match_option(const char *arg, int arglen, const char *opt) +{ + int len = strlen(opt); + + return len == arglen && !strncmp(arg, opt, len); +} + +static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) +{ + char arg[20]; + int ret; + + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, + sizeof(arg)); + if (ret > 0) { + if (match_option(arg, ret, "off")) { + goto disable; + } else if (match_option(arg, ret, "on")) { + spec2_print_if_secure("force enabled on command line."); + return SPECTRE_V2_CMD_FORCE; + } else if (match_option(arg, ret, "retpoline")) { + spec2_print_if_insecure("retpoline selected on command line."); + return SPECTRE_V2_CMD_RETPOLINE; + } else if 
(match_option(arg, ret, "retpoline,amd")) { + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { + pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); + return SPECTRE_V2_CMD_AUTO; + } + spec2_print_if_insecure("AMD retpoline selected on command line."); + return SPECTRE_V2_CMD_RETPOLINE_AMD; + } else if (match_option(arg, ret, "retpoline,generic")) { + spec2_print_if_insecure("generic retpoline selected on command line."); + return SPECTRE_V2_CMD_RETPOLINE_GENERIC; + } else if (match_option(arg, ret, "auto")) { + return SPECTRE_V2_CMD_AUTO; + } + } + + if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2")) + return SPECTRE_V2_CMD_AUTO; +disable: + spec2_print_if_insecure("disabled on command line."); + return SPECTRE_V2_CMD_NONE; +} + +static void __init spectre_v2_select_mitigation(void) +{ + enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); + enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; + + /* + * If the CPU is not affected and the command line mode is NONE or AUTO + * then nothing to do. + */ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && + (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) + return; + + switch (cmd) { + case SPECTRE_V2_CMD_NONE: + return; + + case SPECTRE_V2_CMD_FORCE: + /* FALLTRHU */ + case SPECTRE_V2_CMD_AUTO: + goto retpoline_auto; + + case SPECTRE_V2_CMD_RETPOLINE_AMD: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_amd; + break; + case SPECTRE_V2_CMD_RETPOLINE_GENERIC: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_generic; + break; + case SPECTRE_V2_CMD_RETPOLINE: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_auto; + break; + } + pr_err("kernel not compiled with retpoline; no mitigation available!"); + return; + +retpoline_auto: + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { + retpoline_amd: + if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { + pr_err("LFENCE not serializing. Switching to generic retpoline\n"); + goto retpoline_generic; + } + mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : + SPECTRE_V2_RETPOLINE_MINIMAL_AMD; + setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); + setup_force_cpu_cap(X86_FEATURE_RETPOLINE); + } else { + retpoline_generic: + mode = retp_compiler() ? 
SPECTRE_V2_RETPOLINE_GENERIC : + SPECTRE_V2_RETPOLINE_MINIMAL; + setup_force_cpu_cap(X86_FEATURE_RETPOLINE); + } + + spectre_v2_enabled = mode; + pr_info("%s\n", spectre_v2_strings[mode]); +} + +#undef pr_fmt + #ifdef CONFIG_SYSFS ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) @@ -86,6 +241,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) return sprintf(buf, "Not affected\n"); - return sprintf(buf, "Vulnerable\n"); + + return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]); } #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 7a671d1ae3cba1..372ba3fb400fad 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -905,10 +905,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) setup_force_cpu_bug(X86_BUG_SPECTRE_V1); setup_force_cpu_bug(X86_BUG_SPECTRE_V2); -#ifdef CONFIG_RETPOLINE - setup_force_cpu_cap(X86_FEATURE_RETPOLINE); -#endif - fpu__init_system(c); #ifdef CONFIG_X86_32 From 79f2c12856404c5acfbc33e021108c0810a6d2b8 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 11 Jan 2018 21:46:27 +0000 Subject: [PATCH 1360/3326] x86/retpoline/crypto: Convert crypto assembler indirect jumps commit 9697fa39efd3fc3692f2949d4045f393ec58450b upstream. Convert all indirect jumps in crypto assembler code to use non-speculative sequences when CONFIG_RETPOLINE is enabled. Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Acked-by: Arjan van de Ven Acked-by: Ingo Molnar Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-6-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/aesni-intel_asm.S | 5 +++-- arch/x86/crypto/camellia-aesni-avx-asm_64.S | 3 ++- arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 3 ++- arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 3 ++- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 16627fec80b26b..3d09e3aca18dad 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -32,6 +32,7 @@ #include #include #include +#include /* * The following macros are used to move an (un)aligned 16 byte value to/from @@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8) pxor INC, STATE4 movdqu IV, 0x30(OUTP) - call *%r11 + CALL_NOSPEC %r11 movdqu 0x00(OUTP), INC pxor INC, STATE1 @@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8) _aesni_gf128mul_x_ble() movups IV, (IVP) - call *%r11 + CALL_NOSPEC %r11 movdqu 0x40(OUTP), INC pxor INC, STATE1 diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index f7c495e2863cb0..a14af6eb09cb07 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -17,6 +17,7 @@ #include #include +#include #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way: vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; - call *%r9; + CALL_NOSPEC %r9; addq $(16 * 16), %rsp; diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index eee5b3982cfd3c..b66bbfa62f50d7 100644 --- 
a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -12,6 +12,7 @@ #include #include +#include #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way: vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; - call *%r9; + CALL_NOSPEC %r9; addq $(16 * 32), %rsp; diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index 7a7de27c6f4152..d9b734d0c8cc78 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -45,6 +45,7 @@ #include #include +#include ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction @@ -172,7 +173,7 @@ continue_block: movzxw (bufp, %rax, 2), len lea crc_array(%rip), bufp lea (bufp, len, 1), bufp - jmp *bufp + JMP_NOSPEC bufp ################################################################ ## 2a) PROCESS FULL BLOCKS: From d4edaa981b8749acd8fdd7ab8d2be2585ebeeb9e Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 11 Jan 2018 21:46:28 +0000 Subject: [PATCH 1361/3326] x86/retpoline/entry: Convert entry assembler indirect jumps commit 2641f08bb7fc63a636a2b18173221d7040a3512e upstream. Convert indirect jumps in core 32/64bit entry assembler code to use non-speculative sequences when CONFIG_RETPOLINE is enabled. Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return address after the 'call' instruction must be *precisely* at the .Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work, and the use of alternatives will mess that up unless we play horrid games to prepend with NOPs and make the variants the same length. It's not worth it; in the case where we ALTERNATIVE out the retpoline, the first instruction at __x86.indirect_thunk.rax is going to be a bare jmp *%rax anyway. Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Acked-by: Ingo Molnar Acked-by: Arjan van de Ven Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-7-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_32.S | 5 +++-- arch/x86/entry/entry_64.S | 12 +++++++++--- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index ace8f321a5a1f2..a1f28a54f23a42 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -44,6 +44,7 @@ #include #include #include +#include .section .entry.text, "ax" @@ -290,7 +291,7 @@ ENTRY(ret_from_fork) /* kernel thread */ 1: movl %edi, %eax - call *%ebx + CALL_NOSPEC %ebx /* * A kernel thread is allowed to return here after successfully * calling do_execve(). 
Exit to userspace to complete the execve() @@ -919,7 +920,7 @@ common_exception: movl %ecx, %es TRACE_IRQS_OFF movl %esp, %eax # pt_regs pointer - call *%edi + CALL_NOSPEC %edi jmp ret_from_exception END(common_exception) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index dd696b966e5835..f5fda5f26e3468 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -37,6 +37,7 @@ #include #include #include +#include #include #include "calling.h" @@ -187,7 +188,7 @@ ENTRY(entry_SYSCALL_64_trampoline) */ pushq %rdi movq $entry_SYSCALL_64_stage2, %rdi - jmp *%rdi + JMP_NOSPEC %rdi END(entry_SYSCALL_64_trampoline) .popsection @@ -266,7 +267,12 @@ entry_SYSCALL_64_fastpath: * It might end up jumping to the slow path. If it jumps, RAX * and all argument registers are clobbered. */ +#ifdef CONFIG_RETPOLINE + movq sys_call_table(, %rax, 8), %rax + call __x86_indirect_thunk_rax +#else call *sys_call_table(, %rax, 8) +#endif .Lentry_SYSCALL_64_after_fastpath_call: movq %rax, RAX(%rsp) @@ -438,7 +444,7 @@ ENTRY(stub_ptregs_64) jmp entry_SYSCALL64_slow_path 1: - jmp *%rax /* Called from C */ + JMP_NOSPEC %rax /* Called from C */ END(stub_ptregs_64) .macro ptregs_stub func @@ -517,7 +523,7 @@ ENTRY(ret_from_fork) 1: /* kernel thread */ movq %r12, %rdi - call *%rbx + CALL_NOSPEC %rbx /* * A kernel thread is allowed to return here after successfully * calling do_execve(). Exit to userspace to complete the execve() From 82de9301b4ea4389ee889558f770513930bf803f Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 11 Jan 2018 21:46:29 +0000 Subject: [PATCH 1362/3326] x86/retpoline/ftrace: Convert ftrace assembler indirect jumps commit 9351803bd803cdbeb9b5a7850b7b6f464806e3db upstream. Convert all indirect jumps in ftrace assembler code to use non-speculative sequences when CONFIG_RETPOLINE is enabled. Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Acked-by: Arjan van de Ven Acked-by: Ingo Molnar Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-8-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/ftrace_32.S | 6 ++++-- arch/x86/kernel/ftrace_64.S | 8 ++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index b6c6468e10bc96..4c8440de335599 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S @@ -8,6 +8,7 @@ #include #include #include +#include #ifdef CC_USING_FENTRY # define function_hook __fentry__ @@ -197,7 +198,8 @@ ftrace_stub: movl 0x4(%ebp), %edx subl $MCOUNT_INSN_SIZE, %eax - call *ftrace_trace_function + movl ftrace_trace_function, %ecx + CALL_NOSPEC %ecx popl %edx popl %ecx @@ -241,5 +243,5 @@ return_to_handler: movl %eax, %ecx popl %edx popl %eax - jmp *%ecx + JMP_NOSPEC %ecx #endif diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index c832291d948a6b..7cb8ba08beb997 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -7,7 +7,7 @@ #include #include #include - +#include .code64 .section .entry.text, "ax" @@ -286,8 +286,8 @@ trace: * ip and parent ip are used and the list function is called when * function tracing is enabled. 
*/ - call *ftrace_trace_function - + movq ftrace_trace_function, %r8 + CALL_NOSPEC %r8 restore_mcount_regs jmp fgraph_trace @@ -329,5 +329,5 @@ GLOBAL(return_to_handler) movq 8(%rsp), %rdx movq (%rsp), %rax addq $24, %rsp - jmp *%rdi + JMP_NOSPEC %rdi #endif From e0dcc73d67359c67d8b81868c2dd388b0c6be1f2 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 11 Jan 2018 21:46:30 +0000 Subject: [PATCH 1363/3326] x86/retpoline/hyperv: Convert assembler indirect jumps commit e70e5892b28c18f517f29ab6e83bd57705104b31 upstream. Convert all indirect jumps in hyperv inline asm code to use non-speculative sequences when CONFIG_RETPOLINE is enabled. Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Acked-by: Arjan van de Ven Acked-by: Ingo Molnar Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-9-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 581bb54dd464e3..5119e4b555cc51 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -7,6 +7,7 @@ #include #include #include +#include /* * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent @@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) return U64_MAX; __asm__ __volatile__("mov %4, %%r8\n" - "call *%5" + CALL_NOSPEC : "=a" (hv_status), ASM_CALL_CONSTRAINT, "+c" (control), "+d" (input_address) - : "r" (output_address), "m" (hv_hypercall_pg) + : "r" (output_address), + THUNK_TARGET(hv_hypercall_pg) : "cc", "memory", "r8", "r9", "r10", "r11"); #else u32 input_address_hi = upper_32_bits(input_address); @@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) if (!hv_hypercall_pg) return U64_MAX; - __asm__ __volatile__("call *%7" + __asm__ __volatile__(CALL_NOSPEC : "=A" (hv_status), "+c" (input_address_lo), ASM_CALL_CONSTRAINT : "A" (control), "b" (input_address_hi), "D"(output_address_hi), "S"(output_address_lo), - "m" (hv_hypercall_pg) + THUNK_TARGET(hv_hypercall_pg) : "cc", "memory"); #endif /* !x86_64 */ return hv_status; @@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) #ifdef CONFIG_X86_64 { - __asm__ __volatile__("call *%4" + __asm__ __volatile__(CALL_NOSPEC : "=a" (hv_status), ASM_CALL_CONSTRAINT, "+c" (control), "+d" (input1) - : "m" (hv_hypercall_pg) + : THUNK_TARGET(hv_hypercall_pg) : "cc", "r8", "r9", "r10", "r11"); } #else @@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) u32 input1_hi = upper_32_bits(input1); u32 input1_lo = lower_32_bits(input1); - __asm__ __volatile__ ("call *%5" + __asm__ __volatile__ (CALL_NOSPEC : "=A"(hv_status), "+c"(input1_lo), ASM_CALL_CONSTRAINT : "A" (control), "b" (input1_hi), - "m" (hv_hypercall_pg) + THUNK_TARGET(hv_hypercall_pg) : "cc", "edi", "esi"); } #endif From 41f8af6a4656e9dd348506b4ebfa4677730994c3 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 11 Jan 2018 21:46:31 +0000 Subject: [PATCH 1364/3326] x86/retpoline/xen: Convert Xen hypercall indirect jumps commit ea08816d5b185ab3d09e95e393f265af54560350 upstream. 
Convert indirect call in Xen hypercall to use non-speculative sequence, when CONFIG_RETPOLINE is enabled. Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Acked-by: Arjan van de Ven Acked-by: Ingo Molnar Reviewed-by: Juergen Gross Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-10-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/xen/hypercall.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 7cb282e9e58777..bfd88261761392 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -217,9 +218,9 @@ privcmd_call(unsigned call, __HYPERCALL_5ARG(a1, a2, a3, a4, a5); stac(); - asm volatile("call *%[call]" + asm volatile(CALL_NOSPEC : __HYPERCALL_5PARAM - : [call] "a" (&hypercall_page[call]) + : [thunk_target] "a" (&hypercall_page[call]) : __HYPERCALL_CLOBBER5); clac(); From 470703735b39d743afe0d9e91edaab6ac400c6c7 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 11 Jan 2018 21:46:32 +0000 Subject: [PATCH 1365/3326] x86/retpoline/checksum32: Convert assembler indirect jumps commit 5096732f6f695001fa2d6f1335a2680b37912c69 upstream. Convert all indirect jumps in 32bit checksum assembler code to use non-speculative sequences when CONFIG_RETPOLINE is enabled. Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Acked-by: Arjan van de Ven Acked-by: Ingo Molnar Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-11-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/checksum_32.S | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 4d34bb548b41eb..46e71a74e6129b 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -29,7 +29,8 @@ #include #include #include - +#include + /* * computes a partial checksum, e.g. for TCP/UDP fragments */ @@ -156,7 +157,7 @@ ENTRY(csum_partial) negl %ebx lea 45f(%ebx,%ebx,2), %ebx testl %esi, %esi - jmp *%ebx + JMP_NOSPEC %ebx # Handle 2-byte-aligned regions 20: addw (%esi), %ax @@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic) andl $-32,%edx lea 3f(%ebx,%ebx), %ebx testl %esi, %esi - jmp *%ebx + JMP_NOSPEC %ebx 1: addl $64,%esi addl $64,%edi SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) From b0edc2dfb684b43c56d81a58271430a614a4975a Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 11 Jan 2018 21:46:33 +0000 Subject: [PATCH 1366/3326] x86/retpoline/irq32: Convert assembler indirect jumps commit 7614e913db1f40fff819b36216484dc3808995d4 upstream. Convert all indirect jumps in 32bit irq inline asm code to use non speculative sequences. 
Signed-off-by: Andi Kleen Signed-off-by: Thomas Gleixner Acked-by: Arjan van de Ven Acked-by: Ingo Molnar Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515707194-20531-12-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/irq_32.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index a83b3346a0e104..c1bdbd3d3232cb 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -20,6 +20,7 @@ #include #include +#include #ifdef CONFIG_DEBUG_STACKOVERFLOW @@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack); static void call_on_stack(void *func, void *stack) { asm volatile("xchgl %%ebx,%%esp \n" - "call *%%edi \n" + CALL_NOSPEC "movl %%ebx,%%esp \n" : "=b" (stack) : "0" (stack), - "D"(func) + [thunk_target] "D"(func) : "memory", "cc", "edx", "ecx", "eax"); } @@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) call_on_stack(print_stack_overflow, isp); asm volatile("xchgl %%ebx,%%esp \n" - "call *%%edi \n" + CALL_NOSPEC "movl %%ebx,%%esp \n" : "=a" (arg1), "=b" (isp) : "0" (desc), "1" (isp), - "D" (desc->handle_irq) + [thunk_target] "D" (desc->handle_irq) : "memory", "cc", "ecx"); return 1; } From b9cdaaf0a3becc52e8b4662fef28c452c4f009b4 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 12 Jan 2018 11:11:27 +0000 Subject: [PATCH 1367/3326] x86/retpoline: Fill return stack buffer on vmexit commit 117cc7a908c83697b0b737d15ae1eb5943afe35b upstream. In accordance with the Intel and AMD documentation, we need to overwrite all entries in the RSB on exiting a guest, to prevent malicious branch target predictions from affecting the host kernel. This is needed both for retpoline and for IBRS. [ak: numbers again for the RSB stuffing labels] Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Tested-by: Peter Zijlstra (Intel) Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515755487-8524-1-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 78 +++++++++++++++++++++++++++- arch/x86/kvm/svm.c | 4 ++ arch/x86/kvm/vmx.c | 4 ++ 3 files changed, 85 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index ea034fa6e261e7..402a11c803c38b 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -7,6 +7,48 @@ #include #include +/* + * Fill the CPU return stack buffer. + * + * Each entry in the RSB, if used for a speculative 'ret', contains an + * infinite 'pause; jmp' loop to capture speculative execution. + * + * This is required in various cases for retpoline and IBRS-based + * mitigations for the Spectre variant 2 vulnerability. Sometimes to + * eliminate potentially bogus entries from the RSB, and sometimes + * purely to ensure that it doesn't get empty, which on some CPUs would + * allow predictions from other (unwanted!) sources to be used. 
+ * + * We define a CPP macro such that it can be used from both .S files and + * inline assembly. It's possible to do a .macro and then include that + * from C via asm(".include ") but let's not go there. + */ + +#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ +#define RSB_FILL_LOOPS 16 /* To avoid underflow */ + +/* + * Google experimented with loop-unrolling and this turned out to be + * the optimal version — two calls, each with their own speculation + * trap should their return address end up getting used, in a loop. + */ +#define __FILL_RETURN_BUFFER(reg, nr, sp) \ + mov $(nr/2), reg; \ +771: \ + call 772f; \ +773: /* speculation trap */ \ + pause; \ + jmp 773b; \ +772: \ + call 774f; \ +775: /* speculation trap */ \ + pause; \ + jmp 775b; \ +774: \ + dec reg; \ + jnz 771b; \ + add $(BITS_PER_LONG/8) * nr, sp; + #ifdef __ASSEMBLY__ /* @@ -74,6 +116,20 @@ #else call *\reg #endif +.endm + + /* + * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP + * monstrosity above, manually. + */ +.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req +#ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE "jmp .Lskip_rsb_\@", \ + __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ + \ftr +.Lskip_rsb_\@: +#endif .endm #else /* __ASSEMBLY__ */ @@ -119,7 +175,7 @@ X86_FEATURE_RETPOLINE) # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) -#else /* No retpoline */ +#else /* No retpoline for C / inline asm */ # define CALL_NOSPEC "call *%[thunk_target]\n" # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) #endif @@ -134,5 +190,25 @@ enum spectre_v2_mitigation { SPECTRE_V2_IBRS, }; +/* + * On VMEXIT we must ensure that no RSB predictions learned in the guest + * can be followed in the host, by overwriting the RSB completely. Both + * retpoline and IBRS mitigations for Spectre v2 need this; only on future + * CPUs with IBRS_ATT *might* it be avoided. + */ +static inline void vmexit_fill_RSB(void) +{ +#ifdef CONFIG_RETPOLINE + unsigned long loops = RSB_CLEAR_LOOPS / 2; + + asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE("jmp 910f", + __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), + X86_FEATURE_RETPOLINE) + "910:" + : "=&r" (loops), ASM_CALL_CONSTRAINT + : "r" (loops) : "memory" ); +#endif +} #endif /* __ASSEMBLY__ */ #endif /* __NOSPEC_BRANCH_H__ */ diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 714e4804cbe280..6a8284f7232830 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include "trace.h" @@ -5013,6 +5014,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + /* Eliminate branch target predictions from guest mode */ + vmexit_fill_RSB(); + #ifdef CONFIG_X86_64 wrmsrl(MSR_GS_BASE, svm->host.gs_base); #else diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e6f291d88f3e37..ef16cf0f7cfd0c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -50,6 +50,7 @@ #include #include #include +#include #include "trace.h" #include "pmu.h" @@ -9483,6 +9484,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + /* Eliminate branch target predictions from guest mode */ + vmexit_fill_RSB(); + /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. 
Restore it if needed */ if (debugctlmsr) update_debugctlmsr(debugctlmsr); From 5a6e7a27d01eee812e37030aa083ac7bbba762c5 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 11 Jan 2018 17:16:51 -0800 Subject: [PATCH 1368/3326] selftests/x86: Add test_vsyscall commit 352909b49ba0d74929b96af6dfbefc854ab6ebb5 upstream. This tests that the vsyscall entries do what they're expected to do. It also confirms that attempts to read the vsyscall page behave as expected. If changes are made to the vsyscall code or its memory map handling, running this test in all three of vsyscall=none, vsyscall=emulate, and vsyscall=native are helpful. (Because it's easy, this also compares the vsyscall results to their vDSO equivalents.) Note to KAISER backporters: please test this under all three vsyscall modes. Also, in the emulate and native modes, make sure that test_vsyscall_64 agrees with the command line or config option as to which mode you're in. It's quite easy to mess up the kernel such that native mode accidentally emulates or vice versa. Greg, etc: please backport this to all your Meltdown-patched kernels. It'll help make sure the patches didn't regress vsyscalls. CSigned-off-by: Andy Lutomirski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Hugh Dickins Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/2b9c5a174c1d60fd7774461d518aa75598b1d8fd.1515719552.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/Makefile | 2 +- tools/testing/selftests/x86/test_vsyscall.c | 500 ++++++++++++++++++++ 2 files changed, 501 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/x86/test_vsyscall.c diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 7b1adeee4b0f19..91fbfa8fdc150b 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -7,7 +7,7 @@ include ../lib.mk TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \ check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \ - protection_keys test_vdso + protection_keys test_vdso test_vsyscall TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ test_FCMOV test_FCOMI test_FISTTP \ vdso_restorer diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c new file mode 100644 index 00000000000000..6e0bd52ad53d06 --- /dev/null +++ b/tools/testing/selftests/x86/test_vsyscall.c @@ -0,0 +1,500 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __x86_64__ +# define VSYS(x) (x) +#else +# define VSYS(x) 0 +#endif + +#ifndef SYS_getcpu +# ifdef __x86_64__ +# define SYS_getcpu 309 +# else +# define SYS_getcpu 318 +# endif +#endif + +static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), + int flags) +{ + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_sigaction = handler; + sa.sa_flags = SA_SIGINFO | flags; + sigemptyset(&sa.sa_mask); + if (sigaction(sig, &sa, 0)) + err(1, "sigaction"); +} + +/* vsyscalls and vDSO */ +bool should_read_vsyscall = false; + +typedef long (*gtod_t)(struct timeval 
*tv, struct timezone *tz); +gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000); +gtod_t vdso_gtod; + +typedef int (*vgettime_t)(clockid_t, struct timespec *); +vgettime_t vdso_gettime; + +typedef long (*time_func_t)(time_t *t); +time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400); +time_func_t vdso_time; + +typedef long (*getcpu_t)(unsigned *, unsigned *, void *); +getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800); +getcpu_t vdso_getcpu; + +static void init_vdso(void) +{ + void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); + if (!vdso) + vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); + if (!vdso) { + printf("[WARN]\tfailed to find vDSO\n"); + return; + } + + vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday"); + if (!vdso_gtod) + printf("[WARN]\tfailed to find gettimeofday in vDSO\n"); + + vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime"); + if (!vdso_gettime) + printf("[WARN]\tfailed to find clock_gettime in vDSO\n"); + + vdso_time = (time_func_t)dlsym(vdso, "__vdso_time"); + if (!vdso_time) + printf("[WARN]\tfailed to find time in vDSO\n"); + + vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu"); + if (!vdso_getcpu) { + /* getcpu() was never wired up in the 32-bit vDSO. */ + printf("[%s]\tfailed to find getcpu in vDSO\n", + sizeof(long) == 8 ? "WARN" : "NOTE"); + } +} + +static int init_vsys(void) +{ +#ifdef __x86_64__ + int nerrs = 0; + FILE *maps; + char line[128]; + bool found = false; + + maps = fopen("/proc/self/maps", "r"); + if (!maps) { + printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n"); + should_read_vsyscall = true; + return 0; + } + + while (fgets(line, sizeof(line), maps)) { + char r, x; + void *start, *end; + char name[128]; + if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s", + &start, &end, &r, &x, name) != 5) + continue; + + if (strcmp(name, "[vsyscall]")) + continue; + + printf("\tvsyscall map: %s", line); + + if (start != (void *)0xffffffffff600000 || + end != (void *)0xffffffffff601000) { + printf("[FAIL]\taddress range is nonsense\n"); + nerrs++; + } + + printf("\tvsyscall permissions are %c-%c\n", r, x); + should_read_vsyscall = (r == 'r'); + if (x != 'x') { + vgtod = NULL; + vtime = NULL; + vgetcpu = NULL; + } + + found = true; + break; + } + + fclose(maps); + + if (!found) { + printf("\tno vsyscall map in /proc/self/maps\n"); + should_read_vsyscall = false; + vgtod = NULL; + vtime = NULL; + vgetcpu = NULL; + } + + return nerrs; +#else + return 0; +#endif +} + +/* syscalls */ +static inline long sys_gtod(struct timeval *tv, struct timezone *tz) +{ + return syscall(SYS_gettimeofday, tv, tz); +} + +static inline int sys_clock_gettime(clockid_t id, struct timespec *ts) +{ + return syscall(SYS_clock_gettime, id, ts); +} + +static inline long sys_time(time_t *t) +{ + return syscall(SYS_time, t); +} + +static inline long sys_getcpu(unsigned * cpu, unsigned * node, + void* cache) +{ + return syscall(SYS_getcpu, cpu, node, cache); +} + +static jmp_buf jmpbuf; + +static void sigsegv(int sig, siginfo_t *info, void *ctx_void) +{ + siglongjmp(jmpbuf, 1); +} + +static double tv_diff(const struct timeval *a, const struct timeval *b) +{ + return (double)(a->tv_sec - b->tv_sec) + + (double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6; +} + +static int check_gtod(const struct timeval *tv_sys1, + const struct timeval *tv_sys2, + const struct timezone *tz_sys, + const char *which, + const struct timeval *tv_other, + const struct timezone *tz_other) +{ + int nerrs = 0; + double d1, 
d2; + + if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) { + printf("[FAIL] %s tz mismatch\n", which); + nerrs++; + } + + d1 = tv_diff(tv_other, tv_sys1); + d2 = tv_diff(tv_sys2, tv_other); + printf("\t%s time offsets: %lf %lf\n", which, d1, d2); + + if (d1 < 0 || d2 < 0) { + printf("[FAIL]\t%s time was inconsistent with the syscall\n", which); + nerrs++; + } else { + printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which); + } + + return nerrs; +} + +static int test_gtod(void) +{ + struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys; + struct timezone tz_sys, tz_vdso, tz_vsys; + long ret_vdso = -1; + long ret_vsys = -1; + int nerrs = 0; + + printf("[RUN]\ttest gettimeofday()\n"); + + if (sys_gtod(&tv_sys1, &tz_sys) != 0) + err(1, "syscall gettimeofday"); + if (vdso_gtod) + ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso); + if (vgtod) + ret_vsys = vgtod(&tv_vsys, &tz_vsys); + if (sys_gtod(&tv_sys2, &tz_sys) != 0) + err(1, "syscall gettimeofday"); + + if (vdso_gtod) { + if (ret_vdso == 0) { + nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso); + } else { + printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso); + nerrs++; + } + } + + if (vgtod) { + if (ret_vsys == 0) { + nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys); + } else { + printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys); + nerrs++; + } + } + + return nerrs; +} + +static int test_time(void) { + int nerrs = 0; + + printf("[RUN]\ttest time()\n"); + long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0; + long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1; + t_sys1 = sys_time(&t2_sys1); + if (vdso_time) + t_vdso = vdso_time(&t2_vdso); + if (vtime) + t_vsys = vtime(&t2_vsys); + t_sys2 = sys_time(&t2_sys2); + if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) { + printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2); + nerrs++; + return nerrs; + } + + if (vdso_time) { + if (t_vdso < 0 || t_vdso != t2_vdso) { + printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso); + nerrs++; + } else if (t_vdso < t_sys1 || t_vdso > t_sys2) { + printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2); + nerrs++; + } else { + printf("[OK]\tvDSO time() is okay\n"); + } + } + + if (vtime) { + if (t_vsys < 0 || t_vsys != t2_vsys) { + printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys); + nerrs++; + } else if (t_vsys < t_sys1 || t_vsys > t_sys2) { + printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2); + nerrs++; + } else { + printf("[OK]\tvsyscall time() is okay\n"); + } + } + + return nerrs; +} + +static int test_getcpu(int cpu) +{ + int nerrs = 0; + long ret_sys, ret_vdso = -1, ret_vsys = -1; + + printf("[RUN]\tgetcpu() on CPU %d\n", cpu); + + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) { + printf("[SKIP]\tfailed to force CPU %d\n", cpu); + return nerrs; + } + + unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys; + unsigned node = 0; + bool have_node = false; + ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0); + if (vdso_getcpu) + ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0); + if (vgetcpu) + ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0); + + if (ret_sys == 0) { + if (cpu_sys != cpu) { + printf("[FAIL]\tsyscall reported CPU %hu but should be 
%d\n", cpu_sys, cpu); + nerrs++; + } + + have_node = true; + node = node_sys; + } + + if (vdso_getcpu) { + if (ret_vdso) { + printf("[FAIL]\tvDSO getcpu() failed\n"); + nerrs++; + } else { + if (!have_node) { + have_node = true; + node = node_vdso; + } + + if (cpu_vdso != cpu) { + printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu); + nerrs++; + } else { + printf("[OK]\tvDSO reported correct CPU\n"); + } + + if (node_vdso != node) { + printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node); + nerrs++; + } else { + printf("[OK]\tvDSO reported correct node\n"); + } + } + } + + if (vgetcpu) { + if (ret_vsys) { + printf("[FAIL]\tvsyscall getcpu() failed\n"); + nerrs++; + } else { + if (!have_node) { + have_node = true; + node = node_vsys; + } + + if (cpu_vsys != cpu) { + printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu); + nerrs++; + } else { + printf("[OK]\tvsyscall reported correct CPU\n"); + } + + if (node_vsys != node) { + printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, node); + nerrs++; + } else { + printf("[OK]\tvsyscall reported correct node\n"); + } + } + } + + return nerrs; +} + +static int test_vsys_r(void) +{ +#ifdef __x86_64__ + printf("[RUN]\tChecking read access to the vsyscall page\n"); + bool can_read; + if (sigsetjmp(jmpbuf, 1) == 0) { + *(volatile int *)0xffffffffff600000; + can_read = true; + } else { + can_read = false; + } + + if (can_read && !should_read_vsyscall) { + printf("[FAIL]\tWe have read access, but we shouldn't\n"); + return 1; + } else if (!can_read && should_read_vsyscall) { + printf("[FAIL]\tWe don't have read access, but we should\n"); + return 1; + } else { + printf("[OK]\tgot expected result\n"); + } +#endif + + return 0; +} + + +#ifdef __x86_64__ +#define X86_EFLAGS_TF (1UL << 8) +static volatile sig_atomic_t num_vsyscall_traps; + +static unsigned long get_eflags(void) +{ + unsigned long eflags; + asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags)); + return eflags; +} + +static void set_eflags(unsigned long eflags) +{ + asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags"); +} + +static void sigtrap(int sig, siginfo_t *info, void *ctx_void) +{ + ucontext_t *ctx = (ucontext_t *)ctx_void; + unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP]; + + if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0) + num_vsyscall_traps++; +} + +static int test_native_vsyscall(void) +{ + time_t tmp; + bool is_native; + + if (!vtime) + return 0; + + printf("[RUN]\tchecking for native vsyscall\n"); + sethandler(SIGTRAP, sigtrap, 0); + set_eflags(get_eflags() | X86_EFLAGS_TF); + vtime(&tmp); + set_eflags(get_eflags() & ~X86_EFLAGS_TF); + + /* + * If vsyscalls are emulated, we expect a single trap in the + * vsyscall page -- the call instruction will trap with RIP + * pointing to the entry point before emulation takes over. + * In native mode, we expect two traps, since whatever code + * the vsyscall page contains will be more than just a ret + * instruction. + */ + is_native = (num_vsyscall_traps > 1); + + printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n", + (is_native ? 
"native" : "emulated"), + (int)num_vsyscall_traps); + + return 0; +} +#endif + +int main(int argc, char **argv) +{ + int nerrs = 0; + + init_vdso(); + nerrs += init_vsys(); + + nerrs += test_gtod(); + nerrs += test_time(); + nerrs += test_getcpu(0); + nerrs += test_getcpu(1); + + sethandler(SIGSEGV, sigsegv, 0); + nerrs += test_vsys_r(); + +#ifdef __x86_64__ + nerrs += test_native_vsyscall(); +#endif + + return nerrs ? 1 : 0; +} From 026b3f23c970a22165ffdbd47a2a0cdccfd8c009 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 14 Jan 2018 00:23:57 +0100 Subject: [PATCH 1369/3326] x86/pti: Fix !PCID and sanitize defines commit f10ee3dcc9f0aba92a5c4c064628be5200765dc2 upstream. The switch to the user space page tables in the low level ASM code sets unconditionally bit 12 and bit 11 of CR3. Bit 12 is switching the base address of the page directory to the user part, bit 11 is switching the PCID to the PCID associated with the user page tables. This fails on a machine which lacks PCID support because bit 11 is set in CR3. Bit 11 is reserved when PCID is inactive. While the Intel SDM claims that the reserved bits are ignored when PCID is disabled, the AMD APM states that they should be cleared. This went unnoticed as the AMD APM was not checked when the code was developed and reviewed and test systems with Intel CPUs never failed to boot. The report is against a Centos 6 host where the guest fails to boot, so it's not yet clear whether this is a virt issue or can happen on real hardware too, but thats irrelevant as the AMD APM clearly ask for clearing the reserved bits. Make sure that on non PCID machines bit 11 is not set by the page table switching code. Andy suggested to rename the related bits and masks so they are clearly describing what they should be used for, which is done as well for clarity. That split could have been done with alternatives but the macro hell is horrible and ugly. This can be done on top if someone cares to remove the extra orq. For now it's a straight forward fix. Fixes: 6fd166aae78c ("x86/mm: Use/Fix PCID to optimize user/kernel switches") Reported-by: Laura Abbott Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: stable Cc: Borislav Petkov Cc: Andy Lutomirski Cc: Willy Tarreau Cc: David Woodhouse Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801140009150.2371@nanos Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 36 ++++++++++++++------------ arch/x86/include/asm/processor-flags.h | 2 +- arch/x86/include/asm/tlbflush.h | 6 ++--- 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 45a63e00a6af9a..3f48f695d5e6ac 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -198,8 +198,11 @@ For 32-bit we have the following conventions - kernel is built with * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two * halves: */ -#define PTI_SWITCH_PGTABLES_MASK (1<= (1 << X86_CR3_PTI_SWITCH_BIT)); + BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT)); /* * The ASID being passed in here should have respected the * MAX_ASID_AVAILABLE and thus never have the switch bit set. 
*/ - VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT)); + VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT)); #endif /* * The dynamically-assigned ASIDs that get passed in are small @@ -112,7 +112,7 @@ static inline u16 user_pcid(u16 asid) { u16 ret = kern_pcid(asid); #ifdef CONFIG_PAGE_TABLE_ISOLATION - ret |= 1 << X86_CR3_PTI_SWITCH_BIT; + ret |= 1 << X86_CR3_PTI_PCID_USER_BIT; #endif return ret; } From c3e7fc96545d83abbc1c2c1a7ac6809cf5fb6202 Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Fri, 12 Jan 2018 15:24:59 -0800 Subject: [PATCH 1370/3326] security/Kconfig: Correct the Documentation reference for PTI commit a237f762681e2a394ca67f21df2feb2b76a3609b upstream. When the config option for PTI was added a reference to documentation was added as well. But the documentation did not exist at that point. The final documentation has a different file name. Fix it up to point to the proper file. Fixes: 385ce0ea ("x86/mm/pti: Add Kconfig") Signed-off-by: W. Trevor King Signed-off-by: Thomas Gleixner Cc: Dave Hansen Cc: linux-mm@kvack.org Cc: linux-security-module@vger.kernel.org Cc: James Morris Cc: "Serge E. Hallyn" Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/3009cc8ccbddcd897ec1e0cb6dda524929de0d14.1515799398.git.wking@tremily.us Signed-off-by: Greg Kroah-Hartman --- security/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/Kconfig b/security/Kconfig index 6614b9312b45ef..b5c2b5d0c6c0eb 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -63,7 +63,7 @@ config PAGE_TABLE_ISOLATION ensuring that the majority of kernel addresses are not mapped into userspace. - See Documentation/x86/pagetable-isolation.txt for more details. + See Documentation/x86/pti.txt for more details. config SECURITY_INFINIBAND bool "Infiniband Security Hooks" From 6d8b7d3934b2c4f718441784823fbaeaa4c5327d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 14 Jan 2018 11:27:13 +0100 Subject: [PATCH 1371/3326] x86,perf: Disable intel_bts when PTI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 99a9dc98ba52267ce5e062b52de88ea1f1b2a7d8 upstream. The intel_bts driver does not use the 'normal' BTS buffer which is exposed through the cpu_entry_area but instead uses the memory allocated for the perf AUX buffer. This obviously comes apart when using PTI because then the kernel mapping; which includes that AUX buffer memory; disappears. Fixing this requires to expose a mapping which is visible in all context and that's not trivial. As a quick fix disable this driver when PTI is enabled to prevent malfunction. 
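Whether a running kernel actually has page-table isolation active can be checked from user space by looking for the "pti" flag that X86_FEATURE_PTI adds to /proc/cpuinfo; a minimal sketch (the flag is absent on kernels without the PTI patches, so treat a miss as "unknown"):

/* pti_check.c — report whether the booted kernel has PTI enabled */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f) {
		perror("/proc/cpuinfo");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		/* the flags line lists "pti" when page-table isolation is on */
		if (!strncmp(line, "flags", 5) &&
		    (strstr(line, " pti ") || strstr(line, " pti\n"))) {
			puts("kernel page-table isolation (PTI) is active");
			fclose(f);
			return 0;
		}
	}
	fclose(f);
	puts("no \"pti\" flag found (PTI off, or kernel does not report it)");
	return 0;
}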
Fixes: 385ce0ea4c07 ("x86/mm/pti: Add Kconfig") Reported-by: Vince Weaver Reported-by: Robert Święcki Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: Alexander Shishkin Cc: greg@kroah.com Cc: hughd@google.com Cc: luto@amacapital.net Cc: Vince Weaver Cc: torvalds@linux-foundation.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180114102713.GB6166@worktop.programming.kicks-ass.net Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/bts.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 141e07b0621689..24ffa1e88cf948 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -582,6 +582,24 @@ static __init int bts_init(void) if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) return -ENODEV; + if (boot_cpu_has(X86_FEATURE_PTI)) { + /* + * BTS hardware writes through a virtual memory map we must + * either use the kernel physical map, or the user mapping of + * the AUX buffer. + * + * However, since this driver supports per-CPU and per-task inherit + * we cannot use the user mapping since it will not be availble + * if we're not running the owning process. + * + * With PTI we can't use the kernal map either, because its not + * there when we run userspace. + * + * For now, disable this driver when using PTI. + */ + return -ENODEV; + } + bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE | PERF_PMU_CAP_EXCLUSIVE; bts_pmu.task_ctx_nr = perf_sw_context; From 198660b7a5dd33b114001023d540c9072603e2a8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 14 Jan 2018 22:13:29 +0100 Subject: [PATCH 1372/3326] x86/retpoline: Remove compile time warning commit b8b9ce4b5aec8de9e23cabb0a26b78641f9ab1d6 upstream. Remove the compile time warning when CONFIG_RETPOLINE=y and the compiler does not have retpoline support. Linus rationale for this is: It's wrong because it will just make people turn off RETPOLINE, and the asm updates - and return stack clearing - that are independent of the compiler are likely the most important parts because they are likely the ones easiest to target. And it's annoying because most people won't be able to do anything about it. The number of people building their own compiler? Very small. So if their distro hasn't got a compiler yet (and pretty much nobody does), the warning is just annoying crap. It is already properly reported as part of the sysfs interface. The compile-time warning only encourages bad things. 
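The sysfs report referred to above lives under /sys/devices/system/cpu/vulnerabilities/; a small sketch of consuming it instead of relying on a build-time warning (the path is the one introduced by this mitigation series and is absent on older kernels):

/* spectre_v2_status.c — print the kernel's own spectre_v2 mitigation report */
#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (!f) {
		puts("spectre_v2 status not exported by this kernel");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("spectre_v2: %s", buf);	/* e.g. "Mitigation: Full generic retpoline" */
	fclose(f);
	return 0;
}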
Fixes: 76b043848fd2 ("x86/retpoline: Add initial retpoline support") Requested-by: Linus Torvalds Signed-off-by: Thomas Gleixner Cc: David Woodhouse Cc: Peter Zijlstra (Intel) Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Link: https://lkml.kernel.org/r/CA+55aFzWgquv4i6Mab6bASqYXg3ErV3XDFEYf=GEcCDQg5uAtw@mail.gmail.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 974c61864978c4..504b1a4535acaa 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -240,8 +240,6 @@ ifdef CONFIG_RETPOLINE RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) ifneq ($(RETPOLINE_CFLAGS),) KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE - else - $(warning CONFIG_RETPOLINE=y, but not supported by the compiler. Toolchain update recommended.) endif endif From 9c0bf9847171edd597a13adf3ddc879e96d947cd Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 17 Jan 2018 09:45:30 +0100 Subject: [PATCH 1373/3326] Linux 4.14.14 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a67c5179052a68..4951305eb867d7 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 13 +SUBLEVEL = 14 EXTRAVERSION = NAME = Petit Gorille From 50c1c6cc09dcea608ace4bd258714d01909fed45 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 12 Jan 2018 16:53:17 -0800 Subject: [PATCH 1374/3326] tools/objtool/Makefile: don't assume sync-check.sh is executable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 0f908ccbeca99ddf0ad60afa710e72aded4a5ea7 upstream. patch(1) loses the x bit. So if a user follows our patching instructions in Documentation/admin-guide/README.rst, their kernel will not compile. Fixes: 3bd51c5a371de ("objtool: Move kernel headers/code sync check to a script") Reported-by: Nicolas Bock Reported-by Joakim Tjernlund Cc: Ingo Molnar Cc: Josh Poimboeuf Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Cc: Holger Hoffstätte Signed-off-by: Greg Kroah-Hartman --- tools/objtool/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index ae0272f9a09184..e6acc281dd3757 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -46,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE @$(MAKE) $(build)=objtool $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN) - @./sync-check.sh + @$(CONFIG_SHELL) ./sync-check.sh $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@ From aa9b50158262bb9cbba82ac9ad5ea3bac0c81e25 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sat, 6 Jan 2018 10:59:41 -0500 Subject: [PATCH 1375/3326] drm/nouveau/disp/gf119: add missing drive vfunc ptr commit 1b5c7ef3d0d0610bda9b63263f7c5b7178d11015 upstream. Fixes broken dp on GF119: Call Trace: ? nvkm_dp_train_drive+0x183/0x2c0 [nouveau] nvkm_dp_acquire+0x4f3/0xcd0 [nouveau] nv50_disp_super_2_2+0x5d/0x470 [nouveau] ? nvkm_devinit_pll_set+0xf/0x20 [nouveau] gf119_disp_super+0x19c/0x2f0 [nouveau] process_one_work+0x193/0x3c0 worker_thread+0x35/0x3b0 kthread+0x125/0x140 ? process_one_work+0x3c0/0x3c0 ? kthread_park+0x60/0x60 ret_from_fork+0x25/0x30 Code: Bad RIP value. 
RIP: (null) RSP: ffffb1e243e4bc38 CR2: 0000000000000000 Fixes: af85389c614a drm/nouveau/disp: shuffle functions around Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103421 Signed-off-by: Rob Clark Signed-off-by: Ben Skeggs Cc: Sven Joachim Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c index a2978a37b4f3c7..700fc754f28a4c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c @@ -174,6 +174,7 @@ gf119_sor = { .links = gf119_sor_dp_links, .power = g94_sor_dp_power, .pattern = gf119_sor_dp_pattern, + .drive = gf119_sor_dp_drive, .vcpi = gf119_sor_dp_vcpi, .audio = gf119_sor_dp_audio, .audio_sym = gf119_sor_dp_audio_sym, From e093c0810354e3be3a79c063f0d39152aa9e5e09 Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Sat, 30 Dec 2017 14:43:32 -0600 Subject: [PATCH 1376/3326] objtool: Fix seg fault with clang-compiled objects commit ce90aaf5cde4ce057b297bb6c955caf16ef00ee6 upstream. Fix a seg fault which happens when an input file provided to 'objtool orc generate' doesn't have a '.shstrtab' section (for instance, object files produced by clang don't have this section). Signed-off-by: Simon Ser Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/c0f2231683e9bed40fac1f13ce2c33b8389854bc.1514666459.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Cc: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- tools/objtool/orc_gen.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index e5ca31429c9bac..e61fe703197baa 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -165,6 +165,8 @@ int create_orc_sections(struct objtool_file *file) /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */ sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx); + if (!sec) + return -1; ip_relasec = elf_create_rela_section(file->elf, sec); if (!ip_relasec) From 78172c7d5c2c956667c360c9a3f30f69f6e15478 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Tue, 26 Dec 2017 15:27:20 -0600 Subject: [PATCH 1377/3326] objtool: Fix Clang enum conversion warning commit e7e83dd3ff1dd2f9e60213f6eedc7e5b08192062 upstream. Fix the following Clang enum conversion warning: arch/x86/decode.c:141:20: error: implicit conversion from enumeration type 'enum op_src_type' to different enumeration type 'enum op_dest_type' [-Werror,-Wenum-conversion] op->dest.type = OP_SRC_REG; ~ ^~~~~~~~~~ It just happened to work before because OP_SRC_REG and OP_DEST_REG have the same value. 
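A stand-alone reproduction of this class of bug, with the enums reduced to the two relevant members: C silently allows assigning an enumerator of one enum type to a variable of a different enum type, so the mixup only surfaces under Clang's -Wenum-conversion, and it only "worked" because both enumerators happen to be 0.

/* enum_conv.c — compare: gcc -c enum_conv.c   vs.   clang -Wenum-conversion -c enum_conv.c */
enum op_src_type  { OP_SRC_REG,  OP_SRC_REG_INDIRECT };
enum op_dest_type { OP_DEST_REG, OP_DEST_REG_INDIRECT };

struct op {
	enum op_src_type  src;
	enum op_dest_type dest;
};

int main(void)
{
	struct op op;

	op.src  = OP_SRC_REG;
	op.dest = OP_SRC_REG;	/* wrong enum type: GCC accepts it silently,
				 * Clang warns; same numeric value as OP_DEST_REG */
	op.dest = OP_DEST_REG;	/* the intended assignment */

	return (int)op.dest;
}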
Signed-off-by: Lukas Bulwahn Signed-off-by: Josh Poimboeuf Reviewed-by: Nicholas Mc Guire Reviewed-by: Nick Desaulniers Cc: Jiri Slaby Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: baa41469a7b9 ("objtool: Implement stack validation 2.0") Link: http://lkml.kernel.org/r/b4156c5738bae781c392e7a3691aed4514ebbdf2.1514323568.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Cc: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- tools/objtool/arch/x86/decode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 8acfc47af70efd..540a209b78ab3c 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -138,7 +138,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec, *type = INSN_STACK; op->src.type = OP_SRC_ADD; op->src.reg = op_to_cfi_reg[modrm_reg][rex_r]; - op->dest.type = OP_SRC_REG; + op->dest.type = OP_DEST_REG; op->dest.reg = CFI_SP; } break; From 5174ec1d498bf2ddff440fbdd8dbf860a8594015 Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Sat, 30 Dec 2017 14:43:31 -0600 Subject: [PATCH 1378/3326] objtool: Fix seg fault caused by missing parameter commit d89e426499cf36b96161bd32970d6783f1fbcb0e upstream. Fix a seg fault when no parameter is provided to 'objtool orc'. Signed-off-by: Simon Ser Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/9172803ec7ebb72535bcd0b7f966ae96d515968e.1514666459.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Cc: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- tools/objtool/builtin-orc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c index 4c6b5c9ef073b3..91e8e19ff5e061 100644 --- a/tools/objtool/builtin-orc.c +++ b/tools/objtool/builtin-orc.c @@ -44,6 +44,9 @@ int cmd_orc(int argc, const char **argv) const char *objname; argc--; argv++; + if (argc <= 0) + usage_with_options(orc_usage, check_options); + if (!strncmp(argv[0], "gen", 3)) { argc = parse_options(argc, argv, check_options, orc_usage, 0); if (argc != 1) @@ -52,7 +55,6 @@ int cmd_orc(int argc, const char **argv) objname = argv[0]; return check(objname, no_fp, no_unreachable, true); - } if (!strcmp(argv[0], "dump")) { From 4167dcbc91b1d12873f86d508563967e6ae8cb80 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Tue, 9 Jan 2018 03:52:05 +1100 Subject: [PATCH 1379/3326] powerpc/pseries: Add H_GET_CPU_CHARACTERISTICS flags & wrapper commit 191eccb1580939fb0d47deb405b82a85b0379070 upstream. A new hypervisor call has been defined to communicate various characteristics of the CPU to guests. Add definitions for the hcall number, flags and a wrapper function. 
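The hcall returns two 64-bit masks which the caller decodes against the flag definitions added below. The real call is only reachable from the kernel via plpar_hcall(); the following stand-alone sketch just shows the decoding, using illustrative values in place of firmware output:

/* cpu_char_decode.c — decode H_GET_CPU_CHARACTERISTICS style masks */
#include <stdio.h>
#include <stdint.h>

#define H_CPU_CHAR_SPEC_BAR_ORI31	(1ull << 63)	/* IBM bit 0 */
#define H_CPU_CHAR_BCCTRL_SERIALISED	(1ull << 62)	/* IBM bit 1 */
#define H_CPU_CHAR_L1D_FLUSH_ORI30	(1ull << 61)	/* IBM bit 2 */
#define H_CPU_CHAR_L1D_FLUSH_TRIG2	(1ull << 60)	/* IBM bit 3 */
#define H_CPU_CHAR_L1D_THREAD_PRIV	(1ull << 59)	/* IBM bit 4 */

#define H_CPU_BEHAV_FAVOUR_SECURITY	(1ull << 63)	/* IBM bit 0 */
#define H_CPU_BEHAV_L1D_FLUSH_PR	(1ull << 62)	/* IBM bit 1 */
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR	(1ull << 61)	/* IBM bit 2 */

struct h_cpu_char_result {
	uint64_t character;
	uint64_t behaviour;
};

static void describe(const struct h_cpu_char_result *p)
{
	if (p->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
		puts("L1-D flush available via mttrig");
	if (p->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
		puts("L1-D flush available via ori 30,30,30");
	if (p->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)
		puts("firmware asks for an L1-D flush on kernel-to-user return");
	if (p->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)
		puts("firmware favours security over performance");
}

int main(void)
{
	/* example values, as if returned by the hypervisor */
	struct h_cpu_char_result r = {
		.character = H_CPU_CHAR_L1D_FLUSH_TRIG2,
		.behaviour = H_CPU_BEHAV_FAVOUR_SECURITY | H_CPU_BEHAV_L1D_FLUSH_PR,
	};

	describe(&r);
	return 0;
}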
Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/hvcall.h | 17 +++++++++++++++++ arch/powerpc/include/asm/plpar_wrappers.h | 14 ++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index a409177be8bdfd..f0461618bf7bee 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -241,6 +241,7 @@ #define H_GET_HCA_INFO 0x1B8 #define H_GET_PERF_COUNT 0x1BC #define H_MANAGE_TRACE 0x1C0 +#define H_GET_CPU_CHARACTERISTICS 0x1C8 #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 #define H_QUERY_INT_STATE 0x1E4 #define H_POLL_PENDING 0x1D8 @@ -330,6 +331,17 @@ #define H_SIGNAL_SYS_RESET_ALL_OTHERS -2 /* >= 0 values are CPU number */ +/* H_GET_CPU_CHARACTERISTICS return values */ +#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0 +#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1 +#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2 +#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3 +#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4 + +#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 +#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 +#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 + /* Flag values used in H_REGISTER_PROC_TBL hcall */ #define PROC_TABLE_OP_MASK 0x18 #define PROC_TABLE_DEREG 0x10 @@ -436,6 +448,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc) } } +struct h_cpu_char_result { + u64 character; + u64 behaviour; +}; + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HVCALL_H */ diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 7f01b22fa6cb0d..55eddf50d14980 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu) return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); } +static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf); + if (rc == H_SUCCESS) { + p->character = retbuf[0]; + p->behaviour = retbuf[1]; + } + + return rc; +} + #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ From bcba6b9024461e9b23e434eb9c0461afc4f36c1d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 10 Jan 2018 03:07:15 +1100 Subject: [PATCH 1380/3326] powerpc/64: Add macros for annotating the destination of rfid/hrfid commit 50e51c13b3822d14ff6df4279423e4b7b2269bc3 upstream. The rfid/hrfid ((Hypervisor) Return From Interrupt) instruction is used for switching from the kernel to userspace, and from the hypervisor to the guest kernel. However it can and is also used for other transitions, eg. from real mode kernel code to virtual mode kernel code, and it's not always clear from the code what the destination context is. To make it clearer when reading the code, add macros which encode the expected destination context. 
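A C analogy of what the annotation buys (the real macros are assembler): all variants behave identically at this point, but naming the destination lets a later patch in this series attach extra work, such as the L1-D flush, to only the exits that lower privilege. The helper and flag names below are invented for the sketch:

/* rfi_annotate.c — self-documenting exit macros; only the to-user path
 * later grows a flush. */
#include <stdio.h>
#include <stdbool.h>

static bool l1d_flush_enabled;

static void maybe_flush_l1d(void)
{
	if (l1d_flush_enabled)
		puts("  [flush L1-D before lowering privilege]");
}

/* identical behaviour today; the name records where control is going */
#define RFI_TO_KERNEL()	do { puts("  rfid -> kernel"); } while (0)
#define RFI_TO_USER()	do { maybe_flush_l1d(); puts("  rfid -> user"); } while (0)

int main(void)
{
	puts("exception exit, staying in the kernel:");
	RFI_TO_KERNEL();

	l1d_flush_enabled = true;
	puts("exception exit, returning to user space:");
	RFI_TO_USER();
	return 0;
}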
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/exception-64e.h | 6 +++++ arch/powerpc/include/asm/exception-64s.h | 29 ++++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index a703452d67b62f..555e22d5e07f9e 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h @@ -209,5 +209,11 @@ exc_##label##_book3e: ori r3,r3,vector_offset@l; \ mtspr SPRN_IVOR##vector_number,r3; +#define RFI_TO_KERNEL \ + rfi + +#define RFI_TO_USER \ + rfi + #endif /* _ASM_POWERPC_EXCEPTION_64E_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 9a318973af0547..6d65a561b0eeed 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -69,6 +69,35 @@ */ #define EX_R3 EX_DAR +/* Macros for annotating the expected destination of (h)rfid */ + +#define RFI_TO_KERNEL \ + rfid + +#define RFI_TO_USER \ + rfid + +#define RFI_TO_USER_OR_KERNEL \ + rfid + +#define RFI_TO_GUEST \ + rfid + +#define HRFI_TO_KERNEL \ + hrfid + +#define HRFI_TO_USER \ + hrfid + +#define HRFI_TO_USER_OR_KERNEL \ + hrfid + +#define HRFI_TO_GUEST \ + hrfid + +#define HRFI_TO_UNKNOWN \ + hrfid + #ifdef CONFIG_RELOCATABLE #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ From 11caf810bd078e7d09be9f58250818f6595e78de Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 10 Jan 2018 03:07:15 +1100 Subject: [PATCH 1381/3326] powerpc/64s: Simple RFI macro conversions commit 222f20f140623ef6033491d0103ee0875fe87d35 upstream. This commit does simple conversions of rfi/rfid to the new macros that include the expected destination context. By simple we mean cases where there is a single well known destination context, and it's simply a matter of substituting the instruction for the appropriate macro. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/exception-64s.h | 4 ++-- arch/powerpc/kernel/entry_64.S | 14 +++++++++----- arch/powerpc/kernel/exceptions-64s.S | 22 +++++++++++----------- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 7 +++---- arch/powerpc/kvm/book3s_rmhandlers.S | 7 +++++-- arch/powerpc/kvm/book3s_segment.S | 4 ++-- 6 files changed, 32 insertions(+), 26 deletions(-) diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 6d65a561b0eeed..0c8863ff65f76f 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -242,7 +242,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) mtspr SPRN_##h##SRR0,r12; \ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ mtspr SPRN_##h##SRR1,r10; \ - h##rfid; \ + h##RFI_TO_KERNEL; \ b . /* prevent speculative execution */ #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ __EXCEPTION_PROLOG_PSERIES_1(label, h) @@ -256,7 +256,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) mtspr SPRN_##h##SRR0,r12; \ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ mtspr SPRN_##h##SRR1,r10; \ - h##rfid; \ + h##RFI_TO_KERNEL; \ b . 
/* prevent speculative execution */ #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 4a0fd4f4024533..5ae0a435417d1e 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -37,6 +37,11 @@ #include #include #include +#ifdef CONFIG_PPC_BOOK3S +#include +#else +#include +#endif /* * System calls. @@ -397,8 +402,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) mtmsrd r10, 1 mtspr SPRN_SRR0, r11 mtspr SPRN_SRR1, r12 - - rfid + RFI_TO_USER b . /* prevent speculative execution */ #endif _ASM_NOKPROBE_SYMBOL(system_call_common); @@ -1073,7 +1077,7 @@ __enter_rtas: mtspr SPRN_SRR0,r5 mtspr SPRN_SRR1,r6 - rfid + RFI_TO_KERNEL b . /* prevent speculative execution */ rtas_return_loc: @@ -1098,7 +1102,7 @@ rtas_return_loc: mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 - rfid + RFI_TO_KERNEL b . /* prevent speculative execution */ _ASM_NOKPROBE_SYMBOL(__enter_rtas) _ASM_NOKPROBE_SYMBOL(rtas_return_loc) @@ -1171,7 +1175,7 @@ _GLOBAL(enter_prom) LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) andc r11,r11,r12 mtsrr1 r11 - rfid + RFI_TO_KERNEL #endif /* CONFIG_PPC_BOOK3E */ 1: /* Return from OF */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 06598142d755c7..85795d41f6ca11 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -254,7 +254,7 @@ BEGIN_FTR_SECTION LOAD_HANDLER(r12, machine_check_handle_early) 1: mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r11 - rfid + RFI_TO_KERNEL b . /* prevent speculative execution */ 2: /* Stack overflow. Stay on emergency stack and panic. @@ -443,7 +443,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) li r3,MSR_ME andc r10,r10,r3 /* Turn off MSR_ME */ mtspr SPRN_SRR1,r10 - rfid + RFI_TO_KERNEL b . 2: /* @@ -461,7 +461,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) */ bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP - rfid + RFI_TO_USER_OR_KERNEL 9: /* Deliver the machine check to host kernel in V mode. */ MACHINE_CHECK_HANDLER_WINDUP @@ -649,7 +649,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) mtspr SPRN_SRR0,r10 ld r10,PACAKMSR(r13) mtspr SPRN_SRR1,r10 - rfid + RFI_TO_KERNEL b . 8: std r3,PACA_EXSLB+EX_DAR(r13) @@ -660,7 +660,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) mtspr SPRN_SRR0,r10 ld r10,PACAKMSR(r13) mtspr SPRN_SRR1,r10 - rfid + RFI_TO_KERNEL b . EXC_COMMON_BEGIN(unrecov_slb) @@ -905,7 +905,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ mtspr SPRN_SRR0,r10 ; \ ld r10,PACAKMSR(r13) ; \ mtspr SPRN_SRR1,r10 ; \ - rfid ; \ + RFI_TO_KERNEL ; \ b . ; /* prevent speculative execution */ #define SYSCALL_FASTENDIAN \ @@ -914,7 +914,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ xori r12,r12,MSR_LE ; \ mtspr SPRN_SRR1,r12 ; \ mr r13,r9 ; \ - rfid ; /* return to userspace */ \ + RFI_TO_USER ; /* return to userspace */ \ b . ; /* prevent speculative execution */ #if defined(CONFIG_RELOCATABLE) @@ -1299,7 +1299,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r11,PACA_EXGEN+EX_R11(r13) ld r12,PACA_EXGEN+EX_R12(r13) ld r13,PACA_EXGEN+EX_R13(r13) - HRFID + HRFI_TO_UNKNOWN b . 
#endif @@ -1403,7 +1403,7 @@ masked_##_H##interrupt: \ ld r10,PACA_EXGEN+EX_R10(r13); \ ld r11,PACA_EXGEN+EX_R11(r13); \ /* returns to kernel where r13 must be set up, so don't restore it */ \ - ##_H##rfid; \ + ##_H##RFI_TO_KERNEL; \ b .; \ MASKED_DEC_HANDLER(_H) @@ -1426,7 +1426,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt) addi r13, r13, 4 mtspr SPRN_SRR0, r13 GET_SCRATCH0(r13) - rfid + RFI_TO_KERNEL b . TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) @@ -1438,7 +1438,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) addi r13, r13, 4 mtspr SPRN_HSRR0, r13 GET_SCRATCH0(r13) - hrfid + HRFI_TO_KERNEL b . #endif diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 42639fba89e881..c85ac5c83bd4fd 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -78,7 +78,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline) mtmsrd r0,1 /* clear RI in MSR */ mtsrr0 r5 mtsrr1 r6 - RFI + RFI_TO_KERNEL kvmppc_call_hv_entry: ld r4, HSTATE_KVM_VCPU(r13) @@ -187,7 +187,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) mtmsrd r6, 1 /* Clear RI in MSR */ mtsrr0 r8 mtsrr1 r7 - RFI + RFI_TO_KERNEL /* Virtual-mode return */ .Lvirt_return: @@ -1131,8 +1131,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r0, VCPU_GPR(R0)(r4) ld r4, VCPU_GPR(R4)(r4) - - hrfid + HRFI_TO_GUEST b . secondary_too_late: diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 42a4b237df5f57..34a5adeff08406 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -46,6 +46,9 @@ #define FUNC(name) name +#define RFI_TO_KERNEL RFI +#define RFI_TO_GUEST RFI + .macro INTERRUPT_TRAMPOLINE intno .global kvmppc_trampoline_\intno @@ -141,7 +144,7 @@ kvmppc_handler_skip_ins: GET_SCRATCH0(r13) /* And get back into the code */ - RFI + RFI_TO_KERNEL #endif /* @@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline) ori r5, r5, MSR_EE mtsrr0 r7 mtsrr1 r6 - RFI + RFI_TO_KERNEL #include "book3s_segment.S" diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 2a2b96d5399917..93a180ceefad03 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -156,7 +156,7 @@ no_dcbz32_on: PPC_LL r9, SVCPU_R9(r3) PPC_LL r3, (SVCPU_R3)(r3) - RFI + RFI_TO_GUEST kvmppc_handler_trampoline_enter_end: @@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) cmpwi r12, BOOK3S_INTERRUPT_DOORBELL beqa BOOK3S_INTERRUPT_DOORBELL - RFI + RFI_TO_KERNEL kvmppc_handler_trampoline_exit_end: From 627700e4558db4abe37261d80fbf8fbef1ef16cb Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 10 Jan 2018 03:07:15 +1100 Subject: [PATCH 1382/3326] powerpc/64: Convert the syscall exit path to use RFI_TO_USER/KERNEL commit b8e90cb7bc04a509e821e82ab6ed7a8ef11ba333 upstream. In the syscall exit path we may be returning to user or kernel context. We already have a test for that, because we conditionally restore r13. So use that existing test and branch, and bifurcate the return based on that. 
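In C terms the restructuring looks like the sketch below: the user-versus-kernel test that already existed (it decided whether to restore r13) now also selects which exit flavour ends the path. Helper names are hypothetical:

/* syscall_exit_sketch.c — one shared tail becomes two, picked by an
 * existing predicate */
#include <stdio.h>
#include <stdbool.h>

static void exit_rfi_to_user(void)   { puts("restore user r13, ..., RFI_TO_USER"); }
static void exit_rfi_to_kernel(void) { puts("restore kernel state, RFI_TO_KERNEL"); }

static void syscall_exit(bool to_user)
{
	if (to_user)
		exit_rfi_to_user();
	else
		exit_rfi_to_kernel();
}

int main(void)
{
	syscall_exit(true);
	syscall_exit(false);
	return 0;
}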
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/entry_64.S | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 5ae0a435417d1e..150c402728d128 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -267,13 +267,23 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ + ld r2,GPR2(r1) + ld r1,GPR1(r1) + mtlr r4 + mtcr r5 + mtspr SPRN_SRR0,r7 + mtspr SPRN_SRR1,r8 + RFI_TO_USER + b . /* prevent speculative execution */ + + /* exit to kernel */ 1: ld r2,GPR2(r1) ld r1,GPR1(r1) mtlr r4 mtcr r5 mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 - RFI + RFI_TO_KERNEL b . /* prevent speculative execution */ .Lsyscall_error: From bcac5d36538ff417429ed942aa0392a3c50a4783 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 10 Jan 2018 03:07:15 +1100 Subject: [PATCH 1383/3326] powerpc/64: Convert fast_exception_return to use RFI_TO_USER/KERNEL commit a08f828cf47e6c605af21d2cdec68f84e799c318 upstream. Similar to the syscall return path, in fast_exception_return we may be returning to user or kernel context. We already have a test for that, because we conditionally restore r13. So use that existing test and branch, and bifurcate the return based on that. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/entry_64.S | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 150c402728d128..8a8a6d7ddcc6e6 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -892,7 +892,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ACCOUNT_CPU_USER_EXIT(r13, r2, r4) REST_GPR(13, r1) -1: + mtspr SPRN_SRR1,r3 ld r2,_CCR(r1) @@ -905,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r3,GPR3(r1) ld r4,GPR4(r1) ld r1,GPR1(r1) + RFI_TO_USER + b . /* prevent speculative execution */ + +1: mtspr SPRN_SRR1,r3 + + ld r2,_CCR(r1) + mtcrf 0xFF,r2 + ld r2,_NIP(r1) + mtspr SPRN_SRR0,r2 - rfid + ld r0,GPR0(r1) + ld r2,GPR2(r1) + ld r3,GPR3(r1) + ld r4,GPR4(r1) + ld r1,GPR1(r1) + RFI_TO_KERNEL b . /* prevent speculative execution */ #endif /* CONFIG_PPC_BOOK3E */ From 9488c6b916535c7df2e99b8e3928faa5ff59b150 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 10 Jan 2018 03:07:15 +1100 Subject: [PATCH 1384/3326] powerpc/64s: Convert slb_miss_common to use RFI_TO_USER/KERNEL commit c7305645eb0c1621351cfc104038831ae87c0053 upstream. In the SLB miss handler we may be returning to user or kernel. We need to add a check early on and save the result in the cr4 register, and then we bifurcate the return path based on that. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/exceptions-64s.S | 29 +++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 85795d41f6ca11..845291357f0b9d 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -596,6 +596,9 @@ EXC_COMMON_BEGIN(slb_miss_common) stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ + andi. 
r9,r11,MSR_PR // Check for exception from userspace + cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later + /* * Test MSR_RI before calling slb_allocate_realmode, because the * MSR in r11 gets clobbered. However we still want to allocate @@ -622,9 +625,32 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) /* All done -- return from exception. */ + bne cr4,1f /* returning to kernel */ + +.machine push +.machine "power4" + mtcrf 0x80,r9 + mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ + mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ + mtcrf 0x02,r9 /* I/D indication is in cr6 */ + mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ +.machine pop + + RESTORE_CTR(r9, PACA_EXSLB) + RESTORE_PPR_PACA(PACA_EXSLB, r9) + mr r3,r12 + ld r9,PACA_EXSLB+EX_R9(r13) + ld r10,PACA_EXSLB+EX_R10(r13) + ld r11,PACA_EXSLB+EX_R11(r13) + ld r12,PACA_EXSLB+EX_R12(r13) + ld r13,PACA_EXSLB+EX_R13(r13) + RFI_TO_USER + b . /* prevent speculative execution */ +1: .machine push .machine "power4" mtcrf 0x80,r9 + mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ mtcrf 0x02,r9 /* I/D indication is in cr6 */ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ @@ -638,9 +664,10 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) ld r11,PACA_EXSLB+EX_R11(r13) ld r12,PACA_EXSLB+EX_R12(r13) ld r13,PACA_EXSLB+EX_R13(r13) - rfid + RFI_TO_KERNEL b . /* prevent speculative execution */ + 2: std r3,PACA_EXSLB+EX_DAR(r13) mr r3,r12 mfspr r11,SPRN_SRR0 From b434c155ab4411d732bcafae699bfc35e9847151 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 10 Jan 2018 03:07:15 +1100 Subject: [PATCH 1385/3326] powerpc/64s: Add support for RFI flush of L1-D cache commit aa8a5e0062ac940f7659394f4817c948dc8c0667 upstream. On some CPUs we can prevent the Meltdown vulnerability by flushing the L1-D cache on exit from kernel to user mode, and from hypervisor to guest. This is known to be the case on at least Power7, Power8 and Power9. At this time we do not know the status of the vulnerability on other CPUs such as the 970 (Apple G5), pasemi CPUs (AmigaOne X1000) or Freescale CPUs. As more information comes to light we can enable this, or other mechanisms on those CPUs. The vulnerability occurs when the load of an architecturally inaccessible memory region (eg. userspace load of kernel memory) is speculatively executed to the point where its result can influence the address of a subsequent speculatively executed load. In order for that to happen, the first load must hit in the L1, because before the load is sent to the L2 the permission check is performed. Therefore if no kernel addresses hit in the L1 the vulnerability can not occur. We can ensure that is the case by flushing the L1 whenever we return to userspace. Similarly for hypervisor vs guest. In order to flush the L1-D cache on exit, we add a section of nops at each (h)rfi location that returns to a lower privileged context, and patch that with some sequence. Newer firmwares are able to advertise to us that there is a special nop instruction that flushes the L1-D. If we do not see that advertised, we fall back to doing a displacement flush in software. For guest kernels we support migration between some CPU versions, and different CPUs may use different flush instructions. So that we are prepared to migrate to a machine with a different flush instruction activated, we may have to patch more than one flush instruction at boot if the hypervisor tells us to. 
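A user-space sketch of the fallback "displacement flush" idea: when no dedicated flush instruction is advertised, load one line from every set and way of a private buffer so no stale data stays resident in the L1-D. The geometry below (64 KB, 8-way, 128-byte lines) mirrors the assumptions the kernel code makes and is illustrative only; the kernel additionally staggers the per-way offsets, which is omitted here.

/* l1d_displacement_flush.c — conceptual sketch, not a real mitigation */
#include <stdlib.h>
#include <stdint.h>

#define L1D_SIZE	(64 * 1024)
#define L1D_WAYS	8
#define LINE_SIZE	128

static volatile uint64_t sink;	/* keep the loads from being optimised away */

static void fallback_l1d_flush(const uint8_t *area)
{
	size_t bytes_per_way = L1D_SIZE / L1D_WAYS;	/* span of one way */
	size_t nr_sets = bytes_per_way / LINE_SIZE;
	size_t set, way;

	/* one load per (set, way) displaces whatever was cached there */
	for (set = 0; set < nr_sets; set++)
		for (way = 0; way < L1D_WAYS; way++)
			sink += area[set * LINE_SIZE + way * bytes_per_way];
}

int main(void)
{
	/* 2x the L1 size, to absorb hardware prefetch run-off, as the patch does */
	uint8_t *area = calloc(2, L1D_SIZE);

	if (area)
		fallback_l1d_flush(area);
	free(area);
	return 0;
}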
In the end this patch is mostly the work of Nicholas Piggin and Michael Ellerman. However a cast of thousands contributed to analysis of the issue, earlier versions of the patch, back ports testing etc. Many thanks to all of them. Tested-by: Jon Masters Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/exception-64s.h | 40 ++++++++--- arch/powerpc/include/asm/feature-fixups.h | 13 ++++ arch/powerpc/include/asm/paca.h | 10 +++ arch/powerpc/include/asm/setup.h | 13 ++++ arch/powerpc/kernel/asm-offsets.c | 5 ++ arch/powerpc/kernel/exceptions-64s.S | 84 +++++++++++++++++++++++ arch/powerpc/kernel/setup_64.c | 79 +++++++++++++++++++++ arch/powerpc/kernel/vmlinux.lds.S | 9 +++ arch/powerpc/lib/feature-fixups.c | 41 +++++++++++ 9 files changed, 286 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 0c8863ff65f76f..ccf10c2f8899f2 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -69,34 +69,58 @@ */ #define EX_R3 EX_DAR -/* Macros for annotating the expected destination of (h)rfid */ +/* + * Macros for annotating the expected destination of (h)rfid + * + * The nop instructions allow us to insert one or more instructions to flush the + * L1-D cache when returning to userspace or a guest. + */ +#define RFI_FLUSH_SLOT \ + RFI_FLUSH_FIXUP_SECTION; \ + nop; \ + nop; \ + nop #define RFI_TO_KERNEL \ rfid #define RFI_TO_USER \ - rfid + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback #define RFI_TO_USER_OR_KERNEL \ - rfid + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback #define RFI_TO_GUEST \ - rfid + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback #define HRFI_TO_KERNEL \ hrfid #define HRFI_TO_USER \ - hrfid + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback #define HRFI_TO_USER_OR_KERNEL \ - hrfid + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback #define HRFI_TO_GUEST \ - hrfid + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback #define HRFI_TO_UNKNOWN \ - hrfid + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback #ifdef CONFIG_RELOCATABLE #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 8f88f771cc55ce..1e82eb3caabd19 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -187,7 +187,20 @@ label##3: \ FTR_ENTRY_OFFSET label##1b-label##3b; \ .popsection; +#define RFI_FLUSH_FIXUP_SECTION \ +951: \ + .pushsection __rfi_flush_fixup,"a"; \ + .align 2; \ +952: \ + FTR_ENTRY_OFFSET 951b-952b; \ + .popsection; + + #ifndef __ASSEMBLY__ +#include + +extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; + void apply_feature_fixups(void); void setup_feature_keys(void); #endif diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 04b60af027ae3b..b8366df50d1956 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -231,6 +231,16 @@ struct paca_struct { struct sibling_subcore_state *sibling_subcore_state; #endif #endif +#ifdef CONFIG_PPC_BOOK3S_64 + /* + * rfi fallback flush must be in its own cacheline to prevent + * other paca data leaking into the L1d + */ + u64 exrfi[EX_SIZE] __aligned(0x80); + void *rfi_flush_fallback_area; + u64 l1d_flush_congruence; + u64 l1d_flush_sets; +#endif }; extern void copy_mm_to_paca(struct mm_struct *mm); diff --git 
a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index cf00ec26303aef..469b7fdc9be41c 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -39,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {} static inline void pseries_little_endian_exceptions(void) {} #endif /* CONFIG_PPC_PSERIES */ +void rfi_flush_enable(bool enable); + +/* These are bit flags */ +enum l1d_flush_type { + L1D_FLUSH_NONE = 0x1, + L1D_FLUSH_FALLBACK = 0x2, + L1D_FLUSH_ORI = 0x4, + L1D_FLUSH_MTTRIG = 0x8, +}; + +void __init setup_rfi_flush(enum l1d_flush_type, bool enable); +void do_rfi_flush_fixups(enum l1d_flush_type types); + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 8cfb20e38cfe97..748cdc4bb89ab4 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -237,6 +237,11 @@ int main(void) OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp); OFFSET(PACA_IN_MCE, paca_struct, in_mce); OFFSET(PACA_IN_NMI, paca_struct, in_nmi); + OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area); + OFFSET(PACA_EXRFI, paca_struct, exrfi); + OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence); + OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets); + #endif OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 845291357f0b9d..e9f72abc52b72b 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1434,6 +1434,90 @@ masked_##_H##interrupt: \ b .; \ MASKED_DEC_HANDLER(_H) +TRAMP_REAL_BEGIN(rfi_flush_fallback) + SET_SCRATCH0(r13); + GET_PACA(r13); + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + std r12,PACA_EXRFI+EX_R12(r13) + std r8,PACA_EXRFI+EX_R13(r13) + mfctr r9 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) + ld r11,PACA_L1D_FLUSH_SETS(r13) + ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) + /* + * The load adresses are at staggered offsets within cachelines, + * which suits some pipelines better (on others it should not + * hurt). + */ + addi r12,r12,8 + mtctr r11 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ + + /* order ld/st prior to dcbt stop all streams with flushing */ + sync +1: li r8,0 + .rept 8 /* 8-way set associative */ + ldx r11,r10,r8 + add r8,r8,r12 + xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not + add r8,r8,r11 // Add 0, this creates a dependency on the ldx + .endr + addi r10,r10,128 /* 128 byte cache line */ + bdnz 1b + + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) + ld r11,PACA_EXRFI+EX_R11(r13) + ld r12,PACA_EXRFI+EX_R12(r13) + ld r8,PACA_EXRFI+EX_R13(r13) + GET_SCRATCH0(r13); + rfid + +TRAMP_REAL_BEGIN(hrfi_flush_fallback) + SET_SCRATCH0(r13); + GET_PACA(r13); + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + std r12,PACA_EXRFI+EX_R12(r13) + std r8,PACA_EXRFI+EX_R13(r13) + mfctr r9 + ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) + ld r11,PACA_L1D_FLUSH_SETS(r13) + ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) + /* + * The load adresses are at staggered offsets within cachelines, + * which suits some pipelines better (on others it should not + * hurt). 
+ */ + addi r12,r12,8 + mtctr r11 + DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ + + /* order ld/st prior to dcbt stop all streams with flushing */ + sync +1: li r8,0 + .rept 8 /* 8-way set associative */ + ldx r11,r10,r8 + add r8,r8,r12 + xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not + add r8,r8,r11 // Add 0, this creates a dependency on the ldx + .endr + addi r10,r10,128 /* 128 byte cache line */ + bdnz 1b + + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) + ld r11,PACA_EXRFI+EX_R11(r13) + ld r12,PACA_EXRFI+EX_R12(r13) + ld r8,PACA_EXRFI+EX_R13(r13) + GET_SCRATCH0(r13); + hrfid + /* * Real mode exceptions actually use this too, but alternate * instruction code patches (which end up in the common .text area) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index b89c6aac48c9bc..79e81a78354760 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -784,3 +784,82 @@ static int __init disable_hardlockup_detector(void) return 0; } early_initcall(disable_hardlockup_detector); + +#ifdef CONFIG_PPC_BOOK3S_64 +static enum l1d_flush_type enabled_flush_types; +static void *l1d_flush_fallback_area; +bool rfi_flush; + +static void do_nothing(void *unused) +{ + /* + * We don't need to do the flush explicitly, just enter+exit kernel is + * sufficient, the RFI exit handlers will do the right thing. + */ +} + +void rfi_flush_enable(bool enable) +{ + if (rfi_flush == enable) + return; + + if (enable) { + do_rfi_flush_fixups(enabled_flush_types); + on_each_cpu(do_nothing, NULL, 1); + } else + do_rfi_flush_fixups(L1D_FLUSH_NONE); + + rfi_flush = enable; +} + +static void init_fallback_flush(void) +{ + u64 l1d_size, limit; + int cpu; + + l1d_size = ppc64_caches.l1d.size; + limit = min(safe_stack_limit(), ppc64_rma_size); + + /* + * Align to L1d size, and size it at 2x L1d size, to catch possible + * hardware prefetch runoff. We don't have a recipe for load patterns to + * reliably avoid the prefetcher. + */ + l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit)); + memset(l1d_flush_fallback_area, 0, l1d_size * 2); + + for_each_possible_cpu(cpu) { + /* + * The fallback flush is currently coded for 8-way + * associativity. Different associativity is possible, but it + * will be treated as 8-way and may not evict the lines as + * effectively. + * + * 128 byte lines are mandatory. + */ + u64 c = l1d_size / 8; + + paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; + paca[cpu].l1d_flush_congruence = c; + paca[cpu].l1d_flush_sets = c / 128; + } +} + +void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) +{ + if (types & L1D_FLUSH_FALLBACK) { + pr_info("rfi-flush: Using fallback displacement flush\n"); + init_fallback_flush(); + } + + if (types & L1D_FLUSH_ORI) + pr_info("rfi-flush: Using ori type flush\n"); + + if (types & L1D_FLUSH_MTTRIG) + pr_info("rfi-flush: Using mttrig type flush\n"); + + enabled_flush_types = types; + + rfi_flush_enable(enable); +} +#endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 0494e1566ee2ab..307843d23682a7 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -132,6 +132,15 @@ SECTIONS /* Read-only data */ RO_DATA(PAGE_SIZE) +#ifdef CONFIG_PPC64 + . 
= ALIGN(8); + __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { + __start___rfi_flush_fixup = .; + *(__rfi_flush_fixup) + __stop___rfi_flush_fixup = .; + } +#endif + EXCEPTION_TABLE(0) NOTES :kernel :notes diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 41cf5ae273cf74..a95ea007d654d5 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) } } +#ifdef CONFIG_PPC_BOOK3S_64 +void do_rfi_flush_fixups(enum l1d_flush_type types) +{ + unsigned int instrs[3], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___rfi_flush_fixup), + end = PTRRELOC(&__stop___rfi_flush_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + + if (types & L1D_FLUSH_FALLBACK) + /* b .+16 to fallback flush */ + instrs[0] = 0x48000010; + + i = 0; + if (types & L1D_FLUSH_ORI) { + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ + } + + if (types & L1D_FLUSH_MTTRIG) + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + patch_instruction(dest + 1, instrs[1]); + patch_instruction(dest + 2, instrs[2]); + } + + printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i); +} +#endif /* CONFIG_PPC_BOOK3S_64 */ + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) { long *start, *end; From 9472d895cdc4a9abfb000bd071b1e036cebe3333 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 10 Jan 2018 03:07:15 +1100 Subject: [PATCH 1386/3326] powerpc/64s: Support disabling RFI flush with no_rfi_flush and nopti commit bc9c9304a45480797e13a8e1df96ffcf44fb62fe upstream. Because there may be some performance overhead of the RFI flush, add kernel command line options to disable it. We add a sensibly named 'no_rfi_flush' option, but we also hijack the x86 option 'nopti'. The RFI flush is not the same as KPTI, but if we see 'nopti' we can guess that the user is trying to avoid any overhead of Meltdown mitigations, and it means we don't have to educate every one about a different command line option. Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/setup_64.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 79e81a78354760..935059cb9e403e 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -788,8 +788,29 @@ early_initcall(disable_hardlockup_detector); #ifdef CONFIG_PPC_BOOK3S_64 static enum l1d_flush_type enabled_flush_types; static void *l1d_flush_fallback_area; +static bool no_rfi_flush; bool rfi_flush; +static int __init handle_no_rfi_flush(char *p) +{ + pr_info("rfi-flush: disabled on command line."); + no_rfi_flush = true; + return 0; +} +early_param("no_rfi_flush", handle_no_rfi_flush); + +/* + * The RFI flush is not KPTI, but because users will see doco that says to use + * nopti we hijack that option here to also disable the RFI flush. 
+ */ +static int __init handle_no_pti(char *p) +{ + pr_info("rfi-flush: disabling due to 'nopti' on command line.\n"); + handle_no_rfi_flush(NULL); + return 0; +} +early_param("nopti", handle_no_pti); + static void do_nothing(void *unused) { /* @@ -860,6 +881,7 @@ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) enabled_flush_types = types; - rfi_flush_enable(enable); + if (!no_rfi_flush) + rfi_flush_enable(enable); } #endif /* CONFIG_PPC_BOOK3S_64 */ From 4b5158cefcbd3132c9d382252dab4889c1bc8b56 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 10 Jan 2018 03:07:15 +1100 Subject: [PATCH 1387/3326] powerpc/pseries: Query hypervisor for RFI flush settings commit 8989d56878a7735dfdb234707a2fee6faf631085 upstream. A new hypervisor call is available which tells the guest settings related to the RFI flush. Use it to query the appropriate flush instruction(s), and whether the flush is required. Signed-off-by: Michael Neuling Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/pseries/setup.c | 35 ++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index a8531e01265842..ae4f596273b51a 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void) of_pci_check_probe_only(); } +static void pseries_setup_rfi_flush(void) +{ + struct h_cpu_char_result result; + enum l1d_flush_type types; + bool enable; + long rc; + + /* Enable by default */ + enable = true; + + rc = plpar_get_cpu_characteristics(&result); + if (rc == H_SUCCESS) { + types = L1D_FLUSH_NONE; + + if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) + types |= L1D_FLUSH_MTTRIG; + if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) + types |= L1D_FLUSH_ORI; + + /* Use fallback if nothing set in hcall */ + if (types == L1D_FLUSH_NONE) + types = L1D_FLUSH_FALLBACK; + + if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) + enable = false; + } else { + /* Default to fallback if case hcall is not available */ + types = L1D_FLUSH_FALLBACK; + } + + setup_rfi_flush(types, enable); +} + static void __init pSeries_setup_arch(void) { set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); @@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void) fwnmi_init(); + pseries_setup_rfi_flush(); + /* By default, only probe PCI (can be overridden by rtas_pci) */ pci_add_flags(PCI_PROBE_ONLY); From 5dc597185411ef22cdb9d29563a4f447f9999afd Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 10 Jan 2018 03:07:15 +1100 Subject: [PATCH 1388/3326] powerpc/powernv: Check device-tree for RFI flush settings commit 6e032b350cd1fdb830f18f8320ef0e13b4e24094 upstream. New device-tree properties are available which tell the hypervisor settings related to the RFI flush. Use them to determine the appropriate flush instruction to use, and whether the flush is required. 
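As with the pseries patch above, the advertised capabilities are treated as bit flags and the software displacement flush is used only when no instruction-based flush is available. The helper below is an illustrative sketch of that selection logic, not code from either platform file; the enum and flag values come from the asm/setup.h change earlier in this series.

/* Sketch only: collapse advertised firmware capabilities into flush types. */
static enum l1d_flush_type pick_flush_types(bool has_mttrig, bool has_ori30)
{
        enum l1d_flush_type types = L1D_FLUSH_NONE;

        if (has_mttrig)
                types |= L1D_FLUSH_MTTRIG;      /* mtspr TRIG2 based flush */
        if (has_ori30)
                types |= L1D_FLUSH_ORI;         /* ori 30,30,0 based flush */

        /* Nothing advertised: fall back to the displacement flush. */
        if (types == L1D_FLUSH_NONE)
                types = L1D_FLUSH_FALLBACK;

        return types;
}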
Signed-off-by: Oliver O'Halloran Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/powernv/setup.c | 49 ++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index bfe2aa702973ac..7966a314d93abe 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -36,13 +36,62 @@ #include #include #include +#include #include "powernv.h" +static void pnv_setup_rfi_flush(void) +{ + struct device_node *np, *fw_features; + enum l1d_flush_type type; + int enable; + + /* Default to fallback in case fw-features are not available */ + type = L1D_FLUSH_FALLBACK; + enable = 1; + + np = of_find_node_by_name(NULL, "ibm,opal"); + fw_features = of_get_child_by_name(np, "fw-features"); + of_node_put(np); + + if (fw_features) { + np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2"); + if (np && of_property_read_bool(np, "enabled")) + type = L1D_FLUSH_MTTRIG; + + of_node_put(np); + + np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0"); + if (np && of_property_read_bool(np, "enabled")) + type = L1D_FLUSH_ORI; + + of_node_put(np); + + /* Enable unless firmware says NOT to */ + enable = 2; + np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0"); + if (np && of_property_read_bool(np, "disabled")) + enable--; + + of_node_put(np); + + np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1"); + if (np && of_property_read_bool(np, "disabled")) + enable--; + + of_node_put(np); + of_node_put(fw_features); + } + + setup_rfi_flush(type, enable > 0); +} + static void __init pnv_setup_arch(void) { set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); + pnv_setup_rfi_flush(); + /* Initialize SMP */ pnv_smp_init(); From 1352130fe6aa4108fd6758687c419bb0d0c22f0d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 8 Dec 2017 13:49:39 +0100 Subject: [PATCH 1389/3326] futex: Avoid violating the 10th rule of futex commit c1e2f0eaf015fb7076d51a339011f2383e6dd389 upstream. Julia reported futex state corruption in the following scenario: waiter waker stealer (prio > waiter) futex(WAIT_REQUEUE_PI, uaddr, uaddr2, timeout=[N ms]) futex_wait_requeue_pi() futex_wait_queue_me() freezable_schedule() futex(LOCK_PI, uaddr2) futex(CMP_REQUEUE_PI, uaddr, uaddr2, 1, 0) /* requeues waiter to uaddr2 */ futex(UNLOCK_PI, uaddr2) wake_futex_pi() cmp_futex_value_locked(uaddr2, waiter) wake_up_q() task> futex(LOCK_PI, uaddr2) __rt_mutex_start_proxy_lock() try_to_take_rt_mutex() /* steals lock */ rt_mutex_set_owner(lock, stealer) rt_mutex_wait_proxy_lock() __rt_mutex_slowlock() try_to_take_rt_mutex() /* fails, lock held by stealer */ if (timeout && !timeout->task) return -ETIMEDOUT; fixup_owner() /* lock wasn't acquired, so, fixup_pi_state_owner skipped */ return -ETIMEDOUT; /* At this point, we've returned -ETIMEDOUT to userspace, but the * futex word shows waiter to be the owner, and the pi_mutex has * stealer as the owner */ futex_lock(LOCK_PI, uaddr2) -> bails with EDEADLK, futex word says we're owner. And suggested that what commit: 73d786bd043e ("futex: Rework inconsistent rt_mutex/futex_q state") removes from fixup_owner() looks to be just what is needed. And indeed it is -- I completely missed that requeue_pi could also result in this case. 
So we need to restore that, except that subsequent patches, like commit: 16ffa12d7425 ("futex: Pull rt_mutex_futex_unlock() out from under hb->lock") changed all the locking rules. Even without that, the sequence: - if (rt_mutex_futex_trylock(&q->pi_state->pi_mutex)) { - locked = 1; - goto out; - } - raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock); - owner = rt_mutex_owner(&q->pi_state->pi_mutex); - if (!owner) - owner = rt_mutex_next_owner(&q->pi_state->pi_mutex); - raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock); - ret = fixup_pi_state_owner(uaddr, q, owner); already suggests there were races; otherwise we'd never have to look at next_owner. So instead of doing 3 consecutive wait_lock sections with who knows what races, we do it all in a single section. Additionally, the usage of pi_state->owner in fixup_owner() was only safe because only the rt_mutex owner would modify it, which this additional case wrecks. Luckily the values can only change away and not to the value we're testing, this means we can do a speculative test and double check once we have the wait_lock. Fixes: 73d786bd043e ("futex: Rework inconsistent rt_mutex/futex_q state") Reported-by: Julia Cartwright Reported-by: Gratian Crisan Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Tested-by: Julia Cartwright Tested-by: Gratian Crisan Cc: Darren Hart Link: https://lkml.kernel.org/r/20171208124939.7livp7no2ov65rrc@hirez.programming.kicks-ass.net Signed-off-by: Greg Kroah-Hartman --- kernel/futex.c | 83 ++++++++++++++++++++++++++------- kernel/locking/rtmutex.c | 26 ++++++++--- kernel/locking/rtmutex_common.h | 1 + 3 files changed, 87 insertions(+), 23 deletions(-) diff --git a/kernel/futex.c b/kernel/futex.c index 76ed5921117a24..29ac5b64e7c7f5 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2294,21 +2294,17 @@ static void unqueue_me_pi(struct futex_q *q) spin_unlock(q->lock_ptr); } -/* - * Fixup the pi_state owner with the new owner. - * - * Must be called with hash bucket lock held and mm->sem held for non - * private futexes. - */ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, - struct task_struct *newowner) + struct task_struct *argowner) { - u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; struct futex_pi_state *pi_state = q->pi_state; u32 uval, uninitialized_var(curval), newval; - struct task_struct *oldowner; + struct task_struct *oldowner, *newowner; + u32 newtid; int ret; + lockdep_assert_held(q->lock_ptr); + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); oldowner = pi_state->owner; @@ -2317,11 +2313,17 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, newtid |= FUTEX_OWNER_DIED; /* - * We are here either because we stole the rtmutex from the - * previous highest priority waiter or we are the highest priority - * waiter but have failed to get the rtmutex the first time. + * We are here because either: + * + * - we stole the lock and pi_state->owner needs updating to reflect + * that (@argowner == current), * - * We have to replace the newowner TID in the user space variable. + * or: + * + * - someone stole our lock and we need to fix things to point to the + * new owner (@argowner == NULL). + * + * Either way, we have to replace the TID in the user space variable. * This must be atomic as we have to preserve the owner died bit here. * * Note: We write the user space value _before_ changing the pi_state @@ -2334,6 +2336,42 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, * in the PID check in lookup_pi_state. 
*/ retry: + if (!argowner) { + if (oldowner != current) { + /* + * We raced against a concurrent self; things are + * already fixed up. Nothing to do. + */ + ret = 0; + goto out_unlock; + } + + if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { + /* We got the lock after all, nothing to fix. */ + ret = 0; + goto out_unlock; + } + + /* + * Since we just failed the trylock; there must be an owner. + */ + newowner = rt_mutex_owner(&pi_state->pi_mutex); + BUG_ON(!newowner); + } else { + WARN_ON_ONCE(argowner != current); + if (oldowner == current) { + /* + * We raced against a concurrent self; things are + * already fixed up. Nothing to do. + */ + ret = 0; + goto out_unlock; + } + newowner = argowner; + } + + newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; + if (get_futex_value_locked(&uval, uaddr)) goto handle_fault; @@ -2434,15 +2472,28 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case: * - * We can safely read pi_state->owner without holding wait_lock - * because we now own the rt_mutex, only the owner will attempt - * to change it. + * Speculative pi_state->owner read (we don't hold wait_lock); + * since we own the lock pi_state->owner == current is the + * stable state, anything else needs more attention. */ if (q->pi_state->owner != current) ret = fixup_pi_state_owner(uaddr, q, current); goto out; } + /* + * If we didn't get the lock; check if anybody stole it from us. In + * that case, we need to fix up the uval to point to them instead of + * us, otherwise bad things happen. [10] + * + * Another speculative read; pi_state->owner == current is unstable + * but needs our attention. + */ + if (q->pi_state->owner == current) { + ret = fixup_pi_state_owner(uaddr, q, NULL); + goto out; + } + /* * Paranoia check. If we did not take the lock, then we should not be * the owner of the rt_mutex. diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 6f3dba6e4e9e14..65cc0cb984e6ae 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1290,6 +1290,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, return ret; } +static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) +{ + int ret = try_to_take_rt_mutex(lock, current, NULL); + + /* + * try_to_take_rt_mutex() sets the lock waiters bit + * unconditionally. Clean this up. + */ + fixup_rt_mutex_waiters(lock); + + return ret; +} + /* * Slow path try-lock function: */ @@ -1312,13 +1325,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) */ raw_spin_lock_irqsave(&lock->wait_lock, flags); - ret = try_to_take_rt_mutex(lock, current, NULL); - - /* - * try_to_take_rt_mutex() sets the lock waiters bit - * unconditionally. Clean this up. 
- */ - fixup_rt_mutex_waiters(lock); + ret = __rt_mutex_slowtrylock(lock); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); @@ -1505,6 +1512,11 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) return rt_mutex_slowtrylock(lock); } +int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) +{ + return __rt_mutex_slowtrylock(lock); +} + /** * rt_mutex_timed_lock - lock a rt_mutex interruptible * the timeout structure is provided diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index 124e98ca0b1741..68686b3ec3c17c 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -148,6 +148,7 @@ extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter); extern int rt_mutex_futex_trylock(struct rt_mutex *l); +extern int __rt_mutex_futex_trylock(struct rt_mutex *l); extern void rt_mutex_futex_unlock(struct rt_mutex *lock); extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, From 17ae6ccfe5dd85605dc44534348b506f95d16a61 Mon Sep 17 00:00:00 2001 From: Li Jinyue Date: Thu, 14 Dec 2017 17:04:54 +0800 Subject: [PATCH 1390/3326] futex: Prevent overflow by strengthen input validation commit fbe0e839d1e22d88810f3ee3e2f1479be4c0aa4a upstream. UBSAN reports signed integer overflow in kernel/futex.c: UBSAN: Undefined behaviour in kernel/futex.c:2041:18 signed integer overflow: 0 - -2147483648 cannot be represented in type 'int' Add a sanity check to catch negative values of nr_wake and nr_requeue. Signed-off-by: Li Jinyue Signed-off-by: Thomas Gleixner Cc: peterz@infradead.org Cc: dvhart@infradead.org Link: https://lkml.kernel.org/r/1513242294-31786-1-git-send-email-lijinyue@huawei.com Signed-off-by: Greg Kroah-Hartman --- kernel/futex.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/futex.c b/kernel/futex.c index 29ac5b64e7c7f5..52b3f47031580f 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1878,6 +1878,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, struct futex_q *this, *next; DEFINE_WAKE_Q(wake_q); + if (nr_wake < 0 || nr_requeue < 0) + return -EINVAL; + /* * When PI not supported: return -ENOSYS if requeue_pi is true, * consequently the compiler knows requeue_pi is always false past From c3162384aed4cfe3f1a1f40041f3ba8cd7704d88 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 9 Jan 2018 23:11:03 +0100 Subject: [PATCH 1391/3326] ALSA: seq: Make ioctls race-free commit b3defb791b26ea0683a93a4f49c77ec45ec96f10 upstream. The ALSA sequencer ioctls have no protection against racy calls while the concurrent operations may lead to interfere with each other. As reported recently, for example, the concurrent calls of setting client pool with a combination of write calls may lead to either the unkillable dead-lock or UAF. As a slightly big hammer solution, this patch introduces the mutex to make each ioctl exclusive. Although this may reduce performance via parallel ioctl calls, usually it's not demanded for sequencer usages, hence it should be negligible. 
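The serialization itself is a simple pattern: take a per-client mutex around the whole ioctl dispatch so concurrent callers cannot interleave. A generic sketch of that pattern follows; the struct and dispatcher names are placeholders rather than the sequencer's actual types, and the mutex is assumed to be mutex_init()ed when the client is created.

#include <linux/mutex.h>

struct demo_client {
        struct mutex ioctl_mutex;       /* serializes all ioctls for this client */
        /* ... other per-client state ... */
};

static long demo_dispatch(struct demo_client *client, unsigned int cmd, void *arg); /* assumed */

static long demo_ioctl(struct demo_client *client, unsigned int cmd, void *arg)
{
        long err;

        mutex_lock(&client->ioctl_mutex);
        err = demo_dispatch(client, cmd, arg);  /* one ioctl at a time per client */
        mutex_unlock(&client->ioctl_mutex);

        return err;
}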
Reported-by: Luo Quan Reviewed-by: Kees Cook Reviewed-by: Greg Kroah-Hartman Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/seq/seq_clientmgr.c | 3 +++ sound/core/seq/seq_clientmgr.h | 1 + 2 files changed, 4 insertions(+) diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index d10c780dfd5468..ac30fc1ab98bf0 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize) rwlock_init(&client->ports_lock); mutex_init(&client->ports_mutex); INIT_LIST_HEAD(&client->ports_list_head); + mutex_init(&client->ioctl_mutex); /* find free slot in the client table */ spin_lock_irqsave(&clients_lock, flags); @@ -2126,7 +2127,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd, return -EFAULT; } + mutex_lock(&client->ioctl_mutex); err = handler->func(client, &buf); + mutex_unlock(&client->ioctl_mutex); if (err >= 0) { /* Some commands includes a bug in 'dir' field. */ if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT || diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h index c6614254ef8af2..0611e1e0ed5ba0 100644 --- a/sound/core/seq/seq_clientmgr.h +++ b/sound/core/seq/seq_clientmgr.h @@ -61,6 +61,7 @@ struct snd_seq_client { struct list_head ports_list_head; rwlock_t ports_lock; struct mutex ports_mutex; + struct mutex ioctl_mutex; int convert32; /* convert 32->64bit */ /* output pool */ From 004ceccbcfc747e23322798058c604816e73bccc Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 10 Jan 2018 23:48:05 +0100 Subject: [PATCH 1392/3326] ALSA: pcm: Remove yet superfluous WARN_ON() commit 23b19b7b50fe1867da8d431eea9cd3e4b6328c2c upstream. muldiv32() contains a snd_BUG_ON() (which is morphed as WARN_ON() with debug option) for checking the case of 0 / 0. This would be helpful if this happens only as a logical error; however, since the hw refine is performed with any data set provided by user, the inconsistent values that can trigger such a condition might be passed easily. Actually, syzbot caught this by passing some zero'ed old hw_params ioctl. So, having snd_BUG_ON() there is simply superfluous and rather harmful to give unnecessary confusions. Let's get rid of it. Reported-by: syzbot+7e6ee55011deeebce15d@syzkaller.appspotmail.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/pcm_lib.c | 1 - 1 file changed, 1 deletion(-) diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index db7894bb028ccc..faa67861cbc17e 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -560,7 +560,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b, { u_int64_t n = (u_int64_t) a * b; if (c == 0) { - snd_BUG_ON(!n); *r = 0; return UINT_MAX; } From f50902ad7f8133916f1d5496450734642a55280e Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 10 Jan 2018 08:34:28 +0100 Subject: [PATCH 1393/3326] ALSA: hda - Apply headphone noise quirk for another Dell XPS 13 variant commit e4c9fd10eb21376f44723c40ad12395089251c28 upstream. There is another Dell XPS 13 variant (SSID 1028:082a) that requires the existing fixup for reducing the headphone noise. This patch adds the quirk entry for that. BugLink: http://lkml.kernel.org/r/CAHXyb9ZCZJzVisuBARa+UORcjRERV8yokez=DP1_5O5isTz0ZA@mail.gmail.com Reported-and-tested-by: Francisco G. 
Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index acdb196ddb4410..145e92d6ca94a3 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6173,6 +6173,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), + SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), From f0299a1ceecf3ecec3b6c350a23101bdee009ae4 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 10 Jan 2018 10:53:18 +0100 Subject: [PATCH 1394/3326] ALSA: hda - Apply the existing quirk to iMac 14,1 commit 031f335cda879450095873003abb03ae8ed3b74a upstream. iMac 14,1 requires the same quirk as iMac 12,2, using GPIO 2 and 3 for headphone and speaker output amps. Add the codec SSID quirk entry (106b:0600) accordingly. BugLink: http://lkml.kernel.org/r/CAEw6Zyteav09VGHRfD5QwsfuWv5a43r0tFBNbfcHXoNrxVz7ew@mail.gmail.com Reported-by: Freaky Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_cirrus.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 80bbadc8372144..d6e079f4ec09d2 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/ /* codec SSID */ + SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122), SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), From b3049d3d0688b1647cba4a129b156a510dbbb0b4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 9 Jan 2018 23:03:46 +0300 Subject: [PATCH 1395/3326] IB/hfi1: Prevent a NULL dereference commit 57194fa763bfa1a0908f30d4c77835beaa118fcb upstream. In the original code, we set "fd->uctxt" to NULL and then dereference it which will cause an Oops. Fixes: f2a3bc00a03c ("IB/hfi1: Protect context array set/clear with spinlock") Signed-off-by: Dan Carpenter Reviewed-by: Michael J. 
Ruhl Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/hfi1/file_ops.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index d9a1e989313641..fd28f09b44452a 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -881,11 +881,11 @@ static int complete_subctxt(struct hfi1_filedata *fd) } if (ret) { - hfi1_rcd_put(fd->uctxt); - fd->uctxt = NULL; spin_lock_irqsave(&fd->dd->uctxt_lock, flags); __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags); + hfi1_rcd_put(fd->uctxt); + fd->uctxt = NULL; } return ret; From 6985762172f37bd29fb1dd2bdeccb11a182a86bf Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Fri, 12 Jan 2018 07:58:39 +0200 Subject: [PATCH 1396/3326] RDMA/mlx5: Fix out-of-bound access while querying AH commit ae59c3f0b6cfd472fed96e50548a799b8971d876 upstream. The rdma_ah_find_type() accesses the port array based on an index controlled by userspace. The existing bounds check is after the first use of the index, so userspace can generate an out of bounds access, as shown by the KASN report below. ================================================================== BUG: KASAN: slab-out-of-bounds in to_rdma_ah_attr+0xa8/0x3b0 Read of size 4 at addr ffff880019ae2268 by task ibv_rc_pingpong/409 CPU: 0 PID: 409 Comm: ibv_rc_pingpong Not tainted 4.15.0-rc2-00031-gb60a3faf5b83-dirty #3 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 Call Trace: dump_stack+0xe9/0x18f print_address_description+0xa2/0x350 kasan_report+0x3a5/0x400 to_rdma_ah_attr+0xa8/0x3b0 mlx5_ib_query_qp+0xd35/0x1330 ib_query_qp+0x8a/0xb0 ib_uverbs_query_qp+0x237/0x7f0 ib_uverbs_write+0x617/0xd80 __vfs_write+0xf7/0x500 vfs_write+0x149/0x310 SyS_write+0xca/0x190 entry_SYSCALL_64_fastpath+0x18/0x85 RIP: 0033:0x7fe9c7a275a0 RSP: 002b:00007ffee5498738 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 00007fe9c7ce4b00 RCX: 00007fe9c7a275a0 RDX: 0000000000000018 RSI: 00007ffee5498800 RDI: 0000000000000003 RBP: 000055d0c8d3f010 R08: 00007ffee5498800 R09: 0000000000000018 R10: 00000000000000ba R11: 0000000000000246 R12: 0000000000008000 R13: 0000000000004fb0 R14: 000055d0c8d3f050 R15: 00007ffee5498560 Allocated by task 1: __kmalloc+0x3f9/0x430 alloc_mad_private+0x25/0x50 ib_mad_post_receive_mads+0x204/0xa60 ib_mad_init_device+0xa59/0x1020 ib_register_device+0x83a/0xbc0 mlx5_ib_add+0x50e/0x5c0 mlx5_add_device+0x142/0x410 mlx5_register_interface+0x18f/0x210 mlx5_ib_init+0x56/0x63 do_one_initcall+0x15b/0x270 kernel_init_freeable+0x2d8/0x3d0 kernel_init+0x14/0x190 ret_from_fork+0x24/0x30 Freed by task 0: (stack is not available) The buggy address belongs to the object at ffff880019ae2000 which belongs to the cache kmalloc-512 of size 512 The buggy address is located 104 bytes to the right of 512-byte region [ffff880019ae2000, ffff880019ae2200) The buggy address belongs to the page: page:000000005d674e18 count:1 mapcount:0 mapping: (null) index:0x0 compound_mapcount: 0 flags: 0x4000000000008100(slab|head) raw: 4000000000008100 0000000000000000 0000000000000000 00000001000c000c raw: dead000000000100 dead000000000200 ffff88001a402000 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff880019ae2100: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
ffff880019ae2180: 00 00 00 00 00 00 00 00 00 00 00 00 00 fc fc fc >ffff880019ae2200: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ^ ffff880019ae2280: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ffff880019ae2300: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ================================================================== Disabling lock debugging due to kernel taint Fixes: 44c58487d51a ("IB/core: Define 'ib' and 'roce' rdma_ah_attr types") Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/mlx5/qp.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index acb79d3a4f1d37..756ece6118c0f6 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4303,12 +4303,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, memset(ah_attr, 0, sizeof(*ah_attr)); - ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); - rdma_ah_set_port_num(ah_attr, path->port); - if (rdma_ah_get_port_num(ah_attr) == 0 || - rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports)) + if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports)) return; + ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); + rdma_ah_set_port_num(ah_attr, path->port); rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf); From 4db98c583205ce93771ee16eeb5543185706df0c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 14 Jan 2018 23:19:49 +0100 Subject: [PATCH 1397/3326] timers: Unconditionally check deferrable base commit ed4bbf7910b28ce3c691aef28d245585eaabda06 upstream. When the timer base is checked for expired timers then the deferrable base must be checked as well. This was missed when making the deferrable base independent of base::nohz_active. Fixes: ced6d5c11d3e ("timers: Use deferrable base independent of base::nohz_active") Signed-off-by: Thomas Gleixner Cc: Anna-Maria Gleixner Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Sebastian Siewior Cc: Paul McKenney Cc: rt@linutronix.de Signed-off-by: Greg Kroah-Hartman --- kernel/time/timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 73e3cdbc61f108..db5e6daadd94ed 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1656,7 +1656,7 @@ void run_local_timers(void) hrtimer_run_queues(); /* Raise the softirq only if required. */ if (time_before(jiffies, base->clk)) { - if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active) + if (!IS_ENABLED(CONFIG_NO_HZ_COMMON)) return; /* CPU is awake, so check the deferrable base. */ base++; From bee113ae1a606b5f5a2bcd16a54a3889bd2e51b2 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Dec 2017 18:13:05 -0600 Subject: [PATCH 1398/3326] af_key: fix buffer overread in verify_address_len() commit 06b335cb51af018d5feeff5dd4fd53847ddb675a upstream. If a message sent to a PF_KEY socket ended with one of the extensions that takes a 'struct sadb_address' but there were not enough bytes remaining in the message for the ->sa_family member of the 'struct sockaddr' which is supposed to follow, then verify_address_len() read past the end of the message, into uninitialized memory. Fix it by returning -EINVAL in this case. This bug was found using syzkaller with KMSAN. 
Reproducer: #include #include #include int main() { int sock = socket(PF_KEY, SOCK_RAW, PF_KEY_V2); char buf[24] = { 0 }; struct sadb_msg *msg = (void *)buf; struct sadb_address *addr = (void *)(msg + 1); msg->sadb_msg_version = PF_KEY_V2; msg->sadb_msg_type = SADB_DELETE; msg->sadb_msg_len = 3; addr->sadb_address_len = 1; addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; write(sock, buf, 24); } Reported-by: Alexander Potapenko Signed-off-by: Eric Biggers Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/key/af_key.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/key/af_key.c b/net/key/af_key.c index a00d607e7224d4..4d6885e51acb85 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -401,6 +401,11 @@ static int verify_address_len(const void *p) #endif int len; + if (sp->sadb_address_len < + DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family), + sizeof(uint64_t))) + return -EINVAL; + switch (addr->sa_family) { case AF_INET: len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t)); From 754705d8e0e12c05310117dd42ff3eb2681365fd Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Dec 2017 18:15:23 -0600 Subject: [PATCH 1399/3326] af_key: fix buffer overread in parse_exthdrs() commit 4e765b4972af7b07adcb1feb16e7a525ce1f6b28 upstream. If a message sent to a PF_KEY socket ended with an incomplete extension header (fewer than 4 bytes remaining), then parse_exthdrs() read past the end of the message, into uninitialized memory. Fix it by returning -EINVAL in this case. Reproducer: #include #include #include int main() { int sock = socket(PF_KEY, SOCK_RAW, PF_KEY_V2); char buf[17] = { 0 }; struct sadb_msg *msg = (void *)buf; msg->sadb_msg_version = PF_KEY_V2; msg->sadb_msg_type = SADB_DELETE; msg->sadb_msg_len = 2; write(sock, buf, 17); } Signed-off-by: Eric Biggers Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/key/af_key.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/key/af_key.c b/net/key/af_key.c index 4d6885e51acb85..2ad693232f748e 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -516,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void * uint16_t ext_type; int ext_len; + if (len < sizeof(*ehdr)) + return -EINVAL; + ext_len = ehdr->sadb_ext_len; ext_len *= sizeof(uint64_t); ext_type = ehdr->sadb_ext_type; From 9ace222b5db287645bbe6c5531201750d3fbe493 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 26 Nov 2017 15:31:04 +0200 Subject: [PATCH 1400/3326] iser-target: Fix possible use-after-free in connection establishment error commit cd52cb26e7ead5093635e98e07e221e4df482d34 upstream. In case we fail to establish the connection we must drain our pre-posted login recieve work request before continuing safely with connection teardown. 
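The ordering is the point of the fix: draining first guarantees that the pre-posted login receive work request has completed (as a flush error) before the connection state its completion handler may touch is torn down. A hedged sketch of that teardown order, with illustrative names rather than the driver's actual ones:

#include <rdma/rdma_cm.h>
#include <linux/list.h>

struct demo_conn {
        struct ib_qp *qp;
        struct list_head node;
        struct rdma_cm_id *cm_id;
};

static void demo_connect_error(struct demo_conn *conn)
{
        ib_drain_qp(conn->qp);          /* wait for posted WRs to flush first */
        list_del_init(&conn->node);     /* only now unlink connection state */
        conn->cm_id = NULL;
        /* final reference drop would happen here */
}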
Fixes: a060b5629ab0 ("IB/core: generic RDMA READ/WRITE API") Reported-by: Amrani, Ram Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/isert/ib_isert.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index ceabdb85df8bd7..9d4785ba24cbc5 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; + ib_drain_qp(isert_conn->qp); list_del_init(&isert_conn->node); isert_conn->cm_id = NULL; isert_put_conn(isert_conn); From 36ae2e6f5c01f2c01c3224969f676b1b3b547aaf Mon Sep 17 00:00:00 2001 From: Josh Snyder Date: Mon, 18 Dec 2017 16:15:10 +0000 Subject: [PATCH 1401/3326] delayacct: Account blkio completion on the correct task commit c96f5471ce7d2aefd0dda560cc23f08ab00bc65d upstream. Before commit: e33a9bba85a8 ("sched/core: move IO scheduling accounting from io_schedule_timeout() into scheduler") delayacct_blkio_end() was called after context-switching into the task which completed I/O. This resulted in double counting: the task would account a delay both waiting for I/O and for time spent in the runqueue. With e33a9bba85a8, delayacct_blkio_end() is called by try_to_wake_up(). In ttwu, we have not yet context-switched. This is more correct, in that the delay accounting ends when the I/O is complete. But delayacct_blkio_end() relies on 'get_current()', and we have not yet context-switched into the task whose I/O completed. This results in the wrong task having its delay accounting statistics updated. Instead of doing that, pass the task_struct being woken to delayacct_blkio_end(), so that it can update the statistics of the correct task. 
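A minimal sketch of the change being described (simplified field names, locking elided; not the kernel's exact code): the completion path charges the delay to the woken task's own delay structure instead of dereferencing current.

/* Sketch: account the blkio delay against the task being woken up. */
static void demo_blkio_end(struct task_struct *p, u64 now_ns)
{
        struct task_delay_info *delays = p->delays;     /* the woken task's stats */
        s64 ns = now_ns - delays->blkio_start;

        if (ns > 0) {                   /* the real code takes delays->lock here */
                delays->blkio_delay += ns;
                delays->blkio_count++;
        }
}

In this sketch the waker would call demo_blkio_end(p, ktime_get_ns()) from the wakeup path, where p is the task whose I/O completed.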
Signed-off-by: Josh Snyder Acked-by: Tejun Heo Acked-by: Balbir Singh Cc: Brendan Gregg Cc: Jens Axboe Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-block@vger.kernel.org Fixes: e33a9bba85a8 ("sched/core: move IO scheduling accounting from io_schedule_timeout() into scheduler") Link: http://lkml.kernel.org/r/1513613712-571-1-git-send-email-joshs@netflix.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- include/linux/delayacct.h | 8 ++++---- kernel/delayacct.c | 42 ++++++++++++++++++++++++--------------- kernel/sched/core.c | 6 +++--- 3 files changed, 33 insertions(+), 23 deletions(-) diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 4178d24935477d..5e335b6203f49d 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -71,7 +71,7 @@ extern void delayacct_init(void); extern void __delayacct_tsk_init(struct task_struct *); extern void __delayacct_tsk_exit(struct task_struct *); extern void __delayacct_blkio_start(void); -extern void __delayacct_blkio_end(void); +extern void __delayacct_blkio_end(struct task_struct *); extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); extern void __delayacct_freepages_start(void); @@ -122,10 +122,10 @@ static inline void delayacct_blkio_start(void) __delayacct_blkio_start(); } -static inline void delayacct_blkio_end(void) +static inline void delayacct_blkio_end(struct task_struct *p) { if (current->delays) - __delayacct_blkio_end(); + __delayacct_blkio_end(p); delayacct_clear_flag(DELAYACCT_PF_BLKIO); } @@ -169,7 +169,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk) {} static inline void delayacct_blkio_start(void) {} -static inline void delayacct_blkio_end(void) +static inline void delayacct_blkio_end(struct task_struct *p) {} static inline int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 4a1c33416b6a2d..e2764d767f186e 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -51,16 +51,16 @@ void __delayacct_tsk_init(struct task_struct *tsk) * Finish delay accounting for a statistic using its timestamps (@start), * accumalator (@total) and @count */ -static void delayacct_end(u64 *start, u64 *total, u32 *count) +static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count) { s64 ns = ktime_get_ns() - *start; unsigned long flags; if (ns > 0) { - spin_lock_irqsave(¤t->delays->lock, flags); + spin_lock_irqsave(lock, flags); *total += ns; (*count)++; - spin_unlock_irqrestore(¤t->delays->lock, flags); + spin_unlock_irqrestore(lock, flags); } } @@ -69,17 +69,25 @@ void __delayacct_blkio_start(void) current->delays->blkio_start = ktime_get_ns(); } -void __delayacct_blkio_end(void) +/* + * We cannot rely on the `current` macro, as we haven't yet switched back to + * the process being woken. 
+ */ +void __delayacct_blkio_end(struct task_struct *p) { - if (current->delays->flags & DELAYACCT_PF_SWAPIN) - /* Swapin block I/O */ - delayacct_end(¤t->delays->blkio_start, - ¤t->delays->swapin_delay, - ¤t->delays->swapin_count); - else /* Other block I/O */ - delayacct_end(¤t->delays->blkio_start, - ¤t->delays->blkio_delay, - ¤t->delays->blkio_count); + struct task_delay_info *delays = p->delays; + u64 *total; + u32 *count; + + if (p->delays->flags & DELAYACCT_PF_SWAPIN) { + total = &delays->swapin_delay; + count = &delays->swapin_count; + } else { + total = &delays->blkio_delay; + count = &delays->blkio_count; + } + + delayacct_end(&delays->lock, &delays->blkio_start, total, count); } int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) @@ -153,8 +161,10 @@ void __delayacct_freepages_start(void) void __delayacct_freepages_end(void) { - delayacct_end(¤t->delays->freepages_start, - ¤t->delays->freepages_delay, - ¤t->delays->freepages_count); + delayacct_end( + ¤t->delays->lock, + ¤t->delays->freepages_start, + ¤t->delays->freepages_delay, + ¤t->delays->freepages_count); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8fa7b6f9e19b86..55062461b2fd16 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2046,7 +2046,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) p->state = TASK_WAKING; if (p->in_iowait) { - delayacct_blkio_end(); + delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); } @@ -2059,7 +2059,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) #else /* CONFIG_SMP */ if (p->in_iowait) { - delayacct_blkio_end(); + delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); } @@ -2112,7 +2112,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf) if (!task_on_rq_queued(p)) { if (p->in_iowait) { - delayacct_blkio_end(); + delayacct_blkio_end(p); atomic_dec(&rq->nr_iowait); } ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK); From f41b2d7ee791e845147a3e3cafb25493dca6870a Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 15 Jan 2018 08:17:07 -0600 Subject: [PATCH 1402/3326] objtool: Fix seg fault with gold linker commit 2a0098d70640dda192a79966c14d449e7a34d675 upstream. Objtool segfaults when the gold linker is used with CONFIG_MODVERSIONS=y and CONFIG_UNWINDER_ORC=y. With CONFIG_MODVERSIONS=y, the .o file gets passed to the linker before being passed to objtool. The gold linker seems to strip unused ELF symbols by default, which confuses objtool and causes the seg fault when it's trying to generate ORC metadata. Objtool should really be running immediately after GCC anyway, without a linker call in between. Change the makefile ordering so that objtool is called before the linker. 
Reported-and-tested-by: Markus Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: ee9f8fce9964 ("x86/unwind: Add the ORC unwinder") Link: http://lkml.kernel.org/r/355f04da33581f4a3bf82e5b512973624a1e23a2.1516025651.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Cc: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- scripts/Makefile.build | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index e63af4e19382af..6bed45dc2cb1bd 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -270,12 +270,18 @@ else objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) endif +ifdef CONFIG_MODVERSIONS +objtool_o = $(@D)/.tmp_$(@F) +else +objtool_o = $(@) +endif + # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file # 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file cmd_objtool = $(if $(patsubst y%,, \ $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ - $(__objtool_obj) $(objtool_args) "$(@)";) + $(__objtool_obj) $(objtool_args) "$(objtool_o)";) objtool_obj = $(if $(patsubst y%,, \ $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ $(__objtool_obj)) @@ -291,15 +297,15 @@ objtool_dep = $(objtool_obj) \ define rule_cc_o_c $(call echo-cmd,checksrc) $(cmd_checksrc) \ $(call cmd_and_fixdep,cc_o_c) \ - $(cmd_modversions_c) \ $(call echo-cmd,objtool) $(cmd_objtool) \ + $(cmd_modversions_c) \ $(call echo-cmd,record_mcount) $(cmd_record_mcount) endef define rule_as_o_S $(call cmd_and_fixdep,as_o_S) \ - $(cmd_modversions_S) \ - $(call echo-cmd,objtool) $(cmd_objtool) + $(call echo-cmd,objtool) $(cmd_objtool) \ + $(cmd_modversions_S) endef # List module undefined symbols (or empty line if not enabled) From f2264bb027c6d0ad6405cddb37ca9ebad3209316 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Th=C3=A9baudeau?= Date: Sun, 14 Jan 2018 19:43:05 +0100 Subject: [PATCH 1403/3326] mmc: sdhci-esdhc-imx: Fix i.MX53 eSDHCv3 clock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 499ed50f603b4c9834197b2411ba3bd9aaa624d4 upstream. Commit 5143c953a786 ("mmc: sdhci-esdhc-imx: Allow all supported prescaler values") made it possible to set SYSCTL.SDCLKFS to 0 in SDR mode, thus bypassing the SD clock frequency prescaler, in order to be able to get higher SD clock frequencies in some contexts. However, that commit missed the fact that this value is illegal on the eSDHCv3 instance of the i.MX53. This seems to be the only exception on i.MX, this value being legal even for the eSDHCv2 instances of the i.MX53. Fix this issue by changing the minimum prescaler value if the i.MX53 eSDHCv3 is detected. According to the i.MX53 reference manual, if DLLCTRL[10] can be set, then the controller is eSDHCv3, else it is eSDHCv2. This commit fixes the following issue, which was preventing the i.MX53 Loco (IMX53QSB) board from booting Linux 4.15.0-rc5: [ 1.882668] mmcblk1: error -84 transferring data, sector 2048, nr 8, cmd response 0x900, card status 0xc00 [ 2.002255] mmcblk1: error -84 transferring data, sector 2050, nr 6, cmd response 0x900, card status 0xc00 [ 12.645056] mmc1: Timeout waiting for hardware interrupt. 
[ 12.650473] mmc1: sdhci: ============ SDHCI REGISTER DUMP =========== [ 12.656921] mmc1: sdhci: Sys addr: 0x00000000 | Version: 0x00001201 [ 12.663366] mmc1: sdhci: Blk size: 0x00000004 | Blk cnt: 0x00000000 [ 12.669813] mmc1: sdhci: Argument: 0x00000000 | Trn mode: 0x00000013 [ 12.676258] mmc1: sdhci: Present: 0x01f8028f | Host ctl: 0x00000013 [ 12.682703] mmc1: sdhci: Power: 0x00000002 | Blk gap: 0x00000000 [ 12.689148] mmc1: sdhci: Wake-up: 0x00000000 | Clock: 0x0000003f [ 12.695594] mmc1: sdhci: Timeout: 0x0000008e | Int stat: 0x00000000 [ 12.702039] mmc1: sdhci: Int enab: 0x107f004b | Sig enab: 0x107f004b [ 12.708485] mmc1: sdhci: AC12 err: 0x00000000 | Slot int: 0x00001201 [ 12.714930] mmc1: sdhci: Caps: 0x07eb0000 | Caps_1: 0x08100810 [ 12.721375] mmc1: sdhci: Cmd: 0x0000163a | Max curr: 0x00000000 [ 12.727821] mmc1: sdhci: Resp[0]: 0x00000920 | Resp[1]: 0x00000000 [ 12.734265] mmc1: sdhci: Resp[2]: 0x00000000 | Resp[3]: 0x00000000 [ 12.740709] mmc1: sdhci: Host ctl2: 0x00000000 [ 12.745157] mmc1: sdhci: ADMA Err: 0x00000001 | ADMA Ptr: 0xc8049200 [ 12.751601] mmc1: sdhci: ============================================ [ 12.758110] print_req_error: I/O error, dev mmcblk1, sector 2050 [ 12.764135] Buffer I/O error on dev mmcblk1p1, logical block 0, lost sync page write [ 12.775163] EXT4-fs (mmcblk1p1): mounted filesystem without journal. Opts: (null) [ 12.782746] VFS: Mounted root (ext4 filesystem) on device 179:9. [ 12.789151] mmcblk1: response CRC error sending SET_BLOCK_COUNT command, card status 0x900 Signed-off-by: Benoît Thébaudeau Reported-by: Wladimir J. van der Laan Tested-by: Wladimir J. van der Laan Fixes: 5143c953a786 ("mmc: sdhci-esdhc-imx: Allow all supported prescaler values") Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci-esdhc-imx.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 85140c9af58129..8b941f814472f8 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -687,6 +687,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, return; } + /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */ + if (is_imx53_esdhc(imx_data)) { + /* + * According to the i.MX53 reference manual, if DLLCTRL[10] can + * be set, then the controller is eSDHCv3, else it is eSDHCv2. + */ + val = readl(host->ioaddr + ESDHC_DLL_CTRL); + writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL); + temp = readl(host->ioaddr + ESDHC_DLL_CTRL); + writel(val, host->ioaddr + ESDHC_DLL_CTRL); + if (temp & BIT(10)) + pre_div = 2; + } + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); From fbb8c0acc8e1458acdae392a5226b371243444e4 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Wed, 10 Jan 2018 18:36:02 +0300 Subject: [PATCH 1404/3326] x86/kasan: Panic if there is not enough memory to boot commit 0d39e2669d7b0fefd2d8f9e7868ae669b364d9ba upstream. Currently KASAN doesn't panic in case it don't have enough memory to boot. Instead, it crashes in some random place: kernel BUG at arch/x86/mm/physaddr.c:27! RIP: 0010:__phys_addr+0x268/0x276 Call Trace: kasan_populate_shadow+0x3f2/0x497 kasan_init+0x12e/0x2b2 setup_arch+0x2825/0x2a2c start_kernel+0xc8/0x15f4 x86_64_start_reservations+0x2a/0x2c x86_64_start_kernel+0x72/0x75 secondary_startup_64+0xa5/0xb0 Use memblock_virt_alloc_try_nid() for allocations without failure fallback. 
It will panic with an out of memory message. Reported-by: kernel test robot Signed-off-by: Andrey Ryabinin Signed-off-by: Thomas Gleixner Acked-by: Dmitry Vyukov Cc: kasan-dev@googlegroups.com Cc: Alexander Potapenko Cc: lkp@01.org Link: https://lkml.kernel.org/r/20180110153602.18919-1-aryabinin@virtuozzo.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/kasan_init_64.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 47388f0c0e5964..af6f2f9c6a265d 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -21,10 +21,14 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES]; static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); -static __init void *early_alloc(size_t size, int nid) +static __init void *early_alloc(size_t size, int nid, bool panic) { - return memblock_virt_alloc_try_nid_nopanic(size, size, - __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); + if (panic) + return memblock_virt_alloc_try_nid(size, size, + __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); + else + return memblock_virt_alloc_try_nid_nopanic(size, size, + __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); } static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, @@ -38,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, if (boot_cpu_has(X86_FEATURE_PSE) && ((end - addr) == PMD_SIZE) && IS_ALIGNED(addr, PMD_SIZE)) { - p = early_alloc(PMD_SIZE, nid); + p = early_alloc(PMD_SIZE, nid, false); if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) return; else if (p) memblock_free(__pa(p), PMD_SIZE); } - p = early_alloc(PAGE_SIZE, nid); + p = early_alloc(PAGE_SIZE, nid, true); pmd_populate_kernel(&init_mm, pmd, p); } @@ -57,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, if (!pte_none(*pte)) continue; - p = early_alloc(PAGE_SIZE, nid); + p = early_alloc(PAGE_SIZE, nid, true); entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); set_pte_at(&init_mm, addr, pte, entry); } while (pte++, addr += PAGE_SIZE, addr != end); @@ -75,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr, if (boot_cpu_has(X86_FEATURE_GBPAGES) && ((end - addr) == PUD_SIZE) && IS_ALIGNED(addr, PUD_SIZE)) { - p = early_alloc(PUD_SIZE, nid); + p = early_alloc(PUD_SIZE, nid, false); if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) return; else if (p) memblock_free(__pa(p), PUD_SIZE); } - p = early_alloc(PAGE_SIZE, nid); + p = early_alloc(PAGE_SIZE, nid, true); pud_populate(&init_mm, pud, p); } @@ -101,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr, unsigned long next; if (p4d_none(*p4d)) { - void *p = early_alloc(PAGE_SIZE, nid); + void *p = early_alloc(PAGE_SIZE, nid, true); p4d_populate(&init_mm, p4d, p); } @@ -122,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr, unsigned long next; if (pgd_none(*pgd)) { - p = early_alloc(PAGE_SIZE, nid); + p = early_alloc(PAGE_SIZE, nid, true); pgd_populate(&init_mm, pgd, p); } From 051547583bdda4b74953053a1034026c56b55c4c Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 12 Jan 2018 17:49:25 +0000 Subject: [PATCH 1405/3326] x86/retpoline: Fill RSB on context switch for affected CPUs commit c995efd5a740d9cbafbf58bde4973e8b50b4d761 upstream. 
On context switch from a shallow call stack to a deeper one, as the CPU does 'ret' up the deeper side it may encounter RSB entries (predictions for where the 'ret' goes to) which were populated in userspace. This is problematic if neither SMEP nor KPTI (the latter of which marks userspace pages as NX for the kernel) are active, as malicious code in userspace may then be executed speculatively. Overwrite the CPU's return prediction stack with calls which are predicted to return to an infinite loop, to "capture" speculation if this happens. This is required both for retpoline, and also in conjunction with IBRS for !SMEP && !KPTI. On Skylake+ the problem is slightly different, and an *underflow* of the RSB may cause errant branch predictions to occur. So there it's not so much overwrite, as *filling* the RSB to attempt to prevent it getting empty. This is only a partial solution for Skylake+ since there are many other conditions which may result in the RSB becoming empty. The full solution on Skylake+ is to use IBRS, which will prevent the problem even when the RSB becomes empty. With IBRS, the RSB-stuffing will not be required on context switch. [ tglx: Added missing vendor check and slighty massaged comments and changelog ] Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Acked-by: Arjan van de Ven Cc: gnomes@lxorguk.ukuu.org.uk Cc: Rik van Riel Cc: Andi Kleen Cc: Josh Poimboeuf Cc: thomas.lendacky@amd.com Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Kees Cook Cc: Tim Chen Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1515779365-9032-1-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_32.S | 11 +++++++++ arch/x86/entry/entry_64.S | 11 +++++++++ arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++++++++++++++ 4 files changed, 59 insertions(+) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index a1f28a54f23a42..60c4c342316cdf 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -244,6 +244,17 @@ ENTRY(__switch_to_asm) movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset #endif +#ifdef CONFIG_RETPOLINE + /* + * When switching from a shallower to a deeper call stack + * the RSB may either underflow or use entries populated + * with userspace addresses. On CPUs where those concerns + * exist, overwrite the RSB with entries which capture + * speculative execution to prevent attack. + */ + FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW +#endif + /* restore callee-saved registers */ popl %esi popl %edi diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index f5fda5f26e3468..6923f456dc0714 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -487,6 +487,17 @@ ENTRY(__switch_to_asm) movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset #endif +#ifdef CONFIG_RETPOLINE + /* + * When switching from a shallower to a deeper call stack + * the RSB may either underflow or use entries populated + * with userspace addresses. On CPUs where those concerns + * exist, overwrite the RSB with entries which capture + * speculative execution to prevent attack. 
+ */ + FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW +#endif + /* restore callee-saved registers */ popq %r15 popq %r14 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index f275447862f458..aa09559b2c0bff 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -211,6 +211,7 @@ #define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ +#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index e4dc26185aa70b..390b3dc3d43827 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -23,6 +23,7 @@ #include #include #include +#include static void __init spectre_v2_select_mitigation(void); @@ -155,6 +156,23 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) return SPECTRE_V2_CMD_NONE; } +/* Check for Skylake-like CPUs (for RSB handling) */ +static bool __init is_skylake_era(void) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6) { + switch (boot_cpu_data.x86_model) { + case INTEL_FAM6_SKYLAKE_MOBILE: + case INTEL_FAM6_SKYLAKE_DESKTOP: + case INTEL_FAM6_SKYLAKE_X: + case INTEL_FAM6_KABYLAKE_MOBILE: + case INTEL_FAM6_KABYLAKE_DESKTOP: + return true; + } + } + return false; +} + static void __init spectre_v2_select_mitigation(void) { enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); @@ -213,6 +231,24 @@ static void __init spectre_v2_select_mitigation(void) spectre_v2_enabled = mode; pr_info("%s\n", spectre_v2_strings[mode]); + + /* + * If neither SMEP or KPTI are available, there is a risk of + * hitting userspace addresses in the RSB after a context switch + * from a shallow call stack to a deeper one. To prevent this fill + * the entire RSB, even when using IBRS. + * + * Skylake era CPUs have a separate issue with *underflow* of the + * RSB, when they will predict 'ret' targets from the generic BTB. + * The proper mitigation for this is IBRS. If IBRS is not supported + * or deactivated in favour of retpolines the RSB fill on context + * switch is required. + */ + if ((!boot_cpu_has(X86_FEATURE_PTI) && + !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); + pr_info("Filling RSB on context switch\n"); + } } #undef pr_fmt From 956ec9e7b59a1fb3fd4c9bc8a13a7f7700e9d7d2 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Sat, 13 Jan 2018 17:27:30 -0600 Subject: [PATCH 1406/3326] x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros commit 28d437d550e1e39f805d99f9f8ac399c778827b7 upstream. The PAUSE instruction is currently used in the retpoline and RSB filling macros as a speculation trap. The use of PAUSE was originally suggested because it showed a very, very small difference in the amount of cycles/time used to execute the retpoline as compared to LFENCE. On AMD, the PAUSE instruction is not a serializing instruction, so the pause/jmp loop will use excess power as it is speculated over waiting for return to mispredict to the correct target. 
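A minimal standalone sketch of the RSB_CTXSW decision made in spectre_v2_select_mitigation() in the context-switch patch above; the struct and the way the bits are detected are illustrative, only the boolean condition mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

struct cpu_caps {
	bool smep;		/* supervisor mode execution prevention */
	bool kpti;		/* kernel page-table isolation active */
	bool skylake_era;	/* Skylake/Kaby Lake family, may underflow the RSB */
};

/* Fill the RSB on context switch if userspace RSB entries could be consumed
 * by the kernel (!SMEP && !KPTI), or if the CPU may underflow the RSB. */
static bool need_rsb_ctxsw(const struct cpu_caps *c)
{
	return (!c->smep && !c->kpti) || c->skylake_era;
}

int main(void)
{
	struct cpu_caps old_cpu = { .smep = false, .kpti = false, .skylake_era = false };
	struct cpu_caps skx     = { .smep = true,  .kpti = true,  .skylake_era = true  };

	printf("no SMEP/KPTI         -> fill RSB: %d\n", need_rsb_ctxsw(&old_cpu));
	printf("Skylake-X, SMEP+KPTI -> fill RSB: %d\n", need_rsb_ctxsw(&skx));
	return 0;
}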
The RSB filling macro is applicable to AMD, and, if software is unable to verify that LFENCE is serializing on AMD (possible when running under a hypervisor), the generic retpoline support will be used and, so, is also applicable to AMD. Keep the current usage of PAUSE for Intel, but add an LFENCE instruction to the speculation trap for AMD. The same sequence has been adopted by GCC for the GCC generated retpolines. Signed-off-by: Tom Lendacky Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Acked-by: David Woodhouse Acked-by: Arjan van de Ven Cc: Rik van Riel Cc: Andi Kleen Cc: Paul Turner Cc: Peter Zijlstra Cc: Tim Chen Cc: Jiri Kosina Cc: Dave Hansen Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Dan Williams Cc: Linus Torvalds Cc: Greg Kroah-Hartman Cc: Kees Cook Link: https://lkml.kernel.org/r/20180113232730.31060.36287.stgit@tlendack-t1.amdoffice.net Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 402a11c803c38b..7b45d842415089 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -11,7 +11,7 @@ * Fill the CPU return stack buffer. * * Each entry in the RSB, if used for a speculative 'ret', contains an - * infinite 'pause; jmp' loop to capture speculative execution. + * infinite 'pause; lfence; jmp' loop to capture speculative execution. * * This is required in various cases for retpoline and IBRS-based * mitigations for the Spectre variant 2 vulnerability. Sometimes to @@ -38,11 +38,13 @@ call 772f; \ 773: /* speculation trap */ \ pause; \ + lfence; \ jmp 773b; \ 772: \ call 774f; \ 775: /* speculation trap */ \ pause; \ + lfence; \ jmp 775b; \ 774: \ dec reg; \ @@ -73,6 +75,7 @@ call .Ldo_rop_\@ .Lspec_trap_\@: pause + lfence jmp .Lspec_trap_\@ .Ldo_rop_\@: mov \reg, (%_ASM_SP) @@ -165,6 +168,7 @@ " .align 16\n" \ "901: call 903f;\n" \ "902: pause;\n" \ + " lfence;\n" \ " jmp 902b;\n" \ " .align 16\n" \ "903: addl $4, %%esp;\n" \ From f45bbd95be060a7eb67ce2f1a1404a82be93d0e3 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 15 Jan 2018 08:17:08 -0600 Subject: [PATCH 1407/3326] objtool: Improve error message for bad file argument commit 385d11b152c4eb638eeb769edcb3249533bb9a00 upstream. 
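The pause/lfence pair that the LFENCE patch above adds to the speculation traps is also reachable from C via compiler intrinsics. This is only a bounded illustration of the instruction sequence, assuming x86 with GCC or Clang; the kernel's trap is an infinite loop that is only ever entered speculatively.

#include <immintrin.h>
#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 8; i++) {
		_mm_pause();	/* spin-wait hint used in the speculation trap */
		_mm_lfence();	/* load fence; the barrier the patch above adds */
	}
	puts("executed 8 pause+lfence pairs");
	return 0;
}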
If a nonexistent file is supplied to objtool, it complains with a non-helpful error: open: No such file or directory Improve it to: objtool: Can't open 'foo': No such file or directory Reported-by: Markus Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/406a3d00a21225eee2819844048e17f68523ccf6.1516025651.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/elf.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 24460155c82c9b..c1c33866169978 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "elf.h" #include "warn.h" @@ -358,7 +359,8 @@ struct elf *elf_open(const char *name, int flags) elf->fd = open(name, flags); if (elf->fd == -1) { - perror("open"); + fprintf(stderr, "objtool: Can't open '%s': %s\n", + name, strerror(errno)); goto err; } From 4d9c9abf6dea55e6af641ac5d7984c77338b2921 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 16 Jan 2018 16:42:25 +0100 Subject: [PATCH 1408/3326] x86/cpufeature: Move processor tracing out of scattered features MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 4fdec2034b7540dda461c6ba33325dfcff345c64 upstream. Processor tracing is already enumerated in word 9 (CPUID[7,0].EBX), so do not duplicate it in the scattered features word. Besides being more tidy, this will be useful for KVM when it presents processor tracing to the guests. KVM selects host features that are supported by both the host kernel (depending on command line options, CPU errata, or whatever) and KVM. Whenever a full feature word exists, KVM's code is written in the expectation that the CPUID bit number matches the X86_FEATURE_* bit number, but this is not the case for X86_FEATURE_INTEL_PT. 
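The objtool fix above is a small, general pattern: report the tool name, the offending path and strerror(errno) instead of a bare perror(). A standalone example of the same pattern; the default file name is just a placeholder.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *name = argc > 1 ? argv[1] : "foo";
	int fd = open(name, O_RDONLY);

	if (fd == -1) {
		/* e.g. "objtool: Can't open 'foo': No such file or directory" */
		fprintf(stderr, "objtool: Can't open '%s': %s\n", name, strerror(errno));
		return 1;
	}
	close(fd);
	return 0;
}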
Signed-off-by: Paolo Bonzini Cc: Borislav Petkov Cc: Linus Torvalds Cc: Luwei Kang Cc: Peter Zijlstra Cc: Radim Krčmář Cc: Thomas Gleixner Cc: kvm@vger.kernel.org Link: http://lkml.kernel.org/r/1516117345-34561-1-git-send-email-pbonzini@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 2 +- arch/x86/kernel/cpu/scattered.c | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index aa09559b2c0bff..25b9375c1484b4 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -206,7 +206,6 @@ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ -#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ #define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */ @@ -246,6 +245,7 @@ #define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ +#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */ #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 05459ad3db46e2..d0e69769abfd39 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -21,7 +21,6 @@ struct cpuid_bit { static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, - { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 }, { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 }, { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 }, { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, From 8c556782901574e6a9d758fb1ef74cef54bf9a65 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 16 Jan 2018 12:52:28 -0800 Subject: [PATCH 1409/3326] module: Add retpoline tag to VERMAGIC commit 6cfb521ac0d5b97470883ff9b7facae264b7ab12 upstream. Add a marker for retpoline to the module VERMAGIC. This catches the case when a non RETPOLINE compiled module gets loaded into a retpoline kernel, making it insecure. It doesn't handle the case when retpoline has been runtime disabled. Even in this case the match of the retcompile status will be enforced. This implies that even with retpoline run time disabled all modules loaded need to be recompiled. 
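Since X86_FEATURE_INTEL_PT now sits in the full CPUID(7,0).EBX feature word, its kernel bit number matches the hardware bit. A userspace check of that same bit, assuming x86 and a reasonably recent GCC/Clang cpuid.h that provides __get_cpuid_count:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;	/* CPUID leaf 7 not available */

	/* Intel PT is CPUID(7,0).EBX bit 25 - the same bit number the kernel
	 * now uses for X86_FEATURE_INTEL_PT in feature word 9. */
	printf("Intel PT supported: %s\n", (ebx & (1u << 25)) ? "yes" : "no");
	return 0;
}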
Signed-off-by: Andi Kleen Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Acked-by: David Woodhouse Cc: rusty@rustcorp.com.au Cc: arjan.van.de.ven@intel.com Cc: jeyu@kernel.org Cc: torvalds@linux-foundation.org Link: https://lkml.kernel.org/r/20180116205228.4890-1-andi@firstfloor.org Signed-off-by: Greg Kroah-Hartman --- include/linux/vermagic.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index bae807eb2933f9..853291714ae0b9 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -31,11 +31,17 @@ #else #define MODULE_RANDSTRUCT_PLUGIN #endif +#ifdef RETPOLINE +#define MODULE_VERMAGIC_RETPOLINE "retpoline " +#else +#define MODULE_VERMAGIC_RETPOLINE "" +#endif #define VERMAGIC_STRING \ UTS_RELEASE " " \ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ MODULE_ARCH_VERMAGIC \ - MODULE_RANDSTRUCT_PLUGIN + MODULE_RANDSTRUCT_PLUGIN \ + MODULE_VERMAGIC_RETPOLINE From 929e4b35023634b421605211c4e1c59b245edd3a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 16 Jan 2018 19:59:59 +0100 Subject: [PATCH 1410/3326] x86/intel_rdt/cqm: Prevent use after free commit d47924417319e3b6a728c0b690f183e75bc2a702 upstream. intel_rdt_iffline_cpu() -> domain_remove_cpu() frees memory first and then proceeds accessing it. BUG: KASAN: use-after-free in find_first_bit+0x1f/0x80 Read of size 8 at addr ffff883ff7c1e780 by task cpuhp/31/195 find_first_bit+0x1f/0x80 has_busy_rmid+0x47/0x70 intel_rdt_offline_cpu+0x4b4/0x510 Freed by task 195: kfree+0x94/0x1a0 intel_rdt_offline_cpu+0x17d/0x510 Do the teardown first and then free memory. Fixes: 24247aeeabe9 ("x86/intel_rdt/cqm: Improve limbo list processing") Reported-by: Joseph Salisbury Signed-off-by: Thomas Gleixner Cc: Ravi Shankar Cc: Peter Zilstra Cc: Stephane Eranian Cc: Vikas Shivappa Cc: Andi Kleen Cc: "Roderick W. Smith" Cc: 1733662@bugs.launchpad.net Cc: Fenghua Yu Cc: Tony Luck Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801161957510.2366@nanos Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/intel_rdt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index 88dcf847901356..99442370de40d7 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -525,10 +525,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) */ if (static_branch_unlikely(&rdt_mon_enable_key)) rmdir_mondata_subdir_allrdtgrp(r, d->id); - kfree(d->ctrl_val); - kfree(d->rmid_busy_llc); - kfree(d->mbm_total); - kfree(d->mbm_local); list_del(&d->list); if (is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); @@ -545,6 +541,10 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) cancel_delayed_work(&d->cqm_limbo); } + kfree(d->ctrl_val); + kfree(d->rmid_busy_llc); + kfree(d->mbm_total); + kfree(d->mbm_local); kfree(d); return; } From 239f28886dffc4fe0481889eb78df1126c8a84f2 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 12 Jan 2018 14:31:35 -0600 Subject: [PATCH 1411/3326] x86/mm/pkeys: Fix fill_sig_info_pkey commit beacd6f7ed5e2915959442245b3b2480c2e37490 upstream. 
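The RDT use-after-free fix above follows a general ordering rule: run every teardown step that still dereferences an object's members before freeing those members. A standalone sketch of the corrected ordering; the structure and names are illustrative, not the kernel's.

#include <stdlib.h>

struct domain {
	unsigned long *rmid_busy_llc;
	/* ... more per-domain state ... */
};

static int has_busy_rmid(const struct domain *d)
{
	return d->rmid_busy_llc[0] != 0;	/* still dereferences d's members */
}

static void domain_remove(struct domain *d)
{
	/* Teardown first: anything that reads d->rmid_busy_llc and friends. */
	if (has_busy_rmid(d)) {
		/* cancel deferred work, drop references, ... */
	}

	/* Only then release the memory. */
	free(d->rmid_busy_llc);
	free(d);
}

int main(void)
{
	struct domain *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	d->rmid_busy_llc = calloc(4, sizeof(unsigned long));
	if (!d->rmid_busy_llc) {
		free(d);
		return 1;
	}
	domain_remove(d);
	return 0;
}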
SEGV_PKUERR is a signal specific si_code which happens to have the same numeric value as several others: BUS_MCEERR_AR, ILL_ILLTRP, FPE_FLTOVF, TRAP_HWBKPT, CLD_TRAPPED, POLL_ERR, SEGV_THREAD_ID, as such it is not safe to just test the si_code the signal number must also be tested to prevent a false positive in fill_sig_info_pkey. This error was by inspection, and BUS_MCEERR_AR appears to be a real candidate for confusion. So pass in si_signo and check for SIG_SEGV to verify that it is actually a SEGV_PKUERR Fixes: 019132ff3daf ("x86/mm/pkeys: Fill in pkey field in siginfo") Signed-off-by: "Eric W. Biederman" Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: Dave Hansen Cc: Oleg Nesterov Cc: Al Viro Link: https://lkml.kernel.org/r/20180112203135.4669-2-ebiederm@xmission.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/fault.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 3109ba6c6edeed..b264b590eeec07 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -173,14 +173,15 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really * faulted on a pte with its pkey=4. */ -static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey) +static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info, + u32 *pkey) { /* This is effectively an #ifdef */ if (!boot_cpu_has(X86_FEATURE_OSPKE)) return; /* Fault not from Protection Keys: nothing to do */ - if (si_code != SEGV_PKUERR) + if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV)) return; /* * force_sig_info_fault() is called from a number of @@ -219,7 +220,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, lsb = PAGE_SHIFT; info.si_addr_lsb = lsb; - fill_sig_info_pkey(si_code, &info, pkey); + fill_sig_info_pkey(si_signo, si_code, &info, pkey); force_sig_info(si_signo, &info, tsk); } From ad2d62036bd00a55dfd9f76cf822213fd07c0e30 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 21 Dec 2017 16:18:21 -0800 Subject: [PATCH 1412/3326] x86/idt: Mark IDT tables __initconst commit 327867faa4d66628fcd92a843adb3345736a5313 upstream. const variables must use __initconst, not __initdata. Fix this up for the IDT tables, which got it consistently wrong. Fixes: 16bc18d895ce ("x86/idt: Move 32-bit idt_descr to C code") Signed-off-by: Andi Kleen Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20171222001821.2157-7-andi@firstfloor.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/idt.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c index 014cb2fc47fff2..236917bac5f234 100644 --- a/arch/x86/kernel/idt.c +++ b/arch/x86/kernel/idt.c @@ -56,7 +56,7 @@ struct idt_data { * Early traps running on the DEFAULT_STACK because the other interrupt * stacks work only after cpu_init(). */ -static const __initdata struct idt_data early_idts[] = { +static const __initconst struct idt_data early_idts[] = { INTG(X86_TRAP_DB, debug), SYSG(X86_TRAP_BP, int3), #ifdef CONFIG_X86_32 @@ -70,7 +70,7 @@ static const __initdata struct idt_data early_idts[] = { * the traps which use them are reinitialized with IST after cpu_init() has * set up TSS. 
*/ -static const __initdata struct idt_data def_idts[] = { +static const __initconst struct idt_data def_idts[] = { INTG(X86_TRAP_DE, divide_error), INTG(X86_TRAP_NMI, nmi), INTG(X86_TRAP_BR, bounds), @@ -108,7 +108,7 @@ static const __initdata struct idt_data def_idts[] = { /* * The APIC and SMP idt entries */ -static const __initdata struct idt_data apic_idts[] = { +static const __initconst struct idt_data apic_idts[] = { #ifdef CONFIG_SMP INTG(RESCHEDULE_VECTOR, reschedule_interrupt), INTG(CALL_FUNCTION_VECTOR, call_function_interrupt), @@ -150,7 +150,7 @@ static const __initdata struct idt_data apic_idts[] = { * Early traps running on the DEFAULT_STACK because the other interrupt * stacks work only after cpu_init(). */ -static const __initdata struct idt_data early_pf_idts[] = { +static const __initconst struct idt_data early_pf_idts[] = { INTG(X86_TRAP_PF, page_fault), }; @@ -158,7 +158,7 @@ static const __initdata struct idt_data early_pf_idts[] = { * Override for the debug_idt. Same as the default, but with interrupt * stack set to DEFAULT_STACK (0). Required for NMI trap handling. */ -static const __initdata struct idt_data dbg_idts[] = { +static const __initconst struct idt_data dbg_idts[] = { INTG(X86_TRAP_DB, debug), INTG(X86_TRAP_BP, int3), }; @@ -180,7 +180,7 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss; * The exceptions which use Interrupt stacks. They are setup after * cpu_init() when the TSS has been initialized. */ -static const __initdata struct idt_data ist_idts[] = { +static const __initconst struct idt_data ist_idts[] = { ISTG(X86_TRAP_DB, debug, DEBUG_STACK), ISTG(X86_TRAP_NMI, nmi, NMI_STACK), SISTG(X86_TRAP_BP, int3, DEBUG_STACK), From a5ab7b5de103baeeaa35ab93d95578059a5194d1 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Fri, 22 Dec 2017 00:27:54 -0500 Subject: [PATCH 1413/3326] x86/tsc: Future-proof native_calibrate_tsc() commit da4ae6c4a0b8dee5a5377a385545d2250fa8cddb upstream. If the crystal frequency cannot be determined via CPUID(15).crystal_khz or the built-in table then native_calibrate_tsc() will still set the X86_FEATURE_TSC_KNOWN_FREQ flag which prevents the refined TSC calibration. As a consequence such systems use cpu_khz for the TSC frequency which is incorrect when cpu_khz != tsc_khz resulting in time drift. Return early when the crystal frequency cannot be retrieved without setting the X86_FEATURE_TSC_KNOWN_FREQ flag. This ensures that the refined TSC calibration is invoked. [ tglx: Steam-blastered changelog. Sigh ] Fixes: 4ca4df0b7eb0 ("x86/tsc: Mark TSC frequency determined by CPUID as known") Signed-off-by: Len Brown Signed-off-by: Thomas Gleixner Cc: peterz@infradead.org Cc: Bin Gao Link: https://lkml.kernel.org/r/0fe2503aa7d7fc69137141fc705541a78101d2b9.1513920414.git.len.brown@intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/tsc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index ad2b925a808e73..56a5766e21456f 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -612,6 +612,8 @@ unsigned long native_calibrate_tsc(void) } } + if (crystal_khz == 0) + return 0; /* * TSC frequency determined by CPUID is a "hardware reported" * frequency and is the most accurate one so far we have. This From 8352a3fec216b76b645f583f294efc711b3ca1bd Mon Sep 17 00:00:00 2001 From: Len Brown Date: Fri, 22 Dec 2017 00:27:55 -0500 Subject: [PATCH 1414/3326] x86/tsc: Fix erroneous TSC rate on Skylake Xeon commit b511203093489eb1829cb4de86e8214752205ac6 upstream. 
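Both TSC patches above revolve around CPUID(15H): EBX/EAX give the TSC-to-crystal ratio and ECX the crystal frequency, which the CPU may legitimately leave at zero. A userspace sketch of that calculation, assuming x86 and the compiler-provided cpuid.h; the kernel additionally consults a model table and, after these patches, falls back to refined calibration when the crystal is unknown.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int denom, numer, crystal_hz, edx;

	if (!__get_cpuid(0x15, &denom, &numer, &crystal_hz, &edx) ||
	    !denom || !numer) {
		puts("CPUID(15H) does not enumerate the TSC/crystal ratio");
		return 0;
	}

	if (!crystal_hz) {
		/* The case the patch handles: no crystal frequency enumerated, so
		 * the calibration routine must return 0 and let refined calibration run. */
		puts("crystal frequency not enumerated");
		return 0;
	}

	printf("TSC frequency: %llu kHz\n",
	       (unsigned long long)crystal_hz * numer / denom / 1000);
	return 0;
}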
The INTEL_FAM6_SKYLAKE_X hardcoded crystal_khz value of 25MHZ is problematic: - SKX workstations (with same model # as server variants) use a 24 MHz crystal. This results in a -4.0% time drift rate on SKX workstations. - SKX servers subject the crystal to an EMI reduction circuit that reduces its actual frequency by (approximately) -0.25%. This results in -1 second per 10 minute time drift as compared to network time. This issue can also trigger a timer and power problem, on configurations that use the LAPIC timer (versus the TSC deadline timer). Clock ticks scheduled with the LAPIC timer arrive a few usec before the time they are expected (according to the slow TSC). This causes Linux to poll-idle, when it should be in an idle power saving state. The idle and clock code do not graciously recover from this error, sometimes resulting in significant polling and measurable power impact. Stop using native_calibrate_tsc() for INTEL_FAM6_SKYLAKE_X. native_calibrate_tsc() will return 0, boot will run with tsc_khz = cpu_khz, and the TSC refined calibration will update tsc_khz to correct for the difference. [ tglx: Sanitized change log ] Fixes: 6baf3d61821f ("x86/tsc: Add additional Intel CPU models to the crystal quirk list") Signed-off-by: Len Brown Signed-off-by: Thomas Gleixner Cc: peterz@infradead.org Cc: Prarit Bhargava Link: https://lkml.kernel.org/r/ff6dcea166e8ff8f2f6a03c17beab2cb436aa779.1513920414.git.len.brown@intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/tsc.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 56a5766e21456f..47506567435ee9 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -602,7 +602,6 @@ unsigned long native_calibrate_tsc(void) case INTEL_FAM6_KABYLAKE_DESKTOP: crystal_khz = 24000; /* 24.0 MHz */ break; - case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_ATOM_DENVERTON: crystal_khz = 25000; /* 25.0 MHz */ break; From e109607e141be9587612346b7cc56e030942042e Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Fri, 17 Nov 2017 15:29:21 -0800 Subject: [PATCH 1415/3326] pipe: avoid round_pipe_size() nr_pages overflow on 32-bit commit d3f14c485867cfb2e0c48aa88c41d0ef4bf5209c upstream. round_pipe_size() contains a right-bit-shift expression which may overflow, which would cause undefined results in a subsequent roundup_pow_of_two() call. static inline unsigned int round_pipe_size(unsigned int size) { unsigned long nr_pages; nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; return roundup_pow_of_two(nr_pages) << PAGE_SHIFT; } PAGE_SIZE is defined as (1UL << PAGE_SHIFT), so: - 4 bytes wide on 32-bit (0 to 0xffffffff) - 8 bytes wide on 64-bit (0 to 0xffffffffffffffff) That means that 32-bit round_pipe_size(), nr_pages may overflow to 0: size=0x00000000 nr_pages=0x0 size=0x00000001 nr_pages=0x1 size=0xfffff000 nr_pages=0xfffff size=0xfffff001 nr_pages=0x0 << ! size=0xffffffff nr_pages=0x0 << ! This is bad because roundup_pow_of_two(n) is undefined when n == 0! 64-bit is not a problem as the unsigned int size is 4 bytes wide (similar to 32-bit) and the larger, 8 byte wide unsigned long, is sufficient to handle the largest value of the bit shift expression: size=0xffffffff nr_pages=100000 Modify round_pipe_size() to return 0 if n == 0 and updates its callers to handle accordingly. 
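The overflow described above is easy to reproduce by doing the page arithmetic in 32-bit variables; the inputs below match the table in the changelog. This demonstrates only the failure mode, it is not the kernel function.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Emulate the 32-bit 'unsigned long' case with unsigned int arithmetic. */
static unsigned int nr_pages32(unsigned int size)
{
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* size + 4095 may wrap to 0 */
}

int main(void)
{
	unsigned int sizes[] = { 0x00000001u, 0xfffff000u, 0xfffff001u, 0xffffffffu };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int nr = nr_pages32(sizes[i]);

		printf("size=0x%08x nr_pages=0x%x%s\n", sizes[i], nr,
		       nr ? "" : "   <- roundup_pow_of_two(0) is undefined");
	}
	return 0;
}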
Link: http://lkml.kernel.org/r/1507658689-11669-3-git-send-email-joe.lawrence@redhat.com Signed-off-by: Joe Lawrence Reported-by: Mikulas Patocka Reviewed-by: Mikulas Patocka Cc: Al Viro Cc: Jens Axboe Cc: Michael Kerrisk Cc: Randy Dunlap Cc: Josh Poimboeuf Signed-off-by: Andrew Morton Signed-off-by: Dong Jinguang Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/pipe.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/fs/pipe.c b/fs/pipe.c index 3909c55ed389bb..f0f4ab36c444d4 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1018,13 +1018,19 @@ const struct file_operations pipefifo_fops = { /* * Currently we rely on the pipe array holding a power-of-2 number - * of pages. + * of pages. Returns 0 on error. */ static inline unsigned int round_pipe_size(unsigned int size) { unsigned long nr_pages; + if (size < pipe_min_size) + size = pipe_min_size; + nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (nr_pages == 0) + return 0; + return roundup_pow_of_two(nr_pages) << PAGE_SHIFT; } @@ -1040,6 +1046,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) long ret = 0; size = round_pipe_size(arg); + if (size == 0) + return -EINVAL; nr_pages = size >> PAGE_SHIFT; if (!nr_pages) @@ -1123,13 +1131,18 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf, size_t *lenp, loff_t *ppos) { + unsigned int rounded_pipe_max_size; int ret; ret = proc_douintvec_minmax(table, write, buf, lenp, ppos); if (ret < 0 || !write) return ret; - pipe_max_size = round_pipe_size(pipe_max_size); + rounded_pipe_max_size = round_pipe_size(pipe_max_size); + if (rounded_pipe_max_size == 0) + return -EINVAL; + + pipe_max_size = rounded_pipe_max_size; return ret; } From 0062c10afea4a477c099ca0ff7aec46228cb0709 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 16 Jan 2018 12:20:18 +0100 Subject: [PATCH 1416/3326] x86/apic/vector: Fix off by one in error path commit 45d55e7bac4028af93f5fa324e69958a0b868e96 upstream. Keith reported the following warning: WARNING: CPU: 28 PID: 1420 at kernel/irq/matrix.c:222 irq_matrix_remove_managed+0x10f/0x120 x86_vector_free_irqs+0xa1/0x180 x86_vector_alloc_irqs+0x1e4/0x3a0 msi_domain_alloc+0x62/0x130 The reason for this is that if the vector allocation fails the error handling code tries to free the failed vector as well, which causes the above imbalance warning to trigger. Adjust the error path to handle this correctly. 
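The off-by-one fixed in the vector patch above is an instance of a general error-path rule: when setting up entry i fails, undo the current entry in place and roll back only the i entries that were fully set up. A minimal sketch with a simulated failure; the names are illustrative.

#include <stdlib.h>

struct entry { void *data; };

static void free_entries(struct entry *e, int nr)
{
	for (int i = 0; i < nr; i++) {
		free(e[i].data);
		e[i].data = NULL;
	}
}

static int alloc_entries(struct entry *e, int nr)
{
	for (int i = 0; i < nr; i++) {
		e[i].data = malloc(64);
		if (!e[i].data || i == 5 /* simulate a later setup failure */) {
			free(e[i].data);	/* undo the current entry in place */
			e[i].data = NULL;
			free_entries(e, i);	/* i, not i + 1 */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct entry e[8] = { { NULL } };

	return alloc_entries(e, 8) == -1 ? 0 : 1;	/* the simulated failure is expected */
}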
Fixes: b5dc8e6c21e7 ("x86/irq: Use hierarchical irqdomain to manage CPU interrupt vectors") Reported-by: Keith Busch Signed-off-by: Thomas Gleixner Tested-by: Keith Busch Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801161217300.1823@nanos Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/apic/vector.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 88c214e75a6be0..2ce1c708b8ee39 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -369,8 +369,11 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, irq_data->hwirq = virq + i; err = assign_irq_vector_policy(virq + i, node, data, info, irq_data); - if (err) + if (err) { + irq_data->chip_data = NULL; + free_apic_chip_data(data); goto error; + } /* * If the apic destination mode is physical, then the * effective affinity is restricted to a single target @@ -383,7 +386,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, return 0; error: - x86_vector_free_irqs(domain, virq, i + 1); + x86_vector_free_irqs(domain, virq, i); return err; } From 25e119713d28611ed49bf3c518193930becb16f5 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 10 Jan 2018 13:25:56 -0600 Subject: [PATCH 1417/3326] x86/mm: Clean up register saving in the __enc_copy() assembly code commit 1303880179e67c59e801429b7e5d0f6b21137d99 upstream. Clean up the use of PUSH and POP and when registers are saved in the __enc_copy() assembly function in order to improve the readability of the code. Move parameter register saving into general purpose registers earlier in the code and move all the pushes to the beginning of the function with corresponding pops at the end. We do this to prepare fixes. Tested-by: Gabriel Craciunescu Signed-off-by: Tom Lendacky Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brijesh Singh Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180110192556.6026.74187.stgit@tlendack-t1.amdoffice.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/mem_encrypt_boot.S | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index 730e6d541df1d6..de36884611450f 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S @@ -103,20 +103,19 @@ ENTRY(__enc_copy) orq $X86_CR4_PGE, %rdx mov %rdx, %cr4 + push %r15 + + movq %rcx, %r9 /* Save kernel length */ + movq %rdi, %r10 /* Save encrypted kernel address */ + movq %rsi, %r11 /* Save decrypted kernel address */ + /* Set the PAT register PA5 entry to write-protect */ - push %rcx movl $MSR_IA32_CR_PAT, %ecx rdmsr - push %rdx /* Save original PAT value */ + mov %rdx, %r15 /* Save original PAT value */ andl $0xffff00ff, %edx /* Clear PA5 */ orl $0x00000500, %edx /* Set PA5 to WP */ wrmsr - pop %rdx /* RDX contains original PAT value */ - pop %rcx - - movq %rcx, %r9 /* Save kernel length */ - movq %rdi, %r10 /* Save encrypted kernel address */ - movq %rsi, %r11 /* Save decrypted kernel address */ wbinvd /* Invalidate any cache entries */ @@ -138,12 +137,13 @@ ENTRY(__enc_copy) jnz 1b /* Kernel length not zero? 
*/ /* Restore PAT register */ - push %rdx /* Save original PAT value */ movl $MSR_IA32_CR_PAT, %ecx rdmsr - pop %rdx /* Restore original PAT value */ + mov %r15, %rdx /* Restore original PAT value */ wrmsr + pop %r15 + ret .L__enc_copy_end: ENDPROC(__enc_copy) From ce1805fdedd794bcd51b097f9dad1db2365b262f Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 10 Jan 2018 13:26:05 -0600 Subject: [PATCH 1418/3326] x86/mm: Use a struct to reduce parameters for SME PGD mapping commit bacf6b499e11760aef73a3bb5ce4e5eea74a3fd4 upstream. In preparation for follow-on patches, combine the PGD mapping parameters into a struct to reduce the number of function arguments and allow for direct updating of the next pagetable mapping area pointer. Tested-by: Gabriel Craciunescu Signed-off-by: Tom Lendacky Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brijesh Singh Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180110192605.6026.96206.stgit@tlendack-t1.amdoffice.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/mem_encrypt.c | 90 ++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 44 deletions(-) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 0286327e65fa27..add396f2796f24 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -213,6 +213,14 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size) set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); } +struct sme_populate_pgd_data { + void *pgtable_area; + pgd_t *pgd; + + pmdval_t pmd_val; + unsigned long vaddr; +}; + static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start, unsigned long end) { @@ -235,15 +243,14 @@ static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start, #define PUD_FLAGS _KERNPG_TABLE_NOENC #define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) -static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, - unsigned long vaddr, pmdval_t pmd_val) +static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) { pgd_t *pgd_p; p4d_t *p4d_p; pud_t *pud_p; pmd_t *pmd_p; - pgd_p = pgd_base + pgd_index(vaddr); + pgd_p = ppd->pgd + pgd_index(ppd->vaddr); if (native_pgd_val(*pgd_p)) { if (IS_ENABLED(CONFIG_X86_5LEVEL)) p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); @@ -253,15 +260,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, pgd_t pgd; if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_p = pgtable_area; + p4d_p = ppd->pgtable_area; memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D); - pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; + ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS); } else { - pud_p = pgtable_area; + pud_p = ppd->pgtable_area; memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; + ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS); } @@ -269,44 +276,41 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, } if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_p += p4d_index(vaddr); + p4d_p += p4d_index(ppd->vaddr); if (native_p4d_val(*p4d_p)) { pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK); } else { p4d_t p4d; - pud_p = pgtable_area; + pud_p = ppd->pgtable_area; memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; + ppd->pgtable_area += 
sizeof(*pud_p) * PTRS_PER_PUD; p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS); native_set_p4d(p4d_p, p4d); } } - pud_p += pud_index(vaddr); + pud_p += pud_index(ppd->vaddr); if (native_pud_val(*pud_p)) { if (native_pud_val(*pud_p) & _PAGE_PSE) - goto out; + return; pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); } else { pud_t pud; - pmd_p = pgtable_area; + pmd_p = ppd->pgtable_area; memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); - pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; + ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS); native_set_pud(pud_p, pud); } - pmd_p += pmd_index(vaddr); + pmd_p += pmd_index(ppd->vaddr); if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) - native_set_pmd(pmd_p, native_make_pmd(pmd_val)); - -out: - return pgtable_area; + native_set_pmd(pmd_p, native_make_pmd(ppd->pmd_val)); } static unsigned long __init sme_pgtable_calc(unsigned long len) @@ -364,11 +368,10 @@ void __init sme_encrypt_kernel(void) unsigned long workarea_start, workarea_end, workarea_len; unsigned long execute_start, execute_end, execute_len; unsigned long kernel_start, kernel_end, kernel_len; + struct sme_populate_pgd_data ppd; unsigned long pgtable_area_len; unsigned long paddr, pmd_flags; unsigned long decrypted_base; - void *pgtable_area; - pgd_t *pgd; if (!sme_active()) return; @@ -432,18 +435,18 @@ void __init sme_encrypt_kernel(void) * pagetables and when the new encrypted and decrypted kernel * mappings are populated. */ - pgtable_area = (void *)execute_end; + ppd.pgtable_area = (void *)execute_end; /* * Make sure the current pagetable structure has entries for * addressing the workarea. */ - pgd = (pgd_t *)native_read_cr3_pa(); + ppd.pgd = (pgd_t *)native_read_cr3_pa(); paddr = workarea_start; while (paddr < workarea_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr, - paddr + PMD_FLAGS); + ppd.pmd_val = paddr + PMD_FLAGS; + ppd.vaddr = paddr; + sme_populate_pgd_large(&ppd); paddr += PMD_PAGE_SIZE; } @@ -457,17 +460,17 @@ void __init sme_encrypt_kernel(void) * populated with new PUDs and PMDs as the encrypted and decrypted * kernel mappings are created. 
*/ - pgd = pgtable_area; - memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD); - pgtable_area += sizeof(*pgd) * PTRS_PER_PGD; + ppd.pgd = ppd.pgtable_area; + memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD); + ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD; /* Add encrypted kernel (identity) mappings */ pmd_flags = PMD_FLAGS | _PAGE_ENC; paddr = kernel_start; while (paddr < kernel_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr, - paddr + pmd_flags); + ppd.pmd_val = paddr + pmd_flags; + ppd.vaddr = paddr; + sme_populate_pgd_large(&ppd); paddr += PMD_PAGE_SIZE; } @@ -485,9 +488,9 @@ void __init sme_encrypt_kernel(void) pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT); paddr = kernel_start; while (paddr < kernel_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr + decrypted_base, - paddr + pmd_flags); + ppd.pmd_val = paddr + pmd_flags; + ppd.vaddr = paddr + decrypted_base; + sme_populate_pgd_large(&ppd); paddr += PMD_PAGE_SIZE; } @@ -495,30 +498,29 @@ void __init sme_encrypt_kernel(void) /* Add decrypted workarea mappings to both kernel mappings */ paddr = workarea_start; while (paddr < workarea_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr, - paddr + PMD_FLAGS); + ppd.pmd_val = paddr + PMD_FLAGS; + ppd.vaddr = paddr; + sme_populate_pgd_large(&ppd); - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr + decrypted_base, - paddr + PMD_FLAGS); + ppd.vaddr = paddr + decrypted_base; + sme_populate_pgd_large(&ppd); paddr += PMD_PAGE_SIZE; } /* Perform the encryption */ sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, - kernel_len, workarea_start, (unsigned long)pgd); + kernel_len, workarea_start, (unsigned long)ppd.pgd); /* * At this point we are running encrypted. Remove the mappings for * the decrypted areas - all that is needed for this is to remove * the PGD entry/entries. */ - sme_clear_pgd(pgd, kernel_start + decrypted_base, + sme_clear_pgd(ppd.pgd, kernel_start + decrypted_base, kernel_end + decrypted_base); - sme_clear_pgd(pgd, workarea_start + decrypted_base, + sme_clear_pgd(ppd.pgd, workarea_start + decrypted_base, workarea_end + decrypted_base); /* Flush the TLB - no globals so cr3 is enough */ From 69a39cf36ec2094912d493dfac6747f00f8c2320 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 10 Jan 2018 13:26:16 -0600 Subject: [PATCH 1419/3326] x86/mm: Centralize PMD flags in sme_encrypt_kernel() commit 2b5d00b6c2cdd94f6d6a494a6f6c0c0fc7b8e711 upstream. In preparation for encrypting more than just the kernel during early boot processing, centralize the use of the PMD flag settings based on the type of mapping desired. When 4KB aligned encryption is added, this will allow either PTE flags or large page PMD flags to be used without requiring the caller to adjust. 
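The two SME refactors above combine a parameter object with one precomputed flags value per mapping type, so callers describe the range once and never assemble page-table flags by hand. A compact userspace sketch of that shape; all flag values and names here are illustrative placeholders, not the kernel's encodings.

#include <stdint.h>
#include <stdio.h>

/* Placeholder flag encodings for illustration only. */
#define FLAGS_LARGE	0x01u
#define FLAG_ENC	0x02u
#define FLAG_WP		0x04u

#define FLAGS_ENC	(FLAGS_LARGE | FLAG_ENC)
#define FLAGS_DEC	FLAGS_LARGE
#define FLAGS_DEC_WP	(FLAGS_DEC | FLAG_WP)

/* Parameter object: the range is filled in once, the helpers pick the flags. */
struct populate_data {
	uint64_t paddr;
	uint64_t vaddr;
	uint64_t vaddr_end;
	uint32_t pmd_flags;
};

static void map_range(struct populate_data *ppd, uint32_t pmd_flags)
{
	ppd->pmd_flags = pmd_flags;
	printf("map va %#llx-%#llx -> pa %#llx, flags %#x\n",
	       (unsigned long long)ppd->vaddr, (unsigned long long)ppd->vaddr_end,
	       (unsigned long long)ppd->paddr, ppd->pmd_flags);
}

static void map_range_encrypted(struct populate_data *ppd)    { map_range(ppd, FLAGS_ENC); }
static void map_range_decrypted(struct populate_data *ppd)    { map_range(ppd, FLAGS_DEC); }
static void map_range_decrypted_wp(struct populate_data *ppd) { map_range(ppd, FLAGS_DEC_WP); }

int main(void)
{
	struct populate_data ppd = {
		.paddr = 0x1000000, .vaddr = 0x1000000, .vaddr_end = 0x1400000,
	};

	map_range_encrypted(&ppd);
	map_range_decrypted_wp(&ppd);
	map_range_decrypted(&ppd);
	return 0;
}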
Tested-by: Gabriel Craciunescu Signed-off-by: Tom Lendacky Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brijesh Singh Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180110192615.6026.14767.stgit@tlendack-t1.amdoffice.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/mem_encrypt.c | 133 ++++++++++++++++++++++---------------- 1 file changed, 77 insertions(+), 56 deletions(-) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index add396f2796f24..968ac0ae8033fc 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -217,31 +217,39 @@ struct sme_populate_pgd_data { void *pgtable_area; pgd_t *pgd; - pmdval_t pmd_val; + pmdval_t pmd_flags; + unsigned long paddr; + unsigned long vaddr; + unsigned long vaddr_end; }; -static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start, - unsigned long end) +static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) { unsigned long pgd_start, pgd_end, pgd_size; pgd_t *pgd_p; - pgd_start = start & PGDIR_MASK; - pgd_end = end & PGDIR_MASK; + pgd_start = ppd->vaddr & PGDIR_MASK; + pgd_end = ppd->vaddr_end & PGDIR_MASK; - pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1); - pgd_size *= sizeof(pgd_t); + pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t); - pgd_p = pgd_base + pgd_index(start); + pgd_p = ppd->pgd + pgd_index(ppd->vaddr); memset(pgd_p, 0, pgd_size); } -#define PGD_FLAGS _KERNPG_TABLE_NOENC -#define P4D_FLAGS _KERNPG_TABLE_NOENC -#define PUD_FLAGS _KERNPG_TABLE_NOENC -#define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) +#define PGD_FLAGS _KERNPG_TABLE_NOENC +#define P4D_FLAGS _KERNPG_TABLE_NOENC +#define PUD_FLAGS _KERNPG_TABLE_NOENC + +#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) + +#define PMD_FLAGS_DEC PMD_FLAGS_LARGE +#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ + (_PAGE_PAT | _PAGE_PWT)) + +#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC) static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) { @@ -310,7 +318,35 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) pmd_p += pmd_index(ppd->vaddr); if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) - native_set_pmd(pmd_p, native_make_pmd(ppd->pmd_val)); + native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags)); +} + +static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, + pmdval_t pmd_flags) +{ + ppd->pmd_flags = pmd_flags; + + while (ppd->vaddr < ppd->vaddr_end) { + sme_populate_pgd_large(ppd); + + ppd->vaddr += PMD_PAGE_SIZE; + ppd->paddr += PMD_PAGE_SIZE; + } +} + +static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_ENC); +} + +static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_DEC); +} + +static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_DEC_WP); } static unsigned long __init sme_pgtable_calc(unsigned long len) @@ -370,7 +406,6 @@ void __init sme_encrypt_kernel(void) unsigned long kernel_start, kernel_end, kernel_len; struct sme_populate_pgd_data ppd; unsigned long pgtable_area_len; - unsigned long paddr, pmd_flags; unsigned long decrypted_base; if (!sme_active()) @@ -442,14 +477,10 @@ void __init sme_encrypt_kernel(void) * addressing the workarea. 
*/ ppd.pgd = (pgd_t *)native_read_cr3_pa(); - paddr = workarea_start; - while (paddr < workarea_end) { - ppd.pmd_val = paddr + PMD_FLAGS; - ppd.vaddr = paddr; - sme_populate_pgd_large(&ppd); - - paddr += PMD_PAGE_SIZE; - } + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start; + ppd.vaddr_end = workarea_end; + sme_map_range_decrypted(&ppd); /* Flush the TLB - no globals so cr3 is enough */ native_write_cr3(__native_read_cr3()); @@ -464,17 +495,6 @@ void __init sme_encrypt_kernel(void) memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD); ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD; - /* Add encrypted kernel (identity) mappings */ - pmd_flags = PMD_FLAGS | _PAGE_ENC; - paddr = kernel_start; - while (paddr < kernel_end) { - ppd.pmd_val = paddr + pmd_flags; - ppd.vaddr = paddr; - sme_populate_pgd_large(&ppd); - - paddr += PMD_PAGE_SIZE; - } - /* * A different PGD index/entry must be used to get different * pagetable entries for the decrypted mapping. Choose the next @@ -484,29 +504,28 @@ void __init sme_encrypt_kernel(void) decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1); decrypted_base <<= PGDIR_SHIFT; + /* Add encrypted kernel (identity) mappings */ + ppd.paddr = kernel_start; + ppd.vaddr = kernel_start; + ppd.vaddr_end = kernel_end; + sme_map_range_encrypted(&ppd); + /* Add decrypted, write-protected kernel (non-identity) mappings */ - pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT); - paddr = kernel_start; - while (paddr < kernel_end) { - ppd.pmd_val = paddr + pmd_flags; - ppd.vaddr = paddr + decrypted_base; - sme_populate_pgd_large(&ppd); - - paddr += PMD_PAGE_SIZE; - } + ppd.paddr = kernel_start; + ppd.vaddr = kernel_start + decrypted_base; + ppd.vaddr_end = kernel_end + decrypted_base; + sme_map_range_decrypted_wp(&ppd); /* Add decrypted workarea mappings to both kernel mappings */ - paddr = workarea_start; - while (paddr < workarea_end) { - ppd.pmd_val = paddr + PMD_FLAGS; - ppd.vaddr = paddr; - sme_populate_pgd_large(&ppd); - - ppd.vaddr = paddr + decrypted_base; - sme_populate_pgd_large(&ppd); + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start; + ppd.vaddr_end = workarea_end; + sme_map_range_decrypted(&ppd); - paddr += PMD_PAGE_SIZE; - } + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start + decrypted_base; + ppd.vaddr_end = workarea_end + decrypted_base; + sme_map_range_decrypted(&ppd); /* Perform the encryption */ sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, @@ -517,11 +536,13 @@ void __init sme_encrypt_kernel(void) * the decrypted areas - all that is needed for this is to remove * the PGD entry/entries. */ - sme_clear_pgd(ppd.pgd, kernel_start + decrypted_base, - kernel_end + decrypted_base); + ppd.vaddr = kernel_start + decrypted_base; + ppd.vaddr_end = kernel_end + decrypted_base; + sme_clear_pgd(&ppd); - sme_clear_pgd(ppd.pgd, workarea_start + decrypted_base, - workarea_end + decrypted_base); + ppd.vaddr = workarea_start + decrypted_base; + ppd.vaddr_end = workarea_end + decrypted_base; + sme_clear_pgd(&ppd); /* Flush the TLB - no globals so cr3 is enough */ native_write_cr3(__native_read_cr3()); From 33e4ca36aea63c87f15e393d34a0fa52d8337ea0 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 10 Jan 2018 13:26:26 -0600 Subject: [PATCH 1420/3326] x86/mm: Prepare sme_encrypt_kernel() for PAGE aligned encryption commit cc5f01e28d6c60f274fd1e33b245f679f79f543c upstream. 
In preparation for encrypting more than just the kernel, the encryption support in sme_encrypt_kernel() needs to support 4KB page aligned encryption instead of just 2MB large page aligned encryption. Update the routines that populate the PGD to support non-2MB aligned addresses. This is done by creating PTE page tables for the start and end portion of the address range that fall outside of the 2MB alignment. This results in, at most, two extra pages to hold the PTE entries for each mapping of a range. Tested-by: Gabriel Craciunescu Signed-off-by: Tom Lendacky Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brijesh Singh Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180110192626.6026.75387.stgit@tlendack-t1.amdoffice.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/mem_encrypt.c | 123 ++++++++++++++++++++++++++++----- arch/x86/mm/mem_encrypt_boot.S | 20 ++++-- 2 files changed, 121 insertions(+), 22 deletions(-) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 968ac0ae8033fc..b4f15fb2da8f30 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -218,6 +218,7 @@ struct sme_populate_pgd_data { pgd_t *pgd; pmdval_t pmd_flags; + pteval_t pte_flags; unsigned long paddr; unsigned long vaddr; @@ -242,6 +243,7 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) #define PGD_FLAGS _KERNPG_TABLE_NOENC #define P4D_FLAGS _KERNPG_TABLE_NOENC #define PUD_FLAGS _KERNPG_TABLE_NOENC +#define PMD_FLAGS _KERNPG_TABLE_NOENC #define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) @@ -251,7 +253,15 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) #define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC) -static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) +#define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL) + +#define PTE_FLAGS_DEC PTE_FLAGS +#define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ + (_PAGE_PAT | _PAGE_PWT)) + +#define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC) + +static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) { pgd_t *pgd_p; p4d_t *p4d_p; @@ -302,7 +312,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) pud_p += pud_index(ppd->vaddr); if (native_pud_val(*pud_p)) { if (native_pud_val(*pud_p) & _PAGE_PSE) - return; + return NULL; pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); } else { @@ -316,16 +326,55 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) native_set_pud(pud_p, pud); } + return pmd_p; +} + +static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) +{ + pmd_t *pmd_p; + + pmd_p = sme_prepare_pgd(ppd); + if (!pmd_p) + return; + pmd_p += pmd_index(ppd->vaddr); if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags)); } -static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, - pmdval_t pmd_flags) +static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) { - ppd->pmd_flags = pmd_flags; + pmd_t *pmd_p; + pte_t *pte_p; + + pmd_p = sme_prepare_pgd(ppd); + if (!pmd_p) + return; + + pmd_p += pmd_index(ppd->vaddr); + if (native_pmd_val(*pmd_p)) { + if (native_pmd_val(*pmd_p) & _PAGE_PSE) + return; + + pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK); + } else { + pmd_t pmd; + pte_p = ppd->pgtable_area; + memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE); + 
ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE; + + pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS); + native_set_pmd(pmd_p, pmd); + } + + pte_p += pte_index(ppd->vaddr); + if (!native_pte_val(*pte_p)) + native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags)); +} + +static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) +{ while (ppd->vaddr < ppd->vaddr_end) { sme_populate_pgd_large(ppd); @@ -334,33 +383,71 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, } } +static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd) +{ + while (ppd->vaddr < ppd->vaddr_end) { + sme_populate_pgd(ppd); + + ppd->vaddr += PAGE_SIZE; + ppd->paddr += PAGE_SIZE; + } +} + +static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, + pmdval_t pmd_flags, pteval_t pte_flags) +{ + unsigned long vaddr_end; + + ppd->pmd_flags = pmd_flags; + ppd->pte_flags = pte_flags; + + /* Save original end value since we modify the struct value */ + vaddr_end = ppd->vaddr_end; + + /* If start is not 2MB aligned, create PTE entries */ + ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE); + __sme_map_range_pte(ppd); + + /* Create PMD entries */ + ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK; + __sme_map_range_pmd(ppd); + + /* If end is not 2MB aligned, create PTE entries */ + ppd->vaddr_end = vaddr_end; + __sme_map_range_pte(ppd); +} + static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd) { - __sme_map_range(ppd, PMD_FLAGS_ENC); + __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC); } static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd) { - __sme_map_range(ppd, PMD_FLAGS_DEC); + __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC); } static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) { - __sme_map_range(ppd, PMD_FLAGS_DEC_WP); + __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP); } static unsigned long __init sme_pgtable_calc(unsigned long len) { - unsigned long p4d_size, pud_size, pmd_size; + unsigned long p4d_size, pud_size, pmd_size, pte_size; unsigned long total; /* * Perform a relatively simplistic calculation of the pagetable - * entries that are needed. That mappings will be covered by 2MB - * PMD entries so we can conservatively calculate the required + * entries that are needed. Those mappings will be covered mostly + * by 2MB PMD entries so we can conservatively calculate the required * number of P4D, PUD and PMD structures needed to perform the - * mappings. Incrementing the count for each covers the case where - * the addresses cross entries. + * mappings. For mappings that are not 2MB aligned, PTE mappings + * would be needed for the start and end portion of the address range + * that fall outside of the 2MB alignment. This results in, at most, + * two extra pages to hold PTE entries for each range that is mapped. + * Incrementing the count for each covers the case where the addresses + * cross entries. 
*/ if (IS_ENABLED(CONFIG_X86_5LEVEL)) { p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; @@ -374,8 +461,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len) } pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1; pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; + pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE; - total = p4d_size + pud_size + pmd_size; + total = p4d_size + pud_size + pmd_size + pte_size; /* * Now calculate the added pagetable structures needed to populate @@ -458,10 +546,13 @@ void __init sme_encrypt_kernel(void) /* * The total workarea includes the executable encryption area and - * the pagetable area. + * the pagetable area. The start of the workarea is already 2MB + * aligned, align the end of the workarea on a 2MB boundary so that + * we don't try to create/allocate PTE entries from the workarea + * before it is mapped. */ workarea_len = execute_len + pgtable_area_len; - workarea_end = workarea_start + workarea_len; + workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE); /* * Set the address to the start of where newly created pagetable diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index de36884611450f..23a8a9e411ea05 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S @@ -104,6 +104,7 @@ ENTRY(__enc_copy) mov %rdx, %cr4 push %r15 + push %r12 movq %rcx, %r9 /* Save kernel length */ movq %rdi, %r10 /* Save encrypted kernel address */ @@ -119,21 +120,27 @@ ENTRY(__enc_copy) wbinvd /* Invalidate any cache entries */ - /* Copy/encrypt 2MB at a time */ + /* Copy/encrypt up to 2MB at a time */ + movq $PMD_PAGE_SIZE, %r12 1: + cmpq %r12, %r9 + jnb 2f + movq %r9, %r12 + +2: movq %r11, %rsi /* Source - decrypted kernel */ movq %r8, %rdi /* Dest - intermediate copy buffer */ - movq $PMD_PAGE_SIZE, %rcx /* 2MB length */ + movq %r12, %rcx rep movsb movq %r8, %rsi /* Source - intermediate copy buffer */ movq %r10, %rdi /* Dest - encrypted kernel */ - movq $PMD_PAGE_SIZE, %rcx /* 2MB length */ + movq %r12, %rcx rep movsb - addq $PMD_PAGE_SIZE, %r11 - addq $PMD_PAGE_SIZE, %r10 - subq $PMD_PAGE_SIZE, %r9 /* Kernel length decrement */ + addq %r12, %r11 + addq %r12, %r10 + subq %r12, %r9 /* Kernel length decrement */ jnz 1b /* Kernel length not zero? */ /* Restore PAT register */ @@ -142,6 +149,7 @@ ENTRY(__enc_copy) mov %r15, %rdx /* Restore original PAT value */ wrmsr + pop %r12 pop %r15 ret From 5455eef0f31ee57b02c059e998d8086975292024 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 30 Oct 2017 11:11:03 +0200 Subject: [PATCH 1421/3326] ARM: OMAP3: hwmod_data: add missing module_offs for MMC3 commit 3c4d296e58a23687f2076d8ad531e6ae2b725846 upstream. MMC3 hwmod data is missing the module_offs definition. MMC3 belongs under core, so add CORE_MOD for it. 
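For the SME PAGE-aligned mapping patch above, __sme_map_range() covers the unaligned head and tail of a range with 4KB PTE mappings and the 2MB-aligned middle with PMD mappings. A standalone sketch of just that split: address arithmetic only, no page tables are touched, and the example range is arbitrary.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_PAGE_SIZE	(2UL * 1024 * 1024)
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

static void map_range(unsigned long vaddr, unsigned long vaddr_end)
{
	unsigned long head_end = ALIGN_UP(vaddr, PMD_PAGE_SIZE);
	unsigned long body_end = ALIGN_DOWN(vaddr_end, PMD_PAGE_SIZE);

	if (head_end > vaddr_end)		/* range smaller than one large page */
		head_end = vaddr_end;
	if (body_end < head_end)
		body_end = head_end;

	printf("PTE head: %#lx-%#lx (%lu pages)\n", vaddr, head_end,
	       (head_end - vaddr) / PAGE_SIZE);
	printf("PMD body: %#lx-%#lx (%lu large pages)\n", head_end, body_end,
	       (body_end - head_end) / PMD_PAGE_SIZE);
	printf("PTE tail: %#lx-%#lx (%lu pages)\n", body_end, vaddr_end,
	       (vaddr_end - body_end) / PAGE_SIZE);
}

int main(void)
{
	map_range(0x100003000UL, 0x100aff000UL);	/* neither end is 2MB aligned */
	return 0;
}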
Signed-off-by: Tero Kristo Signed-off-by: Tony Lindgren Cc: Adam Ford Fixes: 6c0afb503937 ("clk: ti: convert to use proper register definition for all accesses") Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index c3276436b0aead..c12e7b572a4195 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1656,6 +1656,7 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = { .main_clk = "mmchs3_fck", .prcm = { .omap2 = { + .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MMC3_SHIFT, .idlest_reg_id = 1, From e0f82cbde41353ddf63935175583ceb5580cce87 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 10 Jan 2018 13:26:34 -0600 Subject: [PATCH 1422/3326] x86/mm: Encrypt the initrd earlier for BSP microcode update commit 107cd2532181b96c549e8f224cdcca8631c3076b upstream. Currently the BSP microcode update code examines the initrd very early in the boot process. If SME is active, the initrd is treated as being encrypted but it has not been encrypted (in place) yet. Update the early boot code that encrypts the kernel to also encrypt the initrd so that early BSP microcode updates work. Tested-by: Gabriel Craciunescu Signed-off-by: Tom Lendacky Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brijesh Singh Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180110192634.6026.10452.stgit@tlendack-t1.amdoffice.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mem_encrypt.h | 4 +- arch/x86/kernel/head64.c | 4 +- arch/x86/kernel/setup.c | 8 ---- arch/x86/mm/mem_encrypt.c | 66 ++++++++++++++++++++++++++---- arch/x86/mm/mem_encrypt_boot.S | 46 ++++++++++----------- 5 files changed, 85 insertions(+), 43 deletions(-) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 6a77c63540f758..e7d96c0766fe17 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data); void __init sme_early_init(void); -void __init sme_encrypt_kernel(void); +void __init sme_encrypt_kernel(struct boot_params *bp); void __init sme_enable(struct boot_params *bp); /* Architecture __weak replacement functions */ @@ -61,7 +61,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { } static inline void __init sme_early_init(void) { } -static inline void __init sme_encrypt_kernel(void) { } +static inline void __init sme_encrypt_kernel(struct boot_params *bp) { } static inline void __init sme_enable(struct boot_params *bp) { } #endif /* CONFIG_AMD_MEM_ENCRYPT */ diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 6a5d757b9cfdc7..7ba5d819ebe3b3 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr, p = fixup_pointer(&phys_base, physaddr); *p += load_delta - sme_get_me_mask(); - /* Encrypt the kernel (if SME is active) */ - sme_encrypt_kernel(); + /* Encrypt the kernel and related (if SME is active) */ + sme_encrypt_kernel(bp); /* * Return the SME encryption mask (if SME is active) to be used as a diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 0957dd73d12755..e84cb4c75cd0eb 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ 
-376,14 +376,6 @@ static void __init reserve_initrd(void) !ramdisk_image || !ramdisk_size) return; /* No initrd provided by bootloader */ - /* - * If SME is active, this memory will be marked encrypted by the - * kernel when it is accessed (including relocation). However, the - * ramdisk image was loaded decrypted by the bootloader, so make - * sure that it is encrypted before accessing it. - */ - sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image); - initrd_start = 0; mapped_size = memblock_mem_size(max_pfn_mapped); diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index b4f15fb2da8f30..e110fab33ed480 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -487,11 +487,12 @@ static unsigned long __init sme_pgtable_calc(unsigned long len) return total; } -void __init sme_encrypt_kernel(void) +void __init sme_encrypt_kernel(struct boot_params *bp) { unsigned long workarea_start, workarea_end, workarea_len; unsigned long execute_start, execute_end, execute_len; unsigned long kernel_start, kernel_end, kernel_len; + unsigned long initrd_start, initrd_end, initrd_len; struct sme_populate_pgd_data ppd; unsigned long pgtable_area_len; unsigned long decrypted_base; @@ -500,14 +501,15 @@ void __init sme_encrypt_kernel(void) return; /* - * Prepare for encrypting the kernel by building new pagetables with - * the necessary attributes needed to encrypt the kernel in place. + * Prepare for encrypting the kernel and initrd by building new + * pagetables with the necessary attributes needed to encrypt the + * kernel in place. * * One range of virtual addresses will map the memory occupied - * by the kernel as encrypted. + * by the kernel and initrd as encrypted. * * Another range of virtual addresses will map the memory occupied - * by the kernel as decrypted and write-protected. + * by the kernel and initrd as decrypted and write-protected. * * The use of write-protect attribute will prevent any of the * memory from being cached. @@ -518,6 +520,20 @@ void __init sme_encrypt_kernel(void) kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE); kernel_len = kernel_end - kernel_start; + initrd_start = 0; + initrd_end = 0; + initrd_len = 0; +#ifdef CONFIG_BLK_DEV_INITRD + initrd_len = (unsigned long)bp->hdr.ramdisk_size | + ((unsigned long)bp->ext_ramdisk_size << 32); + if (initrd_len) { + initrd_start = (unsigned long)bp->hdr.ramdisk_image | + ((unsigned long)bp->ext_ramdisk_image << 32); + initrd_end = PAGE_ALIGN(initrd_start + initrd_len); + initrd_len = initrd_end - initrd_start; + } +#endif + /* Set the encryption workarea to be immediately after the kernel */ workarea_start = kernel_end; @@ -540,6 +556,8 @@ void __init sme_encrypt_kernel(void) */ pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD; pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2; + if (initrd_len) + pgtable_area_len += sme_pgtable_calc(initrd_len) * 2; /* PUDs and PMDs needed in the current pagetables for the workarea */ pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len); @@ -578,9 +596,9 @@ void __init sme_encrypt_kernel(void) /* * A new pagetable structure is being built to allow for the kernel - * to be encrypted. It starts with an empty PGD that will then be - * populated with new PUDs and PMDs as the encrypted and decrypted - * kernel mappings are created. + * and initrd to be encrypted. It starts with an empty PGD that will + * then be populated with new PUDs and PMDs as the encrypted and + * decrypted kernel mappings are created. 
*/ ppd.pgd = ppd.pgtable_area; memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD); @@ -593,6 +611,12 @@ void __init sme_encrypt_kernel(void) * the base of the mapping. */ decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1); + if (initrd_len) { + unsigned long check_base; + + check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1); + decrypted_base = max(decrypted_base, check_base); + } decrypted_base <<= PGDIR_SHIFT; /* Add encrypted kernel (identity) mappings */ @@ -607,6 +631,21 @@ void __init sme_encrypt_kernel(void) ppd.vaddr_end = kernel_end + decrypted_base; sme_map_range_decrypted_wp(&ppd); + if (initrd_len) { + /* Add encrypted initrd (identity) mappings */ + ppd.paddr = initrd_start; + ppd.vaddr = initrd_start; + ppd.vaddr_end = initrd_end; + sme_map_range_encrypted(&ppd); + /* + * Add decrypted, write-protected initrd (non-identity) mappings + */ + ppd.paddr = initrd_start; + ppd.vaddr = initrd_start + decrypted_base; + ppd.vaddr_end = initrd_end + decrypted_base; + sme_map_range_decrypted_wp(&ppd); + } + /* Add decrypted workarea mappings to both kernel mappings */ ppd.paddr = workarea_start; ppd.vaddr = workarea_start; @@ -622,6 +661,11 @@ void __init sme_encrypt_kernel(void) sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, kernel_len, workarea_start, (unsigned long)ppd.pgd); + if (initrd_len) + sme_encrypt_execute(initrd_start, initrd_start + decrypted_base, + initrd_len, workarea_start, + (unsigned long)ppd.pgd); + /* * At this point we are running encrypted. Remove the mappings for * the decrypted areas - all that is needed for this is to remove @@ -631,6 +675,12 @@ void __init sme_encrypt_kernel(void) ppd.vaddr_end = kernel_end + decrypted_base; sme_clear_pgd(&ppd); + if (initrd_len) { + ppd.vaddr = initrd_start + decrypted_base; + ppd.vaddr_end = initrd_end + decrypted_base; + sme_clear_pgd(&ppd); + } + ppd.vaddr = workarea_start + decrypted_base; ppd.vaddr_end = workarea_end + decrypted_base; sme_clear_pgd(&ppd); diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index 23a8a9e411ea05..01f682cf77a8b3 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S @@ -22,9 +22,9 @@ ENTRY(sme_encrypt_execute) /* * Entry parameters: - * RDI - virtual address for the encrypted kernel mapping - * RSI - virtual address for the decrypted kernel mapping - * RDX - length of kernel + * RDI - virtual address for the encrypted mapping + * RSI - virtual address for the decrypted mapping + * RDX - length to encrypt * RCX - virtual address of the encryption workarea, including: * - stack page (PAGE_SIZE) * - encryption routine page (PAGE_SIZE) @@ -41,9 +41,9 @@ ENTRY(sme_encrypt_execute) addq $PAGE_SIZE, %rax /* Workarea encryption routine */ push %r12 - movq %rdi, %r10 /* Encrypted kernel */ - movq %rsi, %r11 /* Decrypted kernel */ - movq %rdx, %r12 /* Kernel length */ + movq %rdi, %r10 /* Encrypted area */ + movq %rsi, %r11 /* Decrypted area */ + movq %rdx, %r12 /* Area length */ /* Copy encryption routine into the workarea */ movq %rax, %rdi /* Workarea encryption routine */ @@ -52,10 +52,10 @@ ENTRY(sme_encrypt_execute) rep movsb /* Setup registers for call */ - movq %r10, %rdi /* Encrypted kernel */ - movq %r11, %rsi /* Decrypted kernel */ + movq %r10, %rdi /* Encrypted area */ + movq %r11, %rsi /* Decrypted area */ movq %r8, %rdx /* Pagetables used for encryption */ - movq %r12, %rcx /* Kernel length */ + movq %r12, %rcx /* Area length */ movq %rax, %r8 /* Workarea encryption routine */ addq $PAGE_SIZE, 
%r8 /* Workarea intermediate copy buffer */ @@ -71,7 +71,7 @@ ENDPROC(sme_encrypt_execute) ENTRY(__enc_copy) /* - * Routine used to encrypt kernel. + * Routine used to encrypt memory in place. * This routine must be run outside of the kernel proper since * the kernel will be encrypted during the process. So this * routine is defined here and then copied to an area outside @@ -79,19 +79,19 @@ ENTRY(__enc_copy) * during execution. * * On entry the registers must be: - * RDI - virtual address for the encrypted kernel mapping - * RSI - virtual address for the decrypted kernel mapping + * RDI - virtual address for the encrypted mapping + * RSI - virtual address for the decrypted mapping * RDX - address of the pagetables to use for encryption - * RCX - length of kernel + * RCX - length of area * R8 - intermediate copy buffer * * RAX - points to this routine * - * The kernel will be encrypted by copying from the non-encrypted - * kernel space to an intermediate buffer and then copying from the - * intermediate buffer back to the encrypted kernel space. The physical - * addresses of the two kernel space mappings are the same which - * results in the kernel being encrypted "in place". + * The area will be encrypted by copying from the non-encrypted + * memory space to an intermediate buffer and then copying from the + * intermediate buffer back to the encrypted memory space. The physical + * addresses of the two mappings are the same which results in the area + * being encrypted "in place". */ /* Enable the new page tables */ mov %rdx, %cr3 @@ -106,9 +106,9 @@ ENTRY(__enc_copy) push %r15 push %r12 - movq %rcx, %r9 /* Save kernel length */ - movq %rdi, %r10 /* Save encrypted kernel address */ - movq %rsi, %r11 /* Save decrypted kernel address */ + movq %rcx, %r9 /* Save area length */ + movq %rdi, %r10 /* Save encrypted area address */ + movq %rsi, %r11 /* Save decrypted area address */ /* Set the PAT register PA5 entry to write-protect */ movl $MSR_IA32_CR_PAT, %ecx @@ -128,13 +128,13 @@ ENTRY(__enc_copy) movq %r9, %r12 2: - movq %r11, %rsi /* Source - decrypted kernel */ + movq %r11, %rsi /* Source - decrypted area */ movq %r8, %rdi /* Dest - intermediate copy buffer */ movq %r12, %rcx rep movsb movq %r8, %rsi /* Source - intermediate copy buffer */ - movq %r10, %rdi /* Dest - encrypted kernel */ + movq %r10, %rdi /* Dest - encrypted area */ movq %r12, %rcx rep movsb From e86e5430b3b1be12e2287d9d7b931c58ba7d175c Mon Sep 17 00:00:00 2001 From: Nir Perry Date: Thu, 11 Jan 2018 23:43:26 -0800 Subject: [PATCH 1423/3326] Input: ALPS - fix multi-touch decoding on SS4 plus touchpads commit 4d94e776bd29670f01befa27e12df784fa05fa2e upstream. The fix for handling two-finger scroll (i4a646580f793 - "Input: ALPS - fix two-finger scroll breakage in right side on ALPS touchpad") introduced a minor "typo" that broke decoding of multi-touch events are decoded on some ALPS touchpads. For example, tapping with three-fingers can no longer be used to emulate middle-mouse-button (the kernel doesn't recognize this as the proper event, and doesn't report it correctly to userspace). This affects touchpads that use SS4 "plus" protocol variant, like those found on Dell E7270 & E7470 laptops (tested on E7270). First, probably the code in alps_decode_ss4_v2() for case SS4_PACKET_ID_MULTI used inconsistent indices to "f->mt[]". You can see 0 & 1 are used for the "if" part but 2 & 3 are used for the "else" part. 
Second, in the previous patch, new macros were introduced to decode X coordinates specific to the SS4 "plus" variant, but the macro to define the maximum X value wasn't changed accordingly. The macros to decode X values for "plus" variant are effectively shifted right by 1 bit, but the max wasn't shifted too. This causes the driver to incorrectly handle "no data" cases, which also interfered with how multi-touch was handled. Fixes: 4a646580f793 ("Input: ALPS - fix two-finger scroll breakage...") Signed-off-by: Nir Perry Reviewed-by: Masaki Ota Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/mouse/alps.c | 23 +++++++++++++---------- drivers/input/mouse/alps.h | 10 ++++++---- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 850b00e3ad8ecd..3d9c294e84db65 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f, case SS4_PACKET_ID_MULTI: if (priv->flags & ALPS_BUTTONPAD) { if (IS_SS4PLUS_DEV(priv->dev_id)) { - f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); - f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); + f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0); + f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1); + no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL; } else { f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); + no_data_x = SS4_MFPACKET_NO_AX_BL; } + no_data_y = SS4_MFPACKET_NO_AY_BL; f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); - no_data_x = SS4_MFPACKET_NO_AX_BL; - no_data_y = SS4_MFPACKET_NO_AY_BL; } else { if (IS_SS4PLUS_DEV(priv->dev_id)) { - f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); - f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); + f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0); + f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1); + no_data_x = SS4_PLUS_MFPACKET_NO_AX; } else { - f->mt[0].x = SS4_STD_MF_X_V2(p, 0); - f->mt[1].x = SS4_STD_MF_X_V2(p, 1); + f->mt[2].x = SS4_STD_MF_X_V2(p, 0); + f->mt[3].x = SS4_STD_MF_X_V2(p, 1); + no_data_x = SS4_MFPACKET_NO_AX; } + no_data_y = SS4_MFPACKET_NO_AY; + f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); - no_data_x = SS4_MFPACKET_NO_AX; - no_data_y = SS4_MFPACKET_NO_AY; } f->first_mp = 0; diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index c80a7c76cb7671..79b6d69d1486a2 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -141,10 +141,12 @@ enum SS4_PACKET_ID { #define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F) -#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */ -#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */ -#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */ -#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */ +#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */ +#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */ +#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coord value */ +#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coord value */ +#define SS4_PLUS_MFPACKET_NO_AX 4080 /* SS4 PLUS, X */ +#define SS4_PLUS_MFPACKET_NO_AX_BL 4088 /* Buttonless SS4 PLUS, X */ /* * enum V7_PACKET_ID - defines the packet type for V7 From b276eae8834746a00a62c56890ec93d1f54950ac Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Thu, 18 Jan 2018 11:36:41 -0800 Subject: [PATCH 1424/3326] Input: synaptics-rmi4 - prevent UAF reported by KASAN commit 55edde9fff1ae4114c893c572e641620c76c9c21 upstream. 
KASAN found a UAF due to dangling pointer. As the report below says, rmi_f11_attention() accesses drvdata->attn_data.data, which was freed in rmi_irq_fn. [ 311.424062] BUG: KASAN: use-after-free in rmi_f11_attention+0x526/0x5e0 [rmi_core] [ 311.424067] Read of size 27 at addr ffff88041fd610db by task irq/131-i2c_hid/1162 [ 311.424075] CPU: 0 PID: 1162 Comm: irq/131-i2c_hid Not tainted 4.15.0-rc8+ #2 [ 311.424076] Hardware name: Razer Blade Stealth/Razer, BIOS 6.05 01/26/2017 [ 311.424078] Call Trace: [ 311.424086] dump_stack+0xae/0x12d [ 311.424090] ? _atomic_dec_and_lock+0x103/0x103 [ 311.424094] ? show_regs_print_info+0xa/0xa [ 311.424099] ? input_handle_event+0x10b/0x810 [ 311.424104] print_address_description+0x65/0x229 [ 311.424108] kasan_report.cold.5+0xa7/0x281 [ 311.424117] rmi_f11_attention+0x526/0x5e0 [rmi_core] [ 311.424123] ? memcpy+0x1f/0x50 [ 311.424132] ? rmi_f11_attention+0x526/0x5e0 [rmi_core] [ 311.424143] ? rmi_f11_probe+0x1e20/0x1e20 [rmi_core] [ 311.424153] ? rmi_process_interrupt_requests+0x220/0x2a0 [rmi_core] [ 311.424163] ? rmi_irq_fn+0x22c/0x270 [rmi_core] [ 311.424173] ? rmi_process_interrupt_requests+0x2a0/0x2a0 [rmi_core] [ 311.424177] ? free_irq+0xa0/0xa0 [ 311.424180] ? irq_finalize_oneshot.part.39+0xeb/0x180 [ 311.424190] ? rmi_process_interrupt_requests+0x2a0/0x2a0 [rmi_core] [ 311.424193] ? irq_thread_fn+0x3d/0x80 [ 311.424197] ? irq_finalize_oneshot.part.39+0x180/0x180 [ 311.424200] ? irq_thread+0x21d/0x290 [ 311.424203] ? irq_thread_check_affinity+0x170/0x170 [ 311.424207] ? remove_wait_queue+0x150/0x150 [ 311.424212] ? kasan_unpoison_shadow+0x30/0x40 [ 311.424214] ? __init_waitqueue_head+0xa0/0xd0 [ 311.424218] ? task_non_contending.cold.55+0x18/0x18 [ 311.424221] ? irq_forced_thread_fn+0xa0/0xa0 [ 311.424226] ? irq_thread_check_affinity+0x170/0x170 [ 311.424230] ? kthread+0x19e/0x1c0 [ 311.424233] ? kthread_create_worker_on_cpu+0xc0/0xc0 [ 311.424237] ? 
ret_from_fork+0x32/0x40 [ 311.424244] Allocated by task 899: [ 311.424249] kasan_kmalloc+0xbf/0xe0 [ 311.424252] __kmalloc_track_caller+0xd9/0x1f0 [ 311.424255] kmemdup+0x17/0x40 [ 311.424264] rmi_set_attn_data+0xa4/0x1b0 [rmi_core] [ 311.424269] rmi_raw_event+0x10b/0x1f0 [hid_rmi] [ 311.424278] hid_input_report+0x1a8/0x2c0 [hid] [ 311.424283] i2c_hid_irq+0x146/0x1d0 [i2c_hid] [ 311.424286] irq_thread_fn+0x3d/0x80 [ 311.424288] irq_thread+0x21d/0x290 [ 311.424291] kthread+0x19e/0x1c0 [ 311.424293] ret_from_fork+0x32/0x40 [ 311.424296] Freed by task 1162: [ 311.424300] kasan_slab_free+0x71/0xc0 [ 311.424303] kfree+0x90/0x190 [ 311.424311] rmi_irq_fn+0x1b2/0x270 [rmi_core] [ 311.424319] rmi_irq_fn+0x257/0x270 [rmi_core] [ 311.424322] irq_thread_fn+0x3d/0x80 [ 311.424324] irq_thread+0x21d/0x290 [ 311.424327] kthread+0x19e/0x1c0 [ 311.424330] ret_from_fork+0x32/0x40 [ 311.424334] The buggy address belongs to the object at ffff88041fd610c0 which belongs to the cache kmalloc-64 of size 64 [ 311.424340] The buggy address is located 27 bytes inside of 64-byte region [ffff88041fd610c0, ffff88041fd61100) [ 311.424344] The buggy address belongs to the page: [ 311.424348] page:ffffea00107f5840 count:1 mapcount:0 mapping: (null) index:0x0 [ 311.424353] flags: 0x17ffffc0000100(slab) [ 311.424358] raw: 0017ffffc0000100 0000000000000000 0000000000000000 00000001802a002a [ 311.424363] raw: dead000000000100 dead000000000200 ffff8804228036c0 0000000000000000 [ 311.424366] page dumped because: kasan: bad access detected [ 311.424369] Memory state around the buggy address: [ 311.424373] ffff88041fd60f80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 [ 311.424377] ffff88041fd61000: fb fb fb fb fb fb fb fb fc fc fc fc fb fb fb fb [ 311.424381] >ffff88041fd61080: fb fb fb fb fc fc fc fc fb fb fb fb fb fb fb fb [ 311.424384] ^ [ 311.424387] ffff88041fd61100: fc fc fc fc fb fb fb fb fb fb fb fb fc fc fc fc [ 311.424391] ffff88041fd61180: fb fb fb fb fb fb fb fb fc fc fc fc fb fb fb fb Signed-off-by: Nick Desaulniers Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/rmi4/rmi_driver.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 4f2bb5947a4ea1..141ea228aac6a3 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id) rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Failed to process interrupt request: %d\n", ret); - if (count) + if (count) { kfree(attn_data.data); + attn_data.data = NULL; + } if (!kfifo_is_empty(&drvdata->attn_fifo)) return rmi_irq_fn(irq, dev_id); From 1bb19ef3ea52ddc5a24a84e456eac96b5b01dc08 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 8 Jan 2018 17:20:18 -0800 Subject: [PATCH 1425/3326] Input: 88pm860x-ts - fix child-node lookup commit 906bf7daa0618d0ef39f4872ca42218c29a3631f upstream. Fix child node-lookup during probe, which ended up searching the whole device tree depth-first starting at parent rather than just matching on its children. To make things worse, the parent node was prematurely freed, while the child node was leaked. 
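The corrected pattern, shown as a minimal hypothetical sketch (the function and property names below are illustrative, not the driver's): of_get_child_by_name() matches direct children only and returns the child with its own reference held, so the caller drops just that reference and never touches the parent's refcount.

#include <linux/of.h>

static int example_parse_touch_node(struct device_node *parent)
{
        struct device_node *child;
        u32 val;
        int ret = 0;

        /* Looks at direct children only; returns the child with a reference held. */
        child = of_get_child_by_name(parent, "touch");
        if (!child)
                return -ENODEV;

        if (of_property_read_u32(child, "example-property", &val))
                ret = -EINVAL;

        /* Release only the child reference; the parent node is untouched. */
        of_node_put(child);

        return ret;
}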
Fixes: 2e57d56747e6 ("mfd: 88pm860x: Device tree support") Signed-off-by: Johan Hovold Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/touchscreen/88pm860x-ts.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c index 7ed828a51f4c87..3486d940380566 100644 --- a/drivers/input/touchscreen/88pm860x-ts.c +++ b/drivers/input/touchscreen/88pm860x-ts.c @@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, int data, n, ret; if (!np) return -ENODEV; - np = of_find_node_by_name(np, "touch"); + np = of_get_child_by_name(np, "touch"); if (!np) { dev_err(&pdev->dev, "Can't find touch node\n"); return -EINVAL; @@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, if (data) { ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data); if (ret < 0) - return -EINVAL; + goto err_put_node; } /* set tsi prebias time */ if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) { ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data); if (ret < 0) - return -EINVAL; + goto err_put_node; } /* set prebias & prechg time of pen detect */ data = 0; @@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, if (data) { ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data); if (ret < 0) - return -EINVAL; + goto err_put_node; } of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x); + + of_node_put(np); + return 0; + +err_put_node: + of_node_put(np); + + return -EINVAL; } #else #define pm860x_touch_dt_init(x, y, z) (-1) From aabc966e7b62812c555c17fe73d03b180e6eedf5 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 8 Jan 2018 17:17:48 -0800 Subject: [PATCH 1426/3326] Input: twl6040-vibra - fix child-node lookup commit dcaf12a8b0bbdbfcfa2be8dff2c4948d9844b4ad upstream. Fix child-node lookup during probe, which ended up searching the whole device tree depth-first starting at parent rather than just matching on its children. Later sanity checks on node properties (which would likely be missing) should prevent this from causing much trouble however, especially as the original premature free of the parent node has already been fixed separately (but that "fix" was apparently never backported to stable). Fixes: e7ec014a47e4 ("Input: twl6040-vibra - update for device tree support") Fixes: c52c545ead97 ("Input: twl6040-vibra - fix DT node memory management") Signed-off-by: Johan Hovold Acked-by: Peter Ujfalusi Tested-by: H. 
Nikolaus Schaller (on Pyra OMAP5 hardware) Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/misc/twl6040-vibra.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c index 5690eb7ff954d2..15e0d352c4cc24 100644 --- a/drivers/input/misc/twl6040-vibra.c +++ b/drivers/input/misc/twl6040-vibra.c @@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev) int vddvibr_uV = 0; int error; - of_node_get(twl6040_core_dev->of_node); - twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node, + twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node, "vibra"); if (!twl6040_core_node) { dev_err(&pdev->dev, "parent of node is missing?\n"); From b1c7c57f5ac55f7fb3f848edfd6619ebbe6ac625 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 8 Jan 2018 17:15:06 -0800 Subject: [PATCH 1427/3326] Input: twl4030-vibra - fix sibling-node lookup commit 5b189201993ab03001a398de731045bfea90c689 upstream. A helper purported to look up a child node based on its name was using the wrong of-helper and ended up prematurely freeing the parent of-node while searching the whole device tree depth-first starting at the parent node. Fixes: 64b9e4d803b1 ("input: twl4030-vibra: Support for DT booted kernel") Fixes: e661d0a04462 ("Input: twl4030-vibra - fix ERROR: Bad of_node_put() warning") Signed-off-by: Johan Hovold Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/misc/twl4030-vibra.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index 6c51d404874bbd..c37aea9ac272a1 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c @@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops, twl4030_vibra_suspend, twl4030_vibra_resume); static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata, - struct device_node *node) + struct device_node *parent) { + struct device_node *node; + if (pdata && pdata->coexist) return true; - node = of_find_node_by_name(node, "codec"); + node = of_get_child_by_name(parent, "codec"); if (node) { of_node_put(node); return true; From 7296666e933c61438818fe160b1e9f9d396c7932 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 18 Jan 2018 15:53:10 -0500 Subject: [PATCH 1428/3326] tracing: Fix converting enum's from the map in trace_event_eval_update() commit 1ebe1eaf2f02784921759992ae1fde1a9bec8fd0 upstream. Since enums do not get converted by the TRACE_EVENT macro into their values, the event format displaces the enum name and not the value. This breaks tools like perf and trace-cmd that need to interpret the raw binary data. To solve this, an enum map was created to convert these enums into their actual numbers on boot up. This is done by TRACE_EVENTS() adding a TRACE_DEFINE_ENUM() macro. Some enums were not being converted. This was caused by an optization that had a bug in it. All calls get checked against this enum map to see if it should be converted or not, and it compares the call's system to the system that the enum map was created under. If they match, then they call is processed. 
To cut down on the number of iterations needed to find the maps with a matching system, since calls and maps are grouped by system, when a match is made, the index into the map array is saved, so that the next call, if it belongs to the same system as the previous call, could start right at that array index and not have to scan all the previous arrays. The problem was, the saved index was used as the variable to know if this is a call in a new system or not. If the index was zero, it was assumed that the call is in a new system and would keep incrementing the saved index until it found a matching system. The issue arises when the first matching system was at index zero. The next map, if it belonged to the same system, would then think it was the first match and increment the index to one. If the next call belong to the same system, it would begin its search of the maps off by one, and miss the first enum that should be converted. This left a single enum not converted properly. Also add a comment to describe exactly what that index was for. It took me a bit too long to figure out what I was thinking when debugging this issue. Link: http://lkml.kernel.org/r/717BE572-2070-4C1E-9902-9F2E0FEDA4F8@oracle.com Fixes: 0c564a538aa93 ("tracing: Add TRACE_DEFINE_ENUM() macro to map enums to their values") Reported-by: Chuck Lever Teste-by: Chuck Lever Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 87468398b9ed62..d53268a4e1671b 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2213,6 +2213,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len) { struct trace_event_call *call, *p; const char *last_system = NULL; + bool first = false; int last_i; int i; @@ -2220,15 +2221,28 @@ void trace_event_eval_update(struct trace_eval_map **map, int len) list_for_each_entry_safe(call, p, &ftrace_events, list) { /* events are usually grouped together with systems */ if (!last_system || call->class->system != last_system) { + first = true; last_i = 0; last_system = call->class->system; } + /* + * Since calls are grouped by systems, the likelyhood that the + * next call in the iteration belongs to the same system as the + * previous call is high. As an optimization, we skip seaching + * for a map[] that matches the call's system if the last call + * was from the same system. That's what last_i is for. If the + * call has the same system as the previous call, then last_i + * will be the index of the first map[] that has a matching + * system. + */ for (i = last_i; i < len; i++) { if (call->class->system == map[i]->system) { /* Save the first system if need be */ - if (!last_i) + if (first) { last_i = i; + first = false; + } update_event_printk(call, map[i]); } } From c0443694ae877a45265fd58b49d68cbb893a9571 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 12 Jan 2018 11:12:05 +0100 Subject: [PATCH 1429/3326] phy: work around 'phys' references to usb-nop-xceiv devices commit b7563e2796f8b23c98afcfea7363194227fa089d upstream. 
Stefan Wahren reports a problem with a warning fix that was merged for v4.15: we had lots of device nodes with a 'phys' property pointing to a device node that is not compliant with the binding documented in Documentation/devicetree/bindings/phy/phy-bindings.txt This generally works because USB HCD drivers that support both the generic phy subsystem and the older usb-phy subsystem ignore most errors from phy_get() and related calls and then use the usb-phy driver instead. However, it turns out that making the usb-nop-xceiv device compatible with the generic-phy binding changes the phy_get() return code from -EINVAL to -EPROBE_DEFER, and the dwc2 usb controller driver for bcm2835 now returns -EPROBE_DEFER from its probe function rather than ignoring the failure, breaking all USB support on raspberry-pi when CONFIG_GENERIC_PHY is enabled. The same code is used in the dwc3 driver and the usb_add_hcd() function, so a reasonable assumption would be that many other platforms are affected as well. I have reviewed all the related patches and concluded that "usb-nop-xceiv" is the only USB phy that is affected by the change, and since it is by far the most commonly referenced phy, all the other USB phy drivers appear to be used in ways that are are either safe in DT (they don't use the 'phys' property), or in the driver (they already ignore -EPROBE_DEFER from generic-phy when usb-phy is available). To work around the problem, this adds a special case to _of_phy_get() so we ignore any PHY node that is compatible with "usb-nop-xceiv", as we know that this can never load no matter how much we defer. In the future, we might implement a generic-phy driver for "usb-nop-xceiv" and then remove this workaround. Since we generally want older kernels to also want to work with the fixed devicetree files, it would be good to backport the patch into stable kernels as well (3.13+ are possibly affected), even though they don't contain any of the patches that may have caused regressions. 
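For context, the consumer-side handling referred to above looks roughly like the sketch below (the "usb" phy name and the helper are hypothetical): -ENODEV lets a host-controller driver quietly fall back to the legacy usb-phy lookup, whereas -EPROBE_DEFER aborts the probe so it can be retried later and, since nothing ever binds to "usb-nop-xceiv" as a generic PHY, the retry never succeeds.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/phy/phy.h>

static struct phy *example_get_optional_phy(struct device *dev)
{
        struct phy *phy = devm_phy_get(dev, "usb");

        if (IS_ERR(phy)) {
                /* A deferral must be propagated so the probe is retried. */
                if (PTR_ERR(phy) == -EPROBE_DEFER)
                        return phy;
                /*
                 * Any other error (such as the -ENODEV returned by the
                 * workaround below) means "no usable generic PHY"; the
                 * caller falls back to the legacy usb-phy lookup instead.
                 */
                return NULL;
        }

        return phy;
}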
Fixes: 014d6da6cb25 ARM: dts: bcm283x: Fix DTC warnings about missing phy-cells Fixes: c5bbf358b790 arm: dts: nspire: Add missing #phy-cells to usb-nop-xceiv Fixes: 44e5dced2ef6 arm: dts: marvell: Add missing #phy-cells to usb-nop-xceiv Fixes: f568f6f554b8 ARM: dts: omap: Add missing #phy-cells to usb-nop-xceiv Fixes: d745d5f277bf ARM: dts: imx51-zii-rdu1: Add missing #phy-cells to usb-nop-xceiv Fixes: 915fbe59cbf2 ARM: dts: imx: Add missing #phy-cells to usb-nop-xceiv Link: https://marc.info/?l=linux-usb&m=151518314314753&w=2 Link: https://patchwork.kernel.org/patch/10158145/ Cc: Felipe Balbi Cc: Eric Anholt Tested-by: Stefan Wahren Acked-by: Rob Herring Tested-by: Hans Verkuil Acked-by: Kishon Vijay Abraham I Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- drivers/phy/phy-core.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index a268f4d6f3e900..48a365e303e5a9 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -395,6 +395,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index) if (ret) return ERR_PTR(-ENODEV); + /* This phy type handled by the usb-phy subsystem for now */ + if (of_device_is_compatible(args.np, "usb-nop-xceiv")) + return ERR_PTR(-ENODEV); + mutex_lock(&phy_provider_mutex); phy_provider = of_phy_provider_lookup(args.np); if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) { From ebc8d9c33fa89edf71198e82467a005d967259ad Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Tue, 25 Jul 2017 16:51:20 +0200 Subject: [PATCH 1430/3326] ARM64: dts: marvell: armada-cp110: Fix clock resources for various node commit e3af9f7c6ece29fdb7fe0aeb83ac5d3077a06edb upstream. On the CP modules we found on Armada 7K/8K, many IP block actually also need a "functional" clock (from the bus). This patch add them which allows to fix some issues hanging the kernel: If Ethernet and sdhci driver are built as modules and sdhci was loaded first then the kernel hang. 
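On the driver side such a "functional" clock shows up as one more clock to look up and keep enabled while the device is in use; a minimal sketch of that pattern (the helper and the optional-clock policy are illustrative, not taken from the sdhci-xenon or PPv2 drivers):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

static int example_enable_bus_clk(struct device *dev, struct clk **axi_clk)
{
        *axi_clk = devm_clk_get(dev, "axi");
        if (IS_ERR(*axi_clk)) {
                if (PTR_ERR(*axi_clk) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                /* Treat a missing bus clock as optional on older SoCs. */
                *axi_clk = NULL;
                return 0;
        }

        /* Keep the interconnect clock running while the device is used. */
        return clk_prepare_enable(*axi_clk);
}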
Fixes: bb16ea1742c8 ("mmc: sdhci-xenon: Fix clock resource by adding an optional bus clock") Reported-by: Riku Voipio Signed-off-by: Gregory CLEMENT Signed-off-by: Greg Kroah-Hartman --- .../arm64/boot/dts/marvell/armada-cp110-master.dtsi | 13 ++++++++----- arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi | 9 ++++++--- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi index f2aa2a81de4dd2..32690107c1ccea 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi @@ -63,8 +63,10 @@ cpm_ethernet: ethernet@0 { compatible = "marvell,armada-7k-pp22"; reg = <0x0 0x100000>, <0x129000 0xb000>; - clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>; - clock-names = "pp_clk", "gop_clk", "mg_clk"; + clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, + <&cpm_clk 1 5>, <&cpm_clk 1 18>; + clock-names = "pp_clk", "gop_clk", + "mg_clk","axi_clk"; marvell,system-controller = <&cpm_syscon0>; status = "disabled"; dma-coherent; @@ -114,7 +116,8 @@ #size-cells = <0>; compatible = "marvell,orion-mdio"; reg = <0x12a200 0x10>; - clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>; + clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>, + <&cpm_clk 1 6>, <&cpm_clk 1 18>; status = "disabled"; }; @@ -295,8 +298,8 @@ compatible = "marvell,armada-cp110-sdhci"; reg = <0x780000 0x300>; interrupts = ; - clock-names = "core"; - clocks = <&cpm_clk 1 4>; + clock-names = "core","axi"; + clocks = <&cpm_clk 1 4>, <&cpm_clk 1 18>; dma-coherent; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi index 4fe70323abb3a5..14e47c5c38161d 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi @@ -63,8 +63,10 @@ cps_ethernet: ethernet@0 { compatible = "marvell,armada-7k-pp22"; reg = <0x0 0x100000>, <0x129000 0xb000>; - clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>; - clock-names = "pp_clk", "gop_clk", "mg_clk"; + clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, + <&cps_clk 1 5>, <&cps_clk 1 18>; + clock-names = "pp_clk", "gop_clk", + "mg_clk", "axi_clk"; marvell,system-controller = <&cps_syscon0>; status = "disabled"; dma-coherent; @@ -114,7 +116,8 @@ #size-cells = <0>; compatible = "marvell,orion-mdio"; reg = <0x12a200 0x10>; - clocks = <&cps_clk 1 9>, <&cps_clk 1 5>; + clocks = <&cps_clk 1 9>, <&cps_clk 1 5>, + <&cps_clk 1 6>, <&cps_clk 1 18>; status = "disabled"; }; From fb7f328c32c54a87b29f519959facc4909a6daef Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Fri, 19 Jan 2018 14:32:08 +0100 Subject: [PATCH 1431/3326] ARM: sunxi_defconfig: Enable CMA commit c13e7f313da33d1488355440f1a10feb1897480a upstream. The DRM driver most notably, but also out of tree drivers (for now) like the VPU or GPU drivers, are quite big consumers of large, contiguous memory buffers. However, the sunxi_defconfig doesn't enable CMA in order to mitigate that, which makes them almost unusable. Enable it to make sure it somewhat works. 
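For context, those consumers typically allocate their buffers through the DMA API, and large physically contiguous requests are only reliably satisfiable when a CMA area exists to back them; a minimal, illustrative sketch (the device and size are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_alloc_scanout(struct device *dev, dma_addr_t *dma)
{
        /*
         * With CONFIG_DMA_CMA=y an 8 MiB contiguous request like this can
         * be carved out of the CMA area even after memory has fragmented.
         */
        return dma_alloc_coherent(dev, 8 * 1024 * 1024, dma, GFP_KERNEL);
}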
Signed-off-by: Maxime Ripard Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- arch/arm/configs/sunxi_defconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 5caaf971fb500c..df433abfcb0284 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -10,6 +10,7 @@ CONFIG_SMP=y CONFIG_NR_CPUS=8 CONFIG_AEABI=y CONFIG_HIGHMEM=y +CONFIG_CMA=y CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_CPU_FREQ=y @@ -33,6 +34,7 @@ CONFIG_CAN_SUN4I=y # CONFIG_WIRELESS is not set CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMA_CMA=y CONFIG_BLK_DEV_SD=y CONFIG_ATA=y CONFIG_AHCI_SUNXI=y From e53050a85f657f55c2036beea2cbee698d47b6d7 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 4 Jan 2018 17:53:12 +0100 Subject: [PATCH 1432/3326] ARM: dts: kirkwood: fix pin-muxing of MPP7 on OpenBlocks A7 commit 56aeb07c914a616ab84357d34f8414a69b140cdf upstream. MPP7 is currently muxed as "gpio", but this function doesn't exist for MPP7, only "gpo" is available. This causes the following error: kirkwood-pinctrl f1010000.pin-controller: unsupported function gpio on pin mpp7 pinctrl core: failed to register map default (6): invalid type given kirkwood-pinctrl f1010000.pin-controller: error claiming hogs: -22 kirkwood-pinctrl f1010000.pin-controller: could not claim hogs: -22 kirkwood-pinctrl f1010000.pin-controller: unable to register pinctrl driver kirkwood-pinctrl: probe of f1010000.pin-controller failed with error -22 So the pinctrl driver is not probed, all device drivers (including the UART driver) do a -EPROBE_DEFER, and therefore the system doesn't really boot (well, it boots, but with no UART, and no devices that require pin-muxing). Back when the Device Tree file for this board was introduced, the definition was already wrong. The pinctrl driver also always described as "gpo" this function for MPP7. However, between Linux 4.10 and 4.11, a hog pin failing to be muxed was turned from a simple warning to a hard error that caused the entire pinctrl driver probe to bail out. This is probably the result of commit 6118714275f0a ("pinctrl: core: Fix pinctrl_register_and_init() with pinctrl_enable()"). This commit fixes the Device Tree to use the proper "gpo" function for MPP7, which fixes the boot of OpenBlocks A7, which was broken since Linux 4.11. Fixes: f24b56cbcd9d ("ARM: kirkwood: add support for OpenBlocks A7 platform") Signed-off-by: Thomas Petazzoni Reviewed-by: Andrew Lunn Signed-off-by: Gregory CLEMENT Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/kirkwood-openblocks_a7.dts | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts index cf2f5240e176d0..27cc913ca0f569 100644 --- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts +++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts @@ -53,7 +53,8 @@ }; pinctrl: pin-controller@10000 { - pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>; + pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header + &pmx_gpio_header_gpo>; pinctrl-names = "default"; pmx_uart0: pmx-uart0 { @@ -85,11 +86,16 @@ * ground. 
*/ pmx_gpio_header: pmx-gpio-header { - marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28", + marvell,pins = "mpp17", "mpp29", "mpp28", "mpp35", "mpp34", "mpp40"; marvell,function = "gpio"; }; + pmx_gpio_header_gpo: pxm-gpio-header-gpo { + marvell,pins = "mpp7"; + marvell,function = "gpo"; + }; + pmx_gpio_init: pmx-init { marvell,pins = "mpp38"; marvell,function = "gpio"; From 659af12a2780dd552b6702dafd4598900aeb29e9 Mon Sep 17 00:00:00 2001 From: Stephane Grosjean Date: Mon, 15 Jan 2018 16:31:19 +0100 Subject: [PATCH 1433/3326] can: peak: fix potential bug in packet fragmentation commit d8a243af1a68395e07ac85384a2740d4134c67f4 upstream. In some rare conditions when running one PEAK USB-FD interface over a non high-speed USB controller, one useless USB fragment might be sent. This patch fixes the way a USB command is fragmented when its length is greater than 64 bytes and when the underlying USB controller is not a high-speed one. Signed-off-by: Stephane Grosjean Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 7ccdc3e30c98c2..53d6bb045e9e91 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) void *cmd_head = pcan_usb_fd_cmd_buffer(dev); int err = 0; u8 *packet_ptr; - int i, n = 1, packet_len; + int packet_len; ptrdiff_t cmd_len; /* usb device unregistered? */ @@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) } packet_ptr = cmd_head; + packet_len = cmd_len; /* firmware is not able to re-assemble 512 bytes buffer in full-speed */ - if ((dev->udev->speed != USB_SPEED_HIGH) && - (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) { - packet_len = PCAN_UFD_LOSPD_PKT_SIZE; - n += cmd_len / packet_len; - } else { - packet_len = cmd_len; - } + if (unlikely(dev->udev->speed != USB_SPEED_HIGH)) + packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE); - for (i = 0; i < n; i++) { + do { err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT), @@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) } packet_ptr += packet_len; - } + cmd_len -= packet_len; + + if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE) + packet_len = cmd_len; + + } while (packet_len > 0); return err; } From 91c5fbf1e20b0c0508fdeb5bc0763c65a935f480 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 16 Jan 2018 19:30:14 +0100 Subject: [PATCH 1434/3326] can: af_can: can_rcv(): replace WARN_ONCE by pr_warn_once commit 8cb68751c115d176ec851ca56ecfbb411568c9e8 upstream. If an invalid CAN frame is received, from a driver or from a tun interface, a Kernel warning is generated. This patch replaces the WARN_ONCE by a simple pr_warn_once, so that a kernel, bootet with panic_on_warn, does not panic. A printk seems to be more appropriate here. 
Reported-by: syzbot+4386709c0c1284dca827@syzkaller.appspotmail.com Suggested-by: Dmitry Vyukov Acked-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- net/can/af_can.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/net/can/af_can.c b/net/can/af_can.c index ecd5c703d11e85..c94e619329a9a2 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -721,20 +721,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - if (WARN_ONCE(dev->type != ARPHRD_CAN || - skb->len != CAN_MTU || - cfd->len > CAN_MAX_DLEN, - "PF_CAN: dropped non conform CAN skbuf: " - "dev type %d, len %d, datalen %d\n", - dev->type, skb->len, cfd->len)) - goto drop; + if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU || + cfd->len > CAN_MAX_DLEN)) { + pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n", + dev->type, skb->len, cfd->len); + kfree_skb(skb); + return NET_RX_DROP; + } can_receive(skb, dev); return NET_RX_SUCCESS; - -drop: - kfree_skb(skb); - return NET_RX_DROP; } static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, From 41812e400fc0f6aa2ff4fde54462b32b2c9884f6 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 16 Jan 2018 19:30:14 +0100 Subject: [PATCH 1435/3326] can: af_can: canfd_rcv(): replace WARN_ONCE by pr_warn_once commit d4689846881d160a4d12a514e991a740bcb5d65a upstream. If an invalid CANFD frame is received, from a driver or from a tun interface, a Kernel warning is generated. This patch replaces the WARN_ONCE by a simple pr_warn_once, so that a kernel, bootet with panic_on_warn, does not panic. A printk seems to be more appropriate here. Reported-by: syzbot+e3b775f40babeff6e68b@syzkaller.appspotmail.com Suggested-by: Dmitry Vyukov Acked-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- net/can/af_can.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/net/can/af_can.c b/net/can/af_can.c index c94e619329a9a2..e3626e8500c244 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -738,20 +738,16 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - if (WARN_ONCE(dev->type != ARPHRD_CAN || - skb->len != CANFD_MTU || - cfd->len > CANFD_MAX_DLEN, - "PF_CAN: dropped non conform CAN FD skbuf: " - "dev type %d, len %d, datalen %d\n", - dev->type, skb->len, cfd->len)) - goto drop; + if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU || + cfd->len > CANFD_MAX_DLEN)) { + pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n", + dev->type, skb->len, cfd->len); + kfree_skb(skb); + return NET_RX_DROP; + } can_receive(skb, dev); return NET_RX_SUCCESS; - -drop: - kfree_skb(skb); - return NET_RX_DROP; } /* From c7b8be81fc5506a6f2f9f01a06f6806bcca7d48a Mon Sep 17 00:00:00 2001 From: Jeremy Compostella Date: Wed, 15 Nov 2017 12:31:44 -0700 Subject: [PATCH 1436/3326] i2c: core-smbus: prevent stack corruption on read I2C_BLOCK_DATA commit 89c6efa61f5709327ecfa24bff18e57a4e80c7fa upstream. On a I2C_SMBUS_I2C_BLOCK_DATA read request, if data->block[0] is greater than I2C_SMBUS_BLOCK_MAX + 1, the underlying I2C driver writes data out of the msgbuf1 array boundary. It is possible from a user application to run into that issue by calling the I2C_SMBUS ioctl with data.block[0] greater than I2C_SMBUS_BLOCK_MAX + 1. 
This patch makes the code compliant with Documentation/i2c/dev-interface by raising an error when the requested size is larger than 32 bytes. Call Trace: [] dump_stack+0x67/0x92 [] panic+0xc5/0x1eb [] ? vprintk_default+0x1f/0x30 [] ? i2cdev_ioctl_smbus+0x303/0x320 [] __stack_chk_fail+0x1b/0x20 [] i2cdev_ioctl_smbus+0x303/0x320 [] i2cdev_ioctl+0x4d/0x1e0 [] do_vfs_ioctl+0x2ba/0x490 [] ? security_file_ioctl+0x43/0x60 [] SyS_ioctl+0x79/0x90 [] entry_SYSCALL_64_fastpath+0x12/0x6a Signed-off-by: Jeremy Compostella Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/i2c-core-smbus.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index 10f00a82ec9dba..e54a9b835b62eb 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -396,16 +396,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, the underlying bus driver */ break; case I2C_SMBUS_I2C_BLOCK_DATA: + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { + dev_err(&adapter->dev, "Invalid block %s size %d\n", + read_write == I2C_SMBUS_READ ? "read" : "write", + data->block[0]); + return -EINVAL; + } + if (read_write == I2C_SMBUS_READ) { msg[1].len = data->block[0]; } else { msg[0].len = data->block[0] + 1; - if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) { - dev_err(&adapter->dev, - "Invalid block write size %d\n", - data->block[0]); - return -EINVAL; - } for (i = 1; i <= data->block[0]; i++) msgbuf0[i] = data->block[i]; } From 4616525bfdc3222169e4836d338d79242542984c Mon Sep 17 00:00:00 2001 From: Xi Kangjie Date: Thu, 18 Jan 2018 16:34:00 -0800 Subject: [PATCH 1437/3326] scripts/gdb/linux/tasks.py: fix get_thread_info commit 883d50f56d263f70fd73c0d96b09eb36c34e9305 upstream. Since kernel 4.9, the thread_info has been moved into task_struct, no longer locates at the bottom of kernel stack. See commits c65eacbe290b ("sched/core: Allow putting thread_info into task_struct") and 15f4eae70d36 ("x86: Move thread_info into task_struct"). Before fix: (gdb) set $current = $lx_current() (gdb) p $lx_thread_info($current) $1 = {flags = 1470918301} (gdb) p $current.thread_info $2 = {flags = 2147483648} After fix: (gdb) p $lx_thread_info($current) $1 = {flags = 2147483648} (gdb) p $current.thread_info $2 = {flags = 2147483648} Link: http://lkml.kernel.org/r/20180118210159.17223-1-imxikangjie@gmail.com Fixes: 15f4eae70d36 ("x86: Move thread_info into task_struct") Signed-off-by: Xi Kangjie Acked-by: Jan Kiszka Acked-by: Kieran Bingham Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- scripts/gdb/linux/tasks.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py index 1bf949c43b76cd..f6ab3ccf698ff4 100644 --- a/scripts/gdb/linux/tasks.py +++ b/scripts/gdb/linux/tasks.py @@ -96,6 +96,8 @@ def get_thread_info(task): thread_info_addr = task.address + ia64_task_size thread_info = thread_info_addr.cast(thread_info_ptr_type) else: + if task.type.fields()[0].type == thread_info_type.get_type(): + return task['thread_info'] thread_info = task['stack'].cast(thread_info_ptr_type) return thread_info.dereference() From ea5c2944328c46d17cc67879d6bee964c4de9be2 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Thu, 18 Jan 2018 16:34:05 -0800 Subject: [PATCH 1438/3326] proc: fix coredump vs read /proc/*/stat race commit 8bb2ee192e482c5d500df9f2b1b26a560bd3026f upstream. 
do_task_stat() accesses IP and SP of a task without bumping reference count of a stack (which became an entity with independent lifetime at some point). Steps to reproduce: #include #include #include #include #include #include #include #include int main(void) { setrlimit(RLIMIT_CORE, &(struct rlimit){}); while (1) { char buf[64]; char buf2[4096]; pid_t pid; int fd; pid = fork(); if (pid == 0) { *(volatile int *)0 = 0; } snprintf(buf, sizeof(buf), "/proc/%u/stat", pid); fd = open(buf, O_RDONLY); read(fd, buf2, sizeof(buf2)); close(fd); waitpid(pid, NULL, 0); } return 0; } BUG: unable to handle kernel paging request at 0000000000003fd8 IP: do_task_stat+0x8b4/0xaf0 PGD 800000003d73e067 P4D 800000003d73e067 PUD 3d558067 PMD 0 Oops: 0000 [#1] PREEMPT SMP PTI CPU: 0 PID: 1417 Comm: a.out Not tainted 4.15.0-rc8-dirty #2 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc27 04/01/2014 RIP: 0010:do_task_stat+0x8b4/0xaf0 Call Trace: proc_single_show+0x43/0x70 seq_read+0xe6/0x3b0 __vfs_read+0x1e/0x120 vfs_read+0x84/0x110 SyS_read+0x3d/0xa0 entry_SYSCALL_64_fastpath+0x13/0x6c RIP: 0033:0x7f4d7928cba0 RSP: 002b:00007ffddb245158 EFLAGS: 00000246 Code: 03 b7 a0 01 00 00 4c 8b 4c 24 70 4c 8b 44 24 78 4c 89 74 24 18 e9 91 f9 ff ff f6 45 4d 02 0f 84 fd f7 ff ff 48 8b 45 40 48 89 ef <48> 8b 80 d8 3f 00 00 48 89 44 24 20 e8 9b 97 eb ff 48 89 44 24 RIP: do_task_stat+0x8b4/0xaf0 RSP: ffffc90000607cc8 CR2: 0000000000003fd8 John Ogness said: for my tests I added an else case to verify that the race is hit and correctly mitigated. Link: http://lkml.kernel.org/r/20180116175054.GA11513@avx2 Signed-off-by: Alexey Dobriyan Reported-by: "Kohli, Gaurav" Tested-by: John Ogness Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/proc/array.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/proc/array.c b/fs/proc/array.c index 9390032a11e13d..e6094a15ef3078 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -424,8 +424,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, * safe because the task has stopped executing permanently. */ if (permitted && (task->flags & PF_DUMPCORE)) { - eip = KSTK_EIP(task); - esp = KSTK_ESP(task); + if (try_get_task_stack(task)) { + eip = KSTK_EIP(task); + esp = KSTK_ESP(task); + put_task_stack(task); + } } } From 25b13c2f4f4941b60aaaba33a0b18704b9afce05 Mon Sep 17 00:00:00 2001 From: Xinyu Lin Date: Sun, 17 Dec 2017 20:13:39 +0800 Subject: [PATCH 1439/3326] libata: apply MAX_SEC_1024 to all LITEON EP1 series devices commit db5ff909798ef0099004ad50a0ff5fde92426fd1 upstream. LITEON EP1 has the same timeout issues as CX1 series devices. Revert max_sectors to the value of 1024. 
Fixes: e0edc8c54646 ("libata: apply MAX_SEC_1024 to all CX1-JB*-HP devices") Signed-off-by: Xinyu Lin Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- drivers/ata/libata-core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ee4c1ec9dca0ef..e7ded346d94bd2 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4439,6 +4439,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { * https://bugzilla.kernel.org/show_bug.cgi?id=121671 */ { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, + { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 }, /* Devices we expect to fail diagnostics */ From edd0a403a12186aecd2e04c0a37f983cdf9c5810 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Wed, 10 Jan 2018 09:34:02 +0100 Subject: [PATCH 1440/3326] scsi: libsas: Disable asynchronous aborts for SATA devices commit c9f926000fe3b84135a81602a9f7e63a6a7898e2 upstream. Handling CD-ROM devices from libsas is decidedly odd, as libata relies on SCSI EH to be started to figure out that no medium is present. So we cannot do asynchronous aborts for SATA devices. Fixes: 909657615d9 ("scsi: libsas: allow async aborts") Signed-off-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Tested-by: Yves-Alexis Perez Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/libsas/sas_scsi_host.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index ea8ad06ff582ee..10b17da20176d9 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -486,15 +486,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type, int sas_eh_abort_handler(struct scsi_cmnd *cmd) { - int res; + int res = TMF_RESP_FUNC_FAILED; struct sas_task *task = TO_SAS_TASK(cmd); struct Scsi_Host *host = cmd->device->host; + struct domain_device *dev = cmd_to_domain_dev(cmd); struct sas_internal *i = to_sas_internal(host->transportt); + unsigned long flags; if (!i->dft->lldd_abort_task) return FAILED; - res = i->dft->lldd_abort_task(task); + spin_lock_irqsave(host->host_lock, flags); + /* We cannot do async aborts for SATA devices */ + if (dev_is_sata(dev) && !host->host_eh_scheduled) { + spin_unlock_irqrestore(host->host_lock, flags); + return FAILED; + } + spin_unlock_irqrestore(host->host_lock, flags); + + if (task) + res = i->dft->lldd_abort_task(task); + else + SAS_DPRINTK("no task to abort\n"); if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) return SUCCESS; From a6d5930ccf3c746fa737b221d42adb6811945eec Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 11 Jan 2018 09:53:35 +0900 Subject: [PATCH 1441/3326] workqueue: avoid hard lockups in show_workqueue_state() commit 62635ea8c18f0f62df4cc58379e4f1d33afd5801 upstream. show_workqueue_state() can print out a lot of messages while being in atomic context, e.g. sysrq-t -> show_workqueue_state(). If the console device is slow it may end up triggering NMI hard lockup watchdog. 
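The mitigation applied below follows a common pattern: a long diagnostic dump running in atomic context pets the hard-lockup detector between iterations so the time spent on a slow console is not mistaken for a lockup. A generic, illustrative sketch (the loop body is hypothetical):

#include <linux/nmi.h>

static void example_dump_items(int nr_items)
{
        int i;

        for (i = 0; i < nr_items; i++) {
                /* ... print one item, possibly to a slow console ... */

                /* Reset the hard-lockup watchdog between items. */
                touch_nmi_watchdog();
        }
}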
Signed-off-by: Sergey Senozhatsky Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- kernel/workqueue.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a2dccfe1acec34..8365a52a74c523 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -48,6 +48,7 @@ #include #include #include +#include #include "workqueue_internal.h" @@ -4479,6 +4480,12 @@ void show_workqueue_state(void) if (pwq->nr_active || !list_empty(&pwq->delayed_works)) show_pwq(pwq); spin_unlock_irqrestore(&pwq->pool->lock, flags); + /* + * We could be printing a lot from atomic context, e.g. + * sysrq-t -> show_workqueue_state(). Avoid triggering + * hard lockup. + */ + touch_nmi_watchdog(); } } @@ -4506,6 +4513,12 @@ void show_workqueue_state(void) pr_cont("\n"); next_pool: spin_unlock_irqrestore(&pool->lock, flags); + /* + * We could be printing a lot from atomic context, e.g. + * sysrq-t -> show_workqueue_state(). Avoid triggering + * hard lockup. + */ + touch_nmi_watchdog(); } rcu_read_unlock_sched(); From 8f0b1d5cfff904fc2dc4a1389cf3296037d1ec51 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 17 Jan 2018 10:16:20 -0500 Subject: [PATCH 1442/3326] drm/vmwgfx: fix memory corruption with legacy/sou connectors commit 8a510a5c75261ba0ec39155326982aa786541e29 upstream. It looks like in all cases 'struct vmw_connector_state' is used. But only in stdu connectors, was atomic_{duplicate,destroy}_state() properly subclassed. Leading to writes beyond the end of the allocated connector state block and all sorts of fun memory corruption related crashes. Fixes: d7721ca71126 "drm/vmwgfx: Connector atomic state" Signed-off-by: Rob Clark Reviewed-by: Thomas Hellstrom Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index b8a09807c5de8e..3824595fece126 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = { .set_property = vmw_du_connector_set_property, .destroy = vmw_ldu_connector_destroy, .reset = vmw_du_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = vmw_du_connector_duplicate_state, + .atomic_destroy_state = vmw_du_connector_destroy_state, .atomic_set_property = vmw_du_connector_atomic_set_property, .atomic_get_property = vmw_du_connector_atomic_get_property, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index d1552d3e0652b6..7ae38a67388c4d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = { .set_property = vmw_du_connector_set_property, .destroy = vmw_sou_connector_destroy, .reset = vmw_du_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = vmw_du_connector_duplicate_state, + .atomic_destroy_state = vmw_du_connector_destroy_state, .atomic_set_property = vmw_du_connector_atomic_set_property, .atomic_get_property = vmw_du_connector_atomic_get_property, }; From 
905637024f550f6b82f0dee77d6a0e251763a6e1 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Wed, 20 Dec 2017 09:56:06 +0000 Subject: [PATCH 1443/3326] dm btree: fix serious bug in btree_split_beneath() commit bc68d0a43560e950850fc69b58f0f8254b28f6d6 upstream. When inserting a new key/value pair into a btree we walk down the spine of btree nodes performing the following 2 operations: i) space for a new entry ii) adjusting the first key entry if the new key is lower than any in the node. If the _root_ node is full, the function btree_split_beneath() allocates 2 new nodes, and redistributes the root node's entries between them. The root node is left with 2 entries corresponding to the 2 new nodes. btree_split_beneath() then adjusts the spine to point to one of the two new children. This means the first key is never adjusted if the new key was lower, i.e. operation (ii) gets missed out. This can result in the new key being 'lost' for a period; until another low valued key is inserted that will uncover it. This is a serious bug, and quite hard to make trigger in normal use. A reproducing test case ("thin create devices-in-reverse-order") is available as part of the thin-provisioning-tools project: https://github.com/jthornber/thin-provisioning-tools/blob/master/functional-tests/device-mapper/dm-tests.scm#L593 Fix the issue by changing btree_split_beneath() so it no longer adjusts the spine. Instead it unlocks both the new nodes, and lets the main loop in btree_insert_raw() relock the appropriate one and make any necessary adjustments. Reported-by: Monty Pavel Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/persistent-data/dm-btree.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index f21ce6a3d4cf56..58b319757b1e5a 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) pn->keys[1] = rn->keys[0]; memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64)); - /* - * rejig the spine. This is ugly, since it knows too - * much about the spine - */ - if (s->nodes[0] != new_parent) { - unlock_block(s->info, s->nodes[0]); - s->nodes[0] = new_parent; - } - if (key < le64_to_cpu(rn->keys[0])) { - unlock_block(s->info, right); - s->nodes[1] = left; - } else { - unlock_block(s->info, left); - s->nodes[1] = right; - } - s->count = 2; - + unlock_block(s->info, left); + unlock_block(s->info, right); return 0; } From 5f12c33849404e20b24914a5f112ffbb9f8c35b0 Mon Sep 17 00:00:00 2001 From: Dennis Yang Date: Tue, 12 Dec 2017 18:21:40 +0800 Subject: [PATCH 1444/3326] dm thin metadata: THIN_MAX_CONCURRENT_LOCKS should be 6 commit 490ae017f54e55bde382d45ea24bddfb6d1a0aaf upstream. For btree removal, there is a corner case that a single thread could take 6 locks which is more than THIN_MAX_CONCURRENT_LOCKS(5) and leads to deadlock. A btree removal might eventually call rebalance_children()->rebalance3() to rebalance entries of three neighbor child nodes when shadow_spine has already acquired two write locks. In rebalance3(), it tries to shadow and acquire the write locks of all three child nodes. However, shadowing a child node requires acquiring a read lock of the original child node and a write lock of the new block.
Although the read lock will be released after block shadowing, shadowing the third child node in rebalance3() could still take the sixth lock. (2 write locks for shadow_spine + 2 write locks for the first two child nodes's shadow + 1 write lock for the last child node's shadow + 1 read lock for the last child node) Signed-off-by: Dennis Yang Acked-by: Joe Thornber Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-thin-metadata.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index d31d18d9727c69..36ef284ad086b8 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -80,10 +80,14 @@ #define SECTOR_TO_BLOCK_SHIFT 3 /* + * For btree insert: * 3 for btree insert + * 2 for btree lookup used within space map + * For btree remove: + * 2 for shadow spine + + * 4 for rebalance 3 child node */ -#define THIN_MAX_CONCURRENT_LOCKS 5 +#define THIN_MAX_CONCURRENT_LOCKS 6 /* This should be plenty */ #define SPACE_MAP_ROOT_SIZE 128 From c808f1db59561a65c27cb4ffb8515b95e33af221 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 10 Jan 2018 09:32:47 -0500 Subject: [PATCH 1445/3326] dm integrity: don't store cipher request on the stack commit 717f4b1c52135f279112df82583e0c77e80f90de upstream. Some asynchronous cipher implementations may use DMA. The stack may be mapped in the vmalloc area that doesn't support DMA. Therefore, the cipher request and initialization vector shouldn't be on the stack. Fix this by allocating the request and iv with kmalloc. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-integrity.c | 49 +++++++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 5e6737a44468d6..3cc2052f972c99 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2558,7 +2558,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error) int r = 0; unsigned i; __u64 journal_pages, journal_desc_size, journal_tree_size; - unsigned char *crypt_data = NULL; + unsigned char *crypt_data = NULL, *crypt_iv = NULL; + struct skcipher_request *req = NULL; ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); @@ -2616,9 +2617,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error) if (blocksize == 1) { struct scatterlist *sg; - SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); - unsigned char iv[ivsize]; - skcipher_request_set_tfm(req, ic->journal_crypt); + + req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); + if (!req) { + *error = "Could not allocate crypt request"; + r = -ENOMEM; + goto bad; + } + + crypt_iv = kmalloc(ivsize, GFP_KERNEL); + if (!crypt_iv) { + *error = "Could not allocate iv"; + r = -ENOMEM; + goto bad; + } ic->journal_xor = dm_integrity_alloc_page_list(ic); if (!ic->journal_xor) { @@ -2640,9 +2652,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error) sg_set_buf(&sg[i], va, PAGE_SIZE); } sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); - memset(iv, 0x00, ivsize); + memset(crypt_iv, 0x00, ivsize); - skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); + skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv); init_completion(&comp.comp); comp.in_flight = 
(atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) @@ -2658,10 +2670,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error) crypto_free_skcipher(ic->journal_crypt); ic->journal_crypt = NULL; } else { - SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); - unsigned char iv[ivsize]; unsigned crypt_len = roundup(ivsize, blocksize); + req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); + if (!req) { + *error = "Could not allocate crypt request"; + r = -ENOMEM; + goto bad; + } + + crypt_iv = kmalloc(ivsize, GFP_KERNEL); + if (!crypt_iv) { + *error = "Could not allocate iv"; + r = -ENOMEM; + goto bad; + } + crypt_data = kmalloc(crypt_len, GFP_KERNEL); if (!crypt_data) { *error = "Unable to allocate crypt data"; @@ -2669,8 +2693,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error) goto bad; } - skcipher_request_set_tfm(req, ic->journal_crypt); - ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); if (!ic->journal_scatterlist) { *error = "Unable to allocate sg list"; @@ -2694,12 +2716,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error) struct skcipher_request *section_req; __u32 section_le = cpu_to_le32(i); - memset(iv, 0x00, ivsize); + memset(crypt_iv, 0x00, ivsize); memset(crypt_data, 0x00, crypt_len); memcpy(crypt_data, §ion_le, min((size_t)crypt_len, sizeof(section_le))); sg_init_one(&sg, crypt_data, crypt_len); - skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); + skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv); init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) @@ -2757,6 +2779,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error) } bad: kfree(crypt_data); + kfree(crypt_iv); + skcipher_request_free(req); + return r; } From 6b8fdeadeb79ff48a5a412c0480d1e74a6da7fba Mon Sep 17 00:00:00 2001 From: Milan Broz Date: Wed, 3 Jan 2018 22:48:59 +0100 Subject: [PATCH 1446/3326] dm crypt: fix crash by adding missing check for auth key size commit 27c7003697fc2c78f965984aa224ef26cd6b2949 upstream. If dm-crypt uses authenticated mode with separate MAC, there are two concatenated part of the key structure - key(s) for encryption and authentication key. Add a missing check for authenticated key length. If this key length is smaller than actually provided key, dm-crypt now properly fails instead of crashing. 
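The key material being validated is a single blob laid out as the encryption key(s) followed by the authentication (MAC) key, so each per-tfm subkey must be at least key_mac_size bytes long before it can be split. A minimal sketch of that check (hypothetical helper, not dm-crypt's actual code):

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: split "enc key || mac key"; reject blobs shorter than the MAC key. */
static int split_authenc_key(const u8 *key, unsigned int subkey_size,
			     unsigned int mac_key_size,
			     const u8 **enc_key, unsigned int *enc_key_size,
			     const u8 **mac_key)
{
	if (subkey_size < mac_key_size)
		return -EINVAL;		/* too short: would have crashed before */

	*enc_key_size = subkey_size - mac_key_size;
	*enc_key = key;
	*mac_key = key + *enc_key_size;
	return 0;
}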
Fixes: ef43aa3806 ("dm crypt: add cryptographic data integrity protection (authenticated encryption)") Reported-by: Salah Coronya Signed-off-by: Milan Broz Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-crypt.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 9fc12f556534b7..4cc3809b2a3a41 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc) /* Ignore extra keys (which are used for IV etc) */ subkey_size = crypt_subkey_size(cc); - if (crypt_integrity_hmac(cc)) + if (crypt_integrity_hmac(cc)) { + if (subkey_size < cc->key_mac_size) + return -EINVAL; + crypt_copy_authenckey(cc->authenc_key, cc->key, subkey_size - cc->key_mac_size, cc->key_mac_size); + } + for (i = 0; i < cc->tfms_count; i++) { if (crypt_integrity_hmac(cc)) r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], From 82e12f1b26bae6b717c33af6395cf476bf3a6461 Mon Sep 17 00:00:00 2001 From: Ondrej Kozina Date: Fri, 12 Jan 2018 16:30:32 +0100 Subject: [PATCH 1447/3326] dm crypt: wipe kernel key copy after IV initialization commit dc94902bde1e158cd19c4deab208e5d6eb382a44 upstream. Loading key via kernel keyring service erases the internal key copy immediately after we pass it in crypto layer. This is wrong because IV is initialized later and we use wrong key for the initialization (instead of real key there's just zeroed block). The bug may cause data corruption if key is loaded via kernel keyring service first and later same crypt device is reactivated using exactly same key in hexbyte representation, or vice versa. The bug (and fix) affects only ciphers using following IVs: essiv, lmk and tcw. Fixes: c538f6ec9f56 ("dm crypt: add ability to use keys from the kernel key retention service") Signed-off-by: Ondrej Kozina Reviewed-by: Milan Broz Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-crypt.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 4cc3809b2a3a41..971241409c308e 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2058,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string ret = crypt_setkey(cc); - /* wipe the kernel key payload copy in each case */ - memset(cc->key, 0, cc->key_size * sizeof(u8)); - if (!ret) { set_bit(DM_CRYPT_KEY_VALID, &cc->flags); kzfree(cc->key_string); @@ -2528,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key) } } + /* wipe the kernel key payload copy */ + if (cc->key_string) + memset(cc->key, 0, cc->key_size * sizeof(u8)); + return ret; } @@ -2966,6 +2967,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) return ret; if (cc->iv_gen_ops && cc->iv_gen_ops->init) ret = cc->iv_gen_ops->init(cc); + /* wipe the kernel key payload copy */ + if (cc->key_string) + memset(cc->key, 0, cc->key_size * sizeof(u8)); return ret; } if (argc == 2 && !strcasecmp(argv[1], "wipe")) { @@ -3012,7 +3016,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type crypt_target = { .name = "crypt", - .version = {1, 18, 0}, + .version = {1, 18, 1}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, From f05431489fe654ba0df6fc8ab272a438bc702c62 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 17 Jan 2018 11:24:26 +0000 Subject: [PATCH 1448/3326] 
dm crypt: fix error return code in crypt_ctr() commit 3cc2e57c4beabcbbaa46e1ac6d77ca8276a4a42d upstream. Fix to return error code -ENOMEM from the mempool_create_kmalloc_pool() error handling case instead of 0, as done elsewhere in this function. Fixes: ef43aa38063a6 ("dm crypt: add cryptographic data integrity protection (authenticated encryption)") Signed-off-by: Wei Yongjun Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-crypt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 971241409c308e..554d60394c0663 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2746,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->tag_pool_max_sectors * cc->on_disk_tag_size); if (!cc->tag_pool) { ti->error = "Cannot allocate integrity tags mempool"; + ret = -ENOMEM; goto bad; } From 314a028c1f49cf79e6b443e21a70f5243d7df21f Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Sat, 20 Jan 2018 17:14:02 -0800 Subject: [PATCH 1449/3326] x86: Use __nostackprotect for sme_encrypt_kernel commit 91cfc88c66bf8ab95937606569670cf67fa73e09 upstream. Commit bacf6b499e11 ("x86/mm: Use a struct to reduce parameters for SME PGD mapping") moved some parameters into a structure. The structure was large enough to trigger the stack protection canary in sme_encrypt_kernel which doesn't work this early, causing reboots. Mark sme_encrypt_kernel appropriately to not use the canary. Fixes: bacf6b499e11 ("x86/mm: Use a struct to reduce parameters for SME PGD mapping") Signed-off-by: Laura Abbott Cc: Tom Lendacky Cc: Ingo Molnar Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/mem_encrypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index e110fab33ed480..48c03c74c7f4b5 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -487,7 +487,7 @@ static unsigned long __init sme_pgtable_calc(unsigned long len) return total; } -void __init sme_encrypt_kernel(struct boot_params *bp) +void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp) { unsigned long workarea_start, workarea_end, workarea_len; unsigned long execute_start, execute_end, execute_len; From 708a15fdbb622e884d6e6fb6e8c30cc6d42ab89d Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Tue, 16 Jan 2018 11:52:59 +0000 Subject: [PATCH 1450/3326] alpha/PCI: Fix noname IRQ level detection commit 86be89939d11a84800f66e2a283b915b704bf33d upstream. The conversion of the alpha architecture PCI host bridge legacy IRQ mapping/swizzling to the new PCI host bridge map/swizzle hooks carried out through: commit 0e4c2eeb758a ("alpha/PCI: Replace pci_fixup_irqs() call with host bridge IRQ mapping hooks") implies that IRQ for devices are now allocated through pci_assign_irq() function in pci_device_probe() that is called when a driver matching a device is found in order to probe the device through the device driver. Alpha noname platforms required IRQ level programming to be executed in sio_fixup_irq_levels(), that is called in noname_init_pci(), a platform hook called within a subsys_initcall. In noname_init_pci(), present IRQs are detected through sio_collect_irq_levels() that check the struct pci_dev->irq number to detect if an IRQ has been allocated for the device. 
By the time sio_collect_irq_levels() is called, some devices may still have not a matching driver loaded to match them (eg loadable module) therefore their IRQ allocation is still pending - which means that sio_collect_irq_levels() does not programme the correct IRQ level for those devices, causing their IRQ handling to be broken when the device driver is actually loaded and the device is probed. Fix the issue by adding code in the noname map_irq() function (noname_map_irq()) that, whilst mapping/swizzling the IRQ line, it also ensures that the correct IRQ level programming is executed at platform level, fixing the issue. Fixes: 0e4c2eeb758a ("alpha/PCI: Replace pci_fixup_irqs() call with host bridge IRQ mapping hooks") Reported-by: Mikulas Patocka Signed-off-by: Lorenzo Pieralisi Cc: Bjorn Helgaas Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Mikulas Patocka Cc: Meelis Roos Signed-off-by: Matt Turner Signed-off-by: Greg Kroah-Hartman --- arch/alpha/kernel/sys_sio.c | 35 +++++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c index 37bd6d9b8eb963..a6bdc1da47adb7 100644 --- a/arch/alpha/kernel/sys_sio.c +++ b/arch/alpha/kernel/sys_sio.c @@ -102,6 +102,15 @@ sio_pci_route(void) alpha_mv.sys.sio.route_tab); } +static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev) +{ + if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) && + (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA)) + return false; + + return true; +} + static unsigned int __init sio_collect_irq_levels(void) { @@ -110,8 +119,7 @@ sio_collect_irq_levels(void) /* Iterate through the devices, collecting IRQ levels. */ for_each_pci_dev(dev) { - if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) && - (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA)) + if (!sio_pci_dev_irq_needs_level(dev)) continue; if (dev->irq) @@ -120,8 +128,7 @@ sio_collect_irq_levels(void) return level_bits; } -static void __init -sio_fixup_irq_levels(unsigned int level_bits) +static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset) { unsigned int old_level_bits; @@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits) */ old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8); - level_bits |= (old_level_bits & 0x71ff); + if (reset) + old_level_bits &= 0x71ff; + + level_bits |= old_level_bits; outb((level_bits >> 0) & 0xff, 0x4d0); outb((level_bits >> 8) & 0xff, 0x4d1); } +static inline void +sio_fixup_irq_levels(unsigned int level_bits) +{ + __sio_fixup_irq_levels(level_bits, true); +} + static inline int noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { @@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5; int irq = COMMON_TABLE_LOOKUP, tmp; tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq); - return irq >= 0 ? tmp : -1; + + irq = irq >= 0 ? tmp : -1; + + /* Fixup IRQ level if an actual IRQ mapping is detected */ + if (sio_pci_dev_irq_needs_level(dev) && irq >= 0) + __sio_fixup_irq_levels(1 << irq, false); + + return irq; } static inline int From 550ba9729c05d9a52fbc26cd061c8d65f52b62f4 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 5 Dec 2017 22:28:22 +0000 Subject: [PATCH 1451/3326] MIPS: CM: Drop WARN_ON(vp != 0) commit c04de7b1ad645b61c141df8ca903ba0cc03a57f7 upstream. 
Since commit 68923cdc2eb3 ("MIPS: CM: Add cluster & block args to mips_cm_lock_other()"), mips_smp_send_ipi_mask() has used mips_cm_lock_other_cpu() with each CPU number, rather than mips_cm_lock_other() with the first VPE in each core. Prior to r6, multicore multithreaded systems such as dual-core dual-thread interAptivs with CPU Idle enabled (e.g. MIPS Creator Ci40) results in mips_cm_lock_other() repeatedly hitting WARN_ON(vp != 0). There doesn't appear to be anything fundamentally wrong about passing a non-zero VP/VPE number, even if it is a core's region that is locked into the other region before r6, so remove that particular WARN_ON(). Fixes: 68923cdc2eb3 ("MIPS: CM: Add cluster & block args to mips_cm_lock_other()") Signed-off-by: James Hogan Reviewed-by: Paul Burton Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/17883/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/mips-cm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index dd5567b1e30555..8f5bd04f320a90 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core, *this_cpu_ptr(&cm_core_lock_flags)); } else { WARN_ON(cluster != 0); - WARN_ON(vp != 0); WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); /* From d641053891697cee3253c73a435fc0972b039edf Mon Sep 17 00:00:00 2001 From: Punit Agrawal Date: Thu, 4 Jan 2018 18:24:33 +0000 Subject: [PATCH 1452/3326] KVM: arm/arm64: Check pagesize when allocating a hugepage at Stage 2 commit c507babf10ead4d5c8cca704539b170752a8ac84 upstream. KVM only supports PMD hugepages at stage 2 but doesn't actually check that the provided hugepage memory pagesize is PMD_SIZE before populating stage 2 entries. In cases where the backing hugepage size is smaller than PMD_SIZE (such as when using contiguous hugepages), KVM can end up creating stage 2 mappings that extend beyond the supplied memory. Fix this by checking for the pagesize of userspace vma before creating PMD hugepage at stage 2. Fixes: 66b3923a1a0f77a ("arm64: hugetlb: add support for PTE contiguous bit") Signed-off-by: Punit Agrawal Cc: Marc Zyngier Reviewed-by: Christoffer Dall Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- virt/kvm/arm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index b4b69c2d101202..9dea96380339fd 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1310,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, return -EFAULT; } - if (is_vm_hugetlb_page(vma) && !logging_active) { + if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) { hugetlb = true; gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; } else { From 14c7f5bc7a9059037feeb7666ca646c88eaacb5f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 16 Jan 2018 10:23:47 +0000 Subject: [PATCH 1453/3326] arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls commit acfb3b883f6d6a4b5d27ad7fdded11f6a09ae6dd upstream. KVM doesn't follow the SMCCC when it comes to unimplemented calls, and inject an UNDEF instead of returning an error. Since firmware calls are now used for security mitigation, they are becoming more common, and the undef is counter productive. Instead, let's follow the SMCCC which states that -1 must be returned to the caller when getting an unknown function number. 
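Put differently, the SMCCC convention is that an unimplemented function ID is answered through the first result register rather than by injecting an exception into the caller. A minimal sketch of that convention (hypothetical dispatcher, not the KVM exit handler; the constant is defined locally for illustration):

/* SMCCC: "function not supported" is signalled by returning -1 in x0. */
#define SKETCH_SMCCC_RET_NOT_SUPPORTED	(~0UL)

static unsigned long dispatch_guest_call(unsigned long func_id)
{
	switch (func_id) {
	/* ...recognised function IDs (e.g. PSCI calls) handled here... */
	default:
		return SKETCH_SMCCC_RET_NOT_SUPPORTED;	/* no UNDEF injection */
	}
}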
Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kvm/handle_exit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 7debb74843a053..380261e258ef8a 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -44,7 +44,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) ret = kvm_psci_call(vcpu); if (ret < 0) { - kvm_inject_undefined(vcpu); + vcpu_set_reg(vcpu, 0, ~0UL); return 1; } @@ -53,7 +53,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_inject_undefined(vcpu); + vcpu_set_reg(vcpu, 0, ~0UL); return 1; } From 020c755fdb04be9cffa5a30c1624990b7c54cb4c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 18 Jan 2018 16:28:26 +0100 Subject: [PATCH 1454/3326] x86/mce: Make machine check speculation protected commit 6f41c34d69eb005e7848716bbcafc979b35037d5 upstream. The machine check idtentry uses an indirect branch directly from the low level code. This evades the speculation protection. Replace it by a direct call into C code and issue the indirect call there so the compiler can apply the proper speculation protection. Signed-off-by: Thomas Gleixner Reviewed-by:Borislav Petkov Reviewed-by: David Woodhouse Niced-by: Peter Zijlstra Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801181626290.1847@nanos Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 2 +- arch/x86/include/asm/traps.h | 1 + arch/x86/kernel/cpu/mcheck/mce.c | 5 +++++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 6923f456dc0714..be6b66464f6a7d 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1258,7 +1258,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1 #endif #ifdef CONFIG_X86_MCE -idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) +idtentry machine_check do_mce has_error_code=0 paranoid=1 #endif /* diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 31051f35cbb768..3de69330e6c50e 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -88,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *, long); #endif +dotraplinkage void do_mce(struct pt_regs *, long); static inline int get_si_code(unsigned long condition) { diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 3b413065c61308..a9e898b71208f1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1788,6 +1788,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code) void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check; +dotraplinkage void do_mce(struct pt_regs *regs, long error_code) +{ + machine_check_vector(regs, error_code); +} + /* * Called for each booted CPU to set up machine checks. * Must be called with preempt off: From 22371c489842560f99aa6c12a2c00db8b0ecb752 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 19 Jan 2018 01:14:21 +0900 Subject: [PATCH 1455/3326] retpoline: Introduce start/end markers of indirect thunk commit 736e80a4213e9bbce40a7c050337047128b472ac upstream. 
Introduce start/end markers of __x86_indirect_thunk_* functions. To make it easy, consolidate .text.__x86.indirect_thunk.* sections to one .text.__x86.indirect_thunk section and put it in the end of kernel text section and adds __indirect_thunk_start/end so that other subsystem (e.g. kprobes) can identify it. Signed-off-by: Masami Hiramatsu Signed-off-by: Thomas Gleixner Acked-by: David Woodhouse Cc: Andi Kleen Cc: Peter Zijlstra Cc: Ananth N Mavinakayanahalli Cc: Arjan van de Ven Cc: Greg Kroah-Hartman Link: https://lkml.kernel.org/r/151629206178.10241.6828804696410044771.stgit@devbox Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 3 +++ arch/x86/kernel/vmlinux.lds.S | 6 ++++++ arch/x86/lib/retpoline.S | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 7b45d842415089..19ba5ad19c656c 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -194,6 +194,9 @@ enum spectre_v2_mitigation { SPECTRE_V2_IBRS, }; +extern char __indirect_thunk_start[]; +extern char __indirect_thunk_end[]; + /* * On VMEXIT we must ensure that no RSB predictions learned in the guest * can be followed in the host, by overwriting the RSB completely. Both diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 1e413a9326aaa1..9b138a06c1a468 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -124,6 +124,12 @@ SECTIONS ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); #endif +#ifdef CONFIG_RETPOLINE + __indirect_thunk_start = .; + *(.text.__x86.indirect_thunk) + __indirect_thunk_end = .; +#endif + /* End of text section */ _etext = .; } :text = 0x9090 diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index cb45c6cb465f4b..d3415dc30f8226 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -9,7 +9,7 @@ #include .macro THUNK reg - .section .text.__x86.indirect_thunk.\reg + .section .text.__x86.indirect_thunk ENTRY(__x86_indirect_thunk_\reg) CFI_STARTPROC From aaadccc92ba402b32cbdd72694dc12f6dd4e9ecf Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 19 Jan 2018 01:14:51 +0900 Subject: [PATCH 1456/3326] kprobes/x86: Blacklist indirect thunk functions for kprobes commit c1804a236894ecc942da7dc6c5abe209e56cba93 upstream. Mark __x86_indirect_thunk_* functions as blacklist for kprobes because those functions can be called from anywhere in the kernel including blacklist functions of kprobes. Signed-off-by: Masami Hiramatsu Signed-off-by: Thomas Gleixner Acked-by: David Woodhouse Cc: Andi Kleen Cc: Peter Zijlstra Cc: Ananth N Mavinakayanahalli Cc: Arjan van de Ven Cc: Greg Kroah-Hartman Link: https://lkml.kernel.org/r/151629209111.10241.5444852823378068683.stgit@devbox Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/retpoline.S | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index d3415dc30f8226..dfb2ba91b670dc 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg) * than one per register with the correct names. So we do it * the simple and nasty way... 
*/ -#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg) +#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym) +#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg) GENERATE_THUNK(_ASM_AX) From 4fb8368bb6a0b7a50974722ddc16419b5a4c4559 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 19 Jan 2018 01:15:20 +0900 Subject: [PATCH 1457/3326] kprobes/x86: Disable optimizing on the function jumps to indirect thunk commit c86a32c09f8ced67971a2310e3b0dda4d1749007 upstream. Since indirect jump instructions will be replaced by jump to __x86_indirect_thunk_*, those jmp instruction must be treated as an indirect jump. Since optprobe prohibits to optimize probes in the function which uses an indirect jump, it also needs to find out the function which jump to __x86_indirect_thunk_* and disable optimization. Add a check that the jump target address is between the __indirect_thunk_start/end when optimizing kprobe. Signed-off-by: Masami Hiramatsu Signed-off-by: Thomas Gleixner Acked-by: David Woodhouse Cc: Andi Kleen Cc: Peter Zijlstra Cc: Ananth N Mavinakayanahalli Cc: Arjan van de Ven Cc: Greg Kroah-Hartman Link: https://lkml.kernel.org/r/151629212062.10241.6991266100233002273.stgit@devbox Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/kprobes/opt.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 4f98aad382378a..3668f28cf5fc78 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "common.h" @@ -205,7 +206,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src) } /* Check whether insn is indirect jump */ -static int insn_is_indirect_jump(struct insn *insn) +static int __insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ @@ -239,6 +240,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) return (start <= target && target <= start + len); } +static int insn_is_indirect_jump(struct insn *insn) +{ + int ret = __insn_is_indirect_jump(insn); + +#ifdef CONFIG_RETPOLINE + /* + * Jump to x86_indirect_thunk_* is treated as an indirect jump. + * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with + * older gcc may use indirect jump. So we add this check instead of + * replace indirect-jump check. + */ + if (!ret) + ret = insn_jump_into_range(insn, + (unsigned long)__indirect_thunk_start, + (unsigned long)__indirect_thunk_end - + (unsigned long)__indirect_thunk_start); +#endif + return ret; +} + /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { From cd8d78e4865be661366720ab69e733fae4e9deea Mon Sep 17 00:00:00 2001 From: "zhenwei.pi" Date: Thu, 18 Jan 2018 09:04:52 +0800 Subject: [PATCH 1458/3326] x86/pti: Document fix wrong index commit 98f0fceec7f84d80bc053e49e596088573086421 upstream. In section <2. Runtime Cost>, fix wrong index. 
Signed-off-by: zhenwei.pi Signed-off-by: Thomas Gleixner Cc: dave.hansen@linux.intel.com Link: https://lkml.kernel.org/r/1516237492-27739-1-git-send-email-zhenwei.pi@youruncloud.com Signed-off-by: Greg Kroah-Hartman --- Documentation/x86/pti.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt index d11eff61fc9add..5cd58439ad2d6f 100644 --- a/Documentation/x86/pti.txt +++ b/Documentation/x86/pti.txt @@ -78,7 +78,7 @@ this protection comes at a cost: non-PTI SYSCALL entry code, so requires mapping fewer things into the userspace page tables. The downside is that stacks must be switched at entry time. - d. Global pages are disabled for all kernel structures not + c. Global pages are disabled for all kernel structures not mapped into both kernel and userspace page tables. This feature of the MMU allows different processes to share TLB entries mapping the kernel. Losing the feature means more From 5fa871644e6d48f5c120e8c469c96850a01c60c8 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 17 Jan 2018 14:53:28 -0800 Subject: [PATCH 1459/3326] x86/retpoline: Optimize inline assembler for vmexit_fill_RSB commit 3f7d875566d8e79c5e0b2c9a413e91b2c29e0854 upstream. The generated assembler for the C fill RSB inline asm operations has several issues: - The C code sets up the loop register, which is then immediately overwritten in __FILL_RETURN_BUFFER with the same value again. - The C code also passes in the iteration count in another register, which is not used at all. Remove these two unnecessary operations. Just rely on the single constant passed to the macro for the iterations. Signed-off-by: Andi Kleen Signed-off-by: Thomas Gleixner Acked-by: David Woodhouse Cc: dave.hansen@intel.com Cc: gregkh@linuxfoundation.org Cc: torvalds@linux-foundation.org Cc: arjan@linux.intel.com Link: https://lkml.kernel.org/r/20180117225328.15414-1-andi@firstfloor.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 19ba5ad19c656c..4ad41087ce0e77 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -206,16 +206,17 @@ extern char __indirect_thunk_end[]; static inline void vmexit_fill_RSB(void) { #ifdef CONFIG_RETPOLINE - unsigned long loops = RSB_CLEAR_LOOPS / 2; + unsigned long loops; asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE ALTERNATIVE("jmp 910f", __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), X86_FEATURE_RETPOLINE) "910:" - : "=&r" (loops), ASM_CALL_CONSTRAINT - : "r" (loops) : "memory" ); + : "=r" (loops), ASM_CALL_CONSTRAINT + : : "memory" ); #endif } + #endif /* __ASSEMBLY__ */ #endif /* __NOSPEC_BRANCH_H__ */ From 9cc86f994bff4de89de06cedba76e6ef03b9a810 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 17 Jan 2018 17:41:41 -0600 Subject: [PATCH 1460/3326] x86/mm: Rework wbinvd, hlt operation in stop_this_cpu() commit f23d74f6c66c3697e032550eeef3f640391a3a7d upstream. Some issues have been reported with the for loop in stop_this_cpu() that issues the 'wbinvd; hlt' sequence. Reverting this sequence to halt() has been shown to resolve the issue. However, the wbinvd is needed when running with SME. The reason for the wbinvd is to prevent cache flush races between encrypted and non-encrypted entries that have the same physical address. 
This can occur when kexec'ing from memory encryption active to inactive or vice-versa. The important thing is to not have outside of kernel text memory references (such as stack usage), so the usage of the native_*() functions is needed since these expand as inline asm sequences. So instead of reverting the change, rework the sequence. Move the wbinvd instruction outside of the for loop as native_wbinvd() and make its execution conditional on X86_FEATURE_SME. In the for loop, change the asm 'wbinvd; hlt' sequence back to a halt sequence but use the native_halt() call. Fixes: bba4ed011a52 ("x86/mm, kexec: Allow kexec to be used with SME") Reported-by: Dave Young Signed-off-by: Tom Lendacky Signed-off-by: Thomas Gleixner Tested-by: Dave Young Cc: Juergen Gross Cc: Tony Luck Cc: Yu Chen Cc: Baoquan He Cc: Linus Torvalds Cc: kexec@lists.infradead.org Cc: ebiederm@redhat.com Cc: Borislav Petkov Cc: Rui Zhang Cc: Arjan van de Ven Cc: Boris Ostrovsky Cc: Dan Williams Link: https://lkml.kernel.org/r/20180117234141.21184.44067.stgit@tlendack-t1.amdoffice.net Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/process.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 3cb2486c47e48a..8bd1d8292cf761 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -380,19 +380,24 @@ void stop_this_cpu(void *dummy) disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); + /* + * Use wbinvd on processors that support SME. This provides support + * for performing a successful kexec when going from SME inactive + * to SME active (or vice-versa). The cache must be cleared so that + * if there are entries with the same physical address, both with and + * without the encryption bit, they don't race each other when flushed + * and potentially end up with the wrong entry being committed to + * memory. + */ + if (boot_cpu_has(X86_FEATURE_SME)) + native_wbinvd(); for (;;) { /* - * Use wbinvd followed by hlt to stop the processor. This - * provides support for kexec on a processor that supports - * SME. With kexec, going from SME inactive to SME active - * requires clearing cache entries so that addresses without - * the encryption bit set don't corrupt the same physical - * address that has the encryption bit set when caches are - * flushed. To achieve this a wbinvd is performed followed by - * a hlt. Even if the processor is not in the kexec/SME - * scenario this only adds a wbinvd to a halting processor. + * Use native_halt() so that memory contents don't change + * (stack usage and variables) after possibly issuing the + * native_wbinvd() above. */ - asm volatile("wbinvd; hlt" : : : "memory"); + native_halt(); } } From 3abb4c11038e03c4adc701eff30c45638816eb0f Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 19 Jan 2018 15:49:24 +0300 Subject: [PATCH 1461/3326] mm, page_vma_mapped: Drop faulty pointer arithmetics in check_pte() commit 0d665e7b109d512b7cae3ccef6e8654714887844 upstream. Tetsuo reported random crashes under memory pressure on 32-bit x86 system and tracked down to change that introduced page_vma_mapped_walk(). The root cause of the issue is the faulty pointer math in check_pte(). As ->pte may point to an arbitrary page we have to check that they are belong to the section before doing math. Otherwise it may lead to weird results. It wasn't noticed until now as mem_map[] is virtually contiguous on flatmem or vmemmap sparsemem. 
Pointer arithmetic just works against all 'struct page' pointers. But with classic sparsemem, it doesn't because each section memap is allocated separately and so consecutive pfns crossing two sections might have struct pages at completely unrelated addresses. Let's restructure code a bit and replace pointer arithmetic with operations on pfns. Signed-off-by: Kirill A. Shutemov Reported-and-tested-by: Tetsuo Handa Acked-by: Michal Hocko Fixes: ace71a19cec5 ("mm: introduce page_vma_mapped_walk()") Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/swapops.h | 21 ++++++++++++++ mm/page_vma_mapped.c | 63 +++++++++++++++++++++++++---------------- 2 files changed, 59 insertions(+), 25 deletions(-) diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 9c5a2628d6ce7b..1d3877c39a000a 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -124,6 +124,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry) return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); } +static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry) +{ + return swp_offset(entry); +} + static inline struct page *device_private_entry_to_page(swp_entry_t entry) { return pfn_to_page(swp_offset(entry)); @@ -154,6 +159,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry) return false; } +static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry) +{ + return 0; +} + static inline struct page *device_private_entry_to_page(swp_entry_t entry) { return NULL; @@ -189,6 +199,11 @@ static inline int is_write_migration_entry(swp_entry_t entry) return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); } +static inline unsigned long migration_entry_to_pfn(swp_entry_t entry) +{ + return swp_offset(entry); +} + static inline struct page *migration_entry_to_page(swp_entry_t entry) { struct page *p = pfn_to_page(swp_offset(entry)); @@ -218,6 +233,12 @@ static inline int is_migration_entry(swp_entry_t swp) { return 0; } + +static inline unsigned long migration_entry_to_pfn(swp_entry_t entry) +{ + return 0; +} + static inline struct page *migration_entry_to_page(swp_entry_t entry) { return NULL; diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index d22b84310f6d40..95601561439532 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -30,10 +30,29 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw) return true; } +/** + * check_pte - check if @pvmw->page is mapped at the @pvmw->pte + * + * page_vma_mapped_walk() found a place where @pvmw->page is *potentially* + * mapped. check_pte() has to validate this. + * + * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary + * page. + * + * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration + * entry that points to @pvmw->page or any subpage in case of THP. + * + * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to + * @pvmw->page or any subpage in case of THP. + * + * Otherwise, return false. 
+ * + */ static bool check_pte(struct page_vma_mapped_walk *pvmw) { + unsigned long pfn; + if (pvmw->flags & PVMW_MIGRATION) { -#ifdef CONFIG_MIGRATION swp_entry_t entry; if (!is_swap_pte(*pvmw->pte)) return false; @@ -41,37 +60,31 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw) if (!is_migration_entry(entry)) return false; - if (migration_entry_to_page(entry) - pvmw->page >= - hpage_nr_pages(pvmw->page)) { - return false; - } - if (migration_entry_to_page(entry) < pvmw->page) - return false; -#else - WARN_ON_ONCE(1); -#endif - } else { - if (is_swap_pte(*pvmw->pte)) { - swp_entry_t entry; - entry = pte_to_swp_entry(*pvmw->pte); - if (is_device_private_entry(entry) && - device_private_entry_to_page(entry) == pvmw->page) - return true; - } + pfn = migration_entry_to_pfn(entry); + } else if (is_swap_pte(*pvmw->pte)) { + swp_entry_t entry; - if (!pte_present(*pvmw->pte)) + /* Handle un-addressable ZONE_DEVICE memory */ + entry = pte_to_swp_entry(*pvmw->pte); + if (!is_device_private_entry(entry)) return false; - /* THP can be referenced by any subpage */ - if (pte_page(*pvmw->pte) - pvmw->page >= - hpage_nr_pages(pvmw->page)) { - return false; - } - if (pte_page(*pvmw->pte) < pvmw->page) + pfn = device_private_entry_to_pfn(entry); + } else { + if (!pte_present(*pvmw->pte)) return false; + + pfn = pte_pfn(*pvmw->pte); } + if (pfn < page_to_pfn(pvmw->page)) + return false; + + /* THP can be referenced by any subpage */ + if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page)) + return false; + return true; } From 0c2870761e9fea8175f52a9113acf93fde81cd51 Mon Sep 17 00:00:00 2001 From: Yan Markman Date: Tue, 28 Nov 2017 14:19:50 +0100 Subject: [PATCH 1462/3326] net: mvpp2: do not disable GMAC padding commit e749aca84b10f3987b2ee1f76e0c7d8aacc5653c upstream. Short fragmented packets may never be sent by the hardware when padding is disabled. This patch stop modifying the GMAC padding bits, to leave them to their reset value (disabled). Fixes: 3919357fb0bb ("net: mvpp2: initialize the GMAC when using a port") Signed-off-by: Yan Markman [Antoine: commit message] Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/marvell/mvpp2.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index fcf9ba5eb8d1db..d147dc7d0f7776 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -4552,11 +4552,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port) MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); - - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - val |= MVPP2_GMAC_DISABLE_PADDING; - val &= ~MVPP2_GMAC_FLOW_CTRL_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | @@ -4564,10 +4559,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port) MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; val &= ~MVPP22_CTRL4_DP_CLK_SEL; writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); - - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - val &= ~MVPP2_GMAC_DISABLE_PADDING; - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); } /* The port is connected to a copper PHY */ From d602cb8a4b133bfe1e9b6170e646cbbf38660ee8 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Sun, 29 Oct 2017 16:27:21 +0100 Subject: [PATCH 1463/3326] MIPS: AR7: ensure the port type's FCR value is used commit 0a5191efe06b5103909206e4fbcff81d30283f8e upstream. Since commit aef9a7bd9b67 ("serial/uart/8250: Add tunable RX interrupt trigger I/F of FIFO buffers"), the port's default FCR value isn't used in serial8250_do_set_termios anymore, but copied over once in serial8250_config_port and then modified as needed. Unfortunately, serial8250_config_port will never be called if the port is shared between kernel and userspace, and the port's flag doesn't have UPF_BOOT_AUTOCONF, which would trigger a serial8250_config_port as well. 
This causes garbled output from userspace: [ 5.220000] random: procd urandom read with 49 bits of entropy available ers [kee Fix this by forcing it to be configured on boot, resulting in the expected output: [ 5.250000] random: procd urandom read with 50 bits of entropy available Press the [f] key and hit [enter] to enter failsafe mode Press the [1], [2], [3] or [4] key and hit [enter] to select the debug level Fixes: aef9a7bd9b67 ("serial/uart/8250: Add tunable RX interrupt trigger I/F of FIFO buffers") Signed-off-by: Jonas Gorski Cc: Greg Kroah-Hartman Cc: Yoshihiro YUNOMAE Cc: Florian Fainelli Cc: Nicolas Schichan Cc: linux-mips@linux-mips.org Cc: linux-serial@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/17544/ Signed-off-by: Ralf Baechle Cc: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/ar7/platform.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 4674f1efbe7a59..e1675c25d5d48c 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void) uart_port.type = PORT_AR7; uart_port.uartclk = clk_get_rate(bus_clk) / 2; uart_port.iotype = UPIO_MEM32; - uart_port.flags = UPF_FIXED_TYPE; + uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF; uart_port.regshift = 2; uart_port.line = 0; From a16134b082346b7e7c34f594a0763eafacdcea92 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 23 Jan 2018 19:58:21 +0100 Subject: [PATCH 1464/3326] Linux 4.14.15 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4951305eb867d7..bf1a277a67a431 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 14 +SUBLEVEL = 15 EXTRAVERSION = NAME = Petit Gorille From 1d00dacda89dca83b9899fe3ed1588b08bdd4b88 Mon Sep 17 00:00:00 2001 From: Martin Brandenburg Date: Mon, 22 Jan 2018 15:44:51 -0500 Subject: [PATCH 1465/3326] orangefs: use list_for_each_entry_safe in purge_waiting_ops commit 0afc0decf247f65b7aba666a76a0a68adf4bc435 upstream. set_op_state_purged can delete the op. Signed-off-by: Martin Brandenburg Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/orangefs/waitqueue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c index 835c6e148afccb..0577d6dba8c81d 100644 --- a/fs/orangefs/waitqueue.c +++ b/fs/orangefs/waitqueue.c @@ -29,10 +29,10 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s */ void purge_waiting_ops(void) { - struct orangefs_kernel_op_s *op; + struct orangefs_kernel_op_s *op, *tmp; spin_lock(&orangefs_request_list_lock); - list_for_each_entry(op, &orangefs_request_list, list) { + list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) { gossip_debug(GOSSIP_WAIT_DEBUG, "pvfs2-client-core: purging op tag %llu %s\n", llu(op->tag), From e1166d9491a0eb7b854d6868e62a9066c7dff277 Mon Sep 17 00:00:00 2001 From: Martin Brandenburg Date: Mon, 22 Jan 2018 15:44:52 -0500 Subject: [PATCH 1466/3326] orangefs: initialize op on loop restart in orangefs_devreq_read commit a0ec1ded22e6a6bc41981fae22406835b006a66e upstream. In orangefs_devreq_read, there is a loop which picks an op off the list of pending ops. If the loop fails to find an op, there is nothing to read, and it returns EAGAIN. If the op has been given up on, the loop is restarted via a goto. 
The bug is that the variable which the found op is written to is not reinitialized, so if there are no more eligible ops on the list, the code runs again on the already handled op. This is triggered by interrupting a process while the op is being copied to the client-core. It's a fairly small window, but it's there. Signed-off-by: Martin Brandenburg Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/orangefs/devorangefs-req.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c index ded456f17de611..c584ad8d023cde 100644 --- a/fs/orangefs/devorangefs-req.c +++ b/fs/orangefs/devorangefs-req.c @@ -162,7 +162,7 @@ static ssize_t orangefs_devreq_read(struct file *file, struct orangefs_kernel_op_s *op, *temp; __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION; static __s32 magic = ORANGEFS_DEVREQ_MAGIC; - struct orangefs_kernel_op_s *cur_op = NULL; + struct orangefs_kernel_op_s *cur_op; unsigned long ret; /* We do not support blocking IO. */ @@ -186,6 +186,7 @@ static ssize_t orangefs_devreq_read(struct file *file, return -EAGAIN; restart: + cur_op = NULL; /* Get next op (if any) from top of list. */ spin_lock(&orangefs_request_list_lock); list_for_each_entry_safe(op, temp, &orangefs_request_list, list) { From bd9fa7822f789f3ec32058440c246f7924d8ca9a Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 15 Nov 2017 17:38:30 -0800 Subject: [PATCH 1467/3326] mm, page_alloc: fix potential false positive in __zone_watermark_ok commit b050e3769c6b4013bb937e879fc43bf1847ee819 upstream. Since commit 97a16fc82a7c ("mm, page_alloc: only enforce watermarks for order-0 allocations"), __zone_watermark_ok() check for high-order allocations will shortcut per-migratetype free list checks for ALLOC_HARDER allocations, and return true as long as there's free page of any migratetype. The intention is that ALLOC_HARDER can allocate from MIGRATE_HIGHATOMIC free lists, while normal allocations can't. However, as a side effect, the watermark check will then also return true when there are pages only on the MIGRATE_ISOLATE list, or (prior to CMA conversion to ZONE_MOVABLE) on the MIGRATE_CMA list. Since the allocation cannot actually obtain isolated pages, and might not be able to obtain CMA pages, this can result in a false positive. The condition should be rare and perhaps the outcome is not a fatal one. Still, it's better if the watermark check is correct. There also shouldn't be a performance tradeoff here. 
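The reordered check can be summarised as: scan the regular migratetype free lists (and CMA, where configured) first, and only let ALLOC_HARDER requests succeed on the strength of the MIGRATE_HIGHATOMIC reserve. A condensed sketch of the per-order test (simplified, not the full __zone_watermark_ok(); the helper name is made up):

#include <linux/mmzone.h>

/* Sketch: does this order's free_area hold a page the request may use? */
static bool order_has_usable_page(struct free_area *area, bool alloc_harder)
{
	int mt;

	for (mt = 0; mt < MIGRATE_PCPTYPES; mt++)
		if (!list_empty(&area->free_list[mt]))
			return true;

	/* Highatomic reserves only count for ALLOC_HARDER allocations. */
	if (alloc_harder && !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
		return true;

	/* Pages that sit only on e.g. the MIGRATE_ISOLATE list don't count. */
	return false;
}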
Link: http://lkml.kernel.org/r/20171102125001.23708-1-vbabka@suse.cz Fixes: 97a16fc82a7c ("mm, page_alloc: only enforce watermarks for order-0 allocations") Signed-off-by: Vlastimil Babka Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Rik van Riel Cc: David Rientjes Cc: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/page_alloc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d51c2087c498c6..2de080003693ce 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3011,9 +3011,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, if (!area->nr_free) continue; - if (alloc_harder) - return true; - for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { if (!list_empty(&area->free_list[mt])) return true; @@ -3025,6 +3022,9 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, return true; } #endif + if (alloc_harder && + !list_empty(&area->free_list[MIGRATE_HIGHATOMIC])) + return true; } return false; } From 671624872144abc37bc5e8f3b27987890f6e87f3 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Sun, 3 Dec 2017 12:12:45 -0800 Subject: [PATCH 1468/3326] netfilter: nfnetlink_cthelper: Add missing permission checks commit 4b380c42f7d00a395feede754f0bc2292eebe6e5 upstream. The capability check in nfnetlink_rcv() verifies that the caller has CAP_NET_ADMIN in the namespace that "owns" the netlink socket. However, nfnl_cthelper_list is shared by all net namespaces on the system. An unprivileged user can create user and net namespaces in which he holds CAP_NET_ADMIN to bypass the netlink_net_capable() check: $ nfct helper list nfct v1.4.4: netlink error: Operation not permitted $ vpnns -- nfct helper list { .name = ftp, .queuenum = 0, .l3protonum = 2, .l4protonum = 6, .priv_data_len = 24, .status = enabled, }; Add capable() checks in nfnetlink_cthelper, as this is cleaner than trying to generalize the solution. Signed-off-by: Kevin Cernekee Signed-off-by: Pablo Neira Ayuso Acked-by: Michal Kubecek Signed-off-by: Greg Kroah-Hartman --- net/netfilter/nfnetlink_cthelper.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 41628b3936731b..d33ce6d5ebce92 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -407,6 +408,9 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl, struct nfnl_cthelper *nlcth; int ret = 0; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) return -EINVAL; @@ -611,6 +615,9 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl, struct nfnl_cthelper *nlcth; bool tuple_set = false; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = nfnl_cthelper_dump_table, @@ -678,6 +685,9 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl, struct nfnl_cthelper *nlcth, *n; int j = 0, ret; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (tb[NFCTH_NAME]) helper_name = nla_data(tb[NFCTH_NAME]); From 19848ca7b7dad5153860239c893c1eadd603217e Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Tue, 5 Dec 2017 15:42:41 -0800 Subject: [PATCH 1469/3326] netfilter: xt_osf: Add missing permission checks commit 916a27901de01446bcf57ecca4783f6cff493309 upstream. 
The capability check in nfnetlink_rcv() verifies that the caller has CAP_NET_ADMIN in the namespace that "owns" the netlink socket. However, xt_osf_fingers is shared by all net namespaces on the system. An unprivileged user can create user and net namespaces in which he holds CAP_NET_ADMIN to bypass the netlink_net_capable() check: vpnns -- nfnl_osf -f /tmp/pf.os vpnns -- nfnl_osf -f /tmp/pf.os -d These non-root operations successfully modify the systemwide OS fingerprint list. Add new capable() checks so that they can't. Signed-off-by: Kevin Cernekee Signed-off-by: Pablo Neira Ayuso Acked-by: Michal Kubecek Signed-off-by: Greg Kroah-Hartman --- net/netfilter/xt_osf.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c index 36e14b1f061ddf..a34f314a8c2380 100644 --- a/net/netfilter/xt_osf.c +++ b/net/netfilter/xt_osf.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -70,6 +71,9 @@ static int xt_osf_add_callback(struct net *net, struct sock *ctnl, struct xt_osf_finger *kf = NULL, *sf; int err = 0; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (!osf_attrs[OSF_ATTR_FINGER]) return -EINVAL; @@ -115,6 +119,9 @@ static int xt_osf_remove_callback(struct net *net, struct sock *ctnl, struct xt_osf_finger *sf; int err = -ENOENT; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (!osf_attrs[OSF_ATTR_FINGER]) return -EINVAL; From ceab06885c09f685910779a8685b7659d66de4b5 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 10 Jan 2018 12:14:28 +0100 Subject: [PATCH 1470/3326] xfrm: Fix a race in the xdst pcpu cache. commit 76a4201191814a0061cb5c861fafb9ecaa764846 upstream. We need to run xfrm_resolve_and_create_bundle() with bottom halves off. Otherwise we may reuse an already released dst_enty when the xfrm lookup functions are called from process context. Fixes: c30d78c14a813db39a647b6a348b428 ("xfrm: add xdst pcpu cache") Reported-by: Darius Ski Signed-off-by: Steffen Klassert Acked-by: David Miller , Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_policy.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 6bc16bb61b5533..688ed34f0671f5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2056,8 +2056,11 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, if (num_xfrms <= 0) goto make_dummy_bundle; + local_bh_disable(); xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, - xflo->dst_orig); + xflo->dst_orig); + local_bh_enable(); + if (IS_ERR(xdst)) { err = PTR_ERR(xdst); if (err != -EAGAIN) @@ -2144,9 +2147,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, goto no_transform; } + local_bh_disable(); xdst = xfrm_resolve_and_create_bundle( pols, num_pols, fl, family, dst_orig); + local_bh_enable(); + if (IS_ERR(xdst)) { xfrm_pols_put(pols, num_pols); err = PTR_ERR(xdst); From 5cc765d69c4faf7dabfa5816add84ee52bc7912b Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 24 Jan 2018 15:28:17 +0100 Subject: [PATCH 1471/3326] Revert "module: Add retpoline tag to VERMAGIC" commit 5132ede0fe8092b043dae09a7cc32b8ae7272baa upstream. This reverts commit 6cfb521ac0d5b97470883ff9b7facae264b7ab12. Turns out distros do not want to make retpoline as part of their "ABI", so this patch should not have been merged. Sorry Andi, this was my fault, I suggested it when your original patch was the "correct" way of doing this instead. 
Reported-by: Jiri Kosina Fixes: 6cfb521ac0d5 ("module: Add retpoline tag to VERMAGIC") Acked-by: Andi Kleen Cc: Thomas Gleixner Cc: David Woodhouse Cc: rusty@rustcorp.com.au Cc: arjan.van.de.ven@intel.com Cc: jeyu@kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/vermagic.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index 853291714ae0b9..bae807eb2933f9 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -31,17 +31,11 @@ #else #define MODULE_RANDSTRUCT_PLUGIN #endif -#ifdef RETPOLINE -#define MODULE_VERMAGIC_RETPOLINE "retpoline " -#else -#define MODULE_VERMAGIC_RETPOLINE "" -#endif #define VERMAGIC_STRING \ UTS_RELEASE " " \ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ MODULE_ARCH_VERMAGIC \ - MODULE_RANDSTRUCT_PLUGIN \ - MODULE_VERMAGIC_RETPOLINE + MODULE_RANDSTRUCT_PLUGIN From 25cb145272525e819b21fb44dc338d08b6af816f Mon Sep 17 00:00:00 2001 From: Mark Furneaux Date: Mon, 22 Jan 2018 11:24:17 -0800 Subject: [PATCH 1472/3326] Input: xpad - add support for PDP Xbox One controllers commit e5c9c6a885fad00aa559b49d8fc23a60e290824e upstream. Adds support for the current lineup of Xbox One controllers from PDP (Performance Designed Products). These controllers are very picky with their initialization sequence and require an additional 2 packets before they send any input reports. Signed-off-by: Mark Furneaux Reviewed-by: Cameron Gutman Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/joystick/xpad.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index d86e59515b9c9e..d88d3e0f59fb83 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -229,6 +229,7 @@ static const struct xpad_device { { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, @@ -475,6 +476,22 @@ static const u8 xboxone_hori_init[] = { 0x00, 0x00, 0x00, 0x80, 0x00 }; +/* + * This packet is required for some of the PDP pads to start + * sending input reports. One of those pads is (0x0e6f:0x02ab). + */ +static const u8 xboxone_pdp_init1[] = { + 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14 +}; + +/* + * This packet is required for some of the PDP pads to start + * sending input reports. One of those pads is (0x0e6f:0x02ab). + */ +static const u8 xboxone_pdp_init2[] = { + 0x06, 0x20, 0x00, 0x02, 0x01, 0x00 +}; + /* * A specific rumble packet is required for some PowerA pads to start * sending input reports. One of those pads is (0x24c6:0x543a). 
@@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), + XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1), + XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), From 7e4cd0ad576a75d26ea1658cf76c2ce00774ba73 Mon Sep 17 00:00:00 2001 From: Aaron Ma Date: Fri, 19 Jan 2018 09:43:39 -0800 Subject: [PATCH 1473/3326] Input: trackpoint - force 3 buttons if 0 button is reported commit f5d07b9e98022d50720e38aa936fc11c67868ece upstream. Lenovo introduced trackpoint compatible sticks with minimum PS/2 commands. They supposed to reply with 0x02, 0x03, or 0x04 in response to the "Read Extended ID" command, so we would know not to try certain extended commands. Unfortunately even some trackpoints reporting the original IBM version (0x01 firmware 0x0e) now respond with incorrect data to the "Get Extended Buttons" command: thinkpad_acpi: ThinkPad BIOS R0DET87W (1.87 ), EC unknown thinkpad_acpi: Lenovo ThinkPad E470, model 20H1004SGE psmouse serio2: trackpoint: IBM TrackPoint firmware: 0x0e, buttons: 0/0 Since there are no trackpoints without buttons, let's assume the trackpoint has 3 buttons when we get 0 response to the extended buttons query. Signed-off-by: Aaron Ma Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=196253 Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/mouse/trackpoint.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 0871010f18d5f4..92a8898682a6d8 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -383,6 +383,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties) if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); button_info = 0x33; + } else if (!button_info) { + psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n"); + button_info = 0x33; } psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); From 203a60330e040f2aa326f028b8c41a774bcb0990 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Fri, 5 Jan 2018 13:28:47 -0800 Subject: [PATCH 1474/3326] Input: trackpoint - only expose supported controls for Elan, ALPS and NXP commit 2a924d71794c530e55e73d0ce2cc77233307eaa9 upstream. The newer trackpoints from ALPS, Elan and NXP implement a very limited subset of extended commands and controls that the original trackpoints implemented, so we should not be exposing not working controls in sysfs. The newer trackpoints also do not implement "Power On Reset" or "Read Extended Button Status", so we should not be using these commands during initialization. While we are at it, let's change "unsigned char" to u8 for byte data or bool for booleans and use better suited error codes instead of -1. 
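A condensed sketch of the sysfs gating introduced here, lifted in simplified form from the hunk below (the attribute group's .is_visible callback uses this helper to hide the unsupported controls):

	/* Only the original IBM trackpoint implements the full set of extended
	 * controls; newer ALPS/Elan/NXP parts keep just these two attributes. */
	static bool trackpoint_is_attr_available(struct psmouse *psmouse,
						 struct attribute *attr)
	{
		struct trackpoint_data *tp = psmouse->private;

		return tp->variant_id == TP_VARIANT_IBM ||
		       attr == &psmouse_attr_sensitivity.dattr.attr ||
		       attr == &psmouse_attr_press_to_select.dattr.attr;
	}
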
Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/mouse/trackpoint.c | 248 +++++++++++++++++++------------ drivers/input/mouse/trackpoint.h | 34 +++-- 2 files changed, 172 insertions(+), 110 deletions(-) diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 92a8898682a6d8..bbd29220dbe998 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -19,6 +19,13 @@ #include "psmouse.h" #include "trackpoint.h" +static const char * const trackpoint_variants[] = { + [TP_VARIANT_IBM] = "IBM", + [TP_VARIANT_ALPS] = "ALPS", + [TP_VARIANT_ELAN] = "Elan", + [TP_VARIANT_NXP] = "NXP", +}; + /* * Power-on Reset: Resets all trackpoint parameters, including RAM values, * to defaults. @@ -26,7 +33,7 @@ */ static int trackpoint_power_on_reset(struct ps2dev *ps2dev) { - unsigned char results[2]; + u8 results[2]; int tries = 0; /* Issue POR command, and repeat up to once if 0xFC00 received */ @@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev) /* Check for success response -- 0xAA00 */ if (results[0] != 0xAA || results[1] != 0x00) - return -1; + return -ENODEV; return 0; } @@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev) /* * Device IO: read, write and toggle bit */ -static int trackpoint_read(struct ps2dev *ps2dev, - unsigned char loc, unsigned char *results) +static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results) { if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) || ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) { @@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev, return 0; } -static int trackpoint_write(struct ps2dev *ps2dev, - unsigned char loc, unsigned char val) +static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val) { if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) || ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) || @@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev, return 0; } -static int trackpoint_toggle_bit(struct ps2dev *ps2dev, - unsigned char loc, unsigned char mask) +static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask) { /* Bad things will happen if the loc param isn't in this range */ if (loc < 0x20 || loc >= 0x2F) @@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev, return 0; } -static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc, - unsigned char mask, unsigned char value) +static int trackpoint_update_bit(struct ps2dev *ps2dev, + u8 loc, u8 mask, u8 value) { int retval = 0; - unsigned char data; + u8 data; trackpoint_read(ps2dev, loc, &data); if (((data & mask) == mask) != !!value) @@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc, */ struct trackpoint_attr_data { size_t field_offset; - unsigned char command; - unsigned char mask; - unsigned char inverted; - unsigned char power_on_default; + u8 command; + u8 mask; + bool inverted; + u8 power_on_default; }; -static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf) +static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, + void *data, char *buf) { struct trackpoint_data *tp = psmouse->private; struct trackpoint_attr_data *attr = data; - unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset); + u8 value = *(u8 *)((void *)tp + attr->field_offset); if (attr->inverted) value = !value; @@ -128,8 +133,8 @@ static ssize_t 
trackpoint_set_int_attr(struct psmouse *psmouse, void *data, { struct trackpoint_data *tp = psmouse->private; struct trackpoint_attr_data *attr = data; - unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); - unsigned char value; + u8 *field = (void *)tp + attr->field_offset; + u8 value; int err; err = kstrtou8(buf, 10, &value); @@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data, { struct trackpoint_data *tp = psmouse->private; struct trackpoint_attr_data *attr = data; - unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); - unsigned int value; + bool *field = (void *)tp + attr->field_offset; + bool value; int err; - err = kstrtouint(buf, 10, &value); + err = kstrtobool(buf, &value); if (err) return err; - if (value > 1) - return -EINVAL; - if (attr->inverted) value = !value; @@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \ &trackpoint_attr_##_name, \ trackpoint_show_int_attr, trackpoint_set_bit_attr) -#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name) \ -do { \ - struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \ - \ - trackpoint_update_bit(&_psmouse->ps2dev, \ - _attr->command, _attr->mask, _tp->_name); \ -} while (0) - -#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \ -do { \ - if (!_power_on || \ - _tp->_name != trackpoint_attr_##_name.power_on_default) { \ - if (!trackpoint_attr_##_name.mask) \ - trackpoint_write(&_psmouse->ps2dev, \ - trackpoint_attr_##_name.command, \ - _tp->_name); \ - else \ - TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name); \ - } \ -} while (0) - -#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \ - (_tp->_name = trackpoint_attr_##_name.power_on_default) - TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS); TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED); TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA); @@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME); TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV); TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME); -TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0, +TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false, TP_DEF_PTSON); -TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0, +TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false, TP_DEF_SKIPBACK); -TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1, +TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true, TP_DEF_EXT_DEV); +static bool trackpoint_is_attr_available(struct psmouse *psmouse, + struct attribute *attr) +{ + struct trackpoint_data *tp = psmouse->private; + + return tp->variant_id == TP_VARIANT_IBM || + attr == &psmouse_attr_sensitivity.dattr.attr || + attr == &psmouse_attr_press_to_select.dattr.attr; +} + +static umode_t trackpoint_is_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct serio *serio = to_serio_port(dev); + struct psmouse *psmouse = serio_get_drvdata(serio); + + return trackpoint_is_attr_available(psmouse, attr) ? 
attr->mode : 0; +} + static struct attribute *trackpoint_attrs[] = { &psmouse_attr_sensitivity.dattr.attr, &psmouse_attr_speed.dattr.attr, @@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = { }; static struct attribute_group trackpoint_attr_group = { - .attrs = trackpoint_attrs, + .is_visible = trackpoint_is_attr_visible, + .attrs = trackpoint_attrs, }; -static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id) -{ - unsigned char param[2] = { 0 }; +#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \ +do { \ + struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \ + \ + if ((!_power_on || _tp->_name != _attr->power_on_default) && \ + trackpoint_is_attr_available(_psmouse, \ + &psmouse_attr_##_name.dattr.attr)) { \ + if (!_attr->mask) \ + trackpoint_write(&_psmouse->ps2dev, \ + _attr->command, _tp->_name); \ + else \ + trackpoint_update_bit(&_psmouse->ps2dev, \ + _attr->command, _attr->mask, \ + _tp->_name); \ + } \ +} while (0) - if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) - return -1; +#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \ +do { \ + _tp->_name = trackpoint_attr_##_name.power_on_default; \ +} while (0) - /* add new TP ID. */ - if (!(param[0] & TP_MAGIC_IDENT)) - return -1; +static int trackpoint_start_protocol(struct psmouse *psmouse, + u8 *variant_id, u8 *firmware_id) +{ + u8 param[2] = { 0 }; + int error; - if (firmware_id) - *firmware_id = param[1]; + error = ps2_command(&psmouse->ps2dev, + param, MAKE_PS2_CMD(0, 2, TP_READ_ID)); + if (error) + return error; + + switch (param[0]) { + case TP_VARIANT_IBM: + case TP_VARIANT_ALPS: + case TP_VARIANT_ELAN: + case TP_VARIANT_NXP: + if (variant_id) + *variant_id = param[0]; + if (firmware_id) + *firmware_id = param[1]; + return 0; + } - return 0; + return -ENODEV; } /* @@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state) { struct trackpoint_data *tp = psmouse->private; - if (!in_power_on_state) { + if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) { /* * Disable features that may make device unusable * with this driver. 
@@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp) static void trackpoint_disconnect(struct psmouse *psmouse) { - sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group); + device_remove_group(&psmouse->ps2dev.serio->dev, + &trackpoint_attr_group); kfree(psmouse->private); psmouse->private = NULL; @@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse) static int trackpoint_reconnect(struct psmouse *psmouse) { - int reset_fail; + struct trackpoint_data *tp = psmouse->private; + int error; + bool was_reset; - if (trackpoint_start_protocol(psmouse, NULL)) - return -1; + error = trackpoint_start_protocol(psmouse, NULL, NULL); + if (error) + return error; - reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev); - if (trackpoint_sync(psmouse, !reset_fail)) - return -1; + was_reset = tp->variant_id == TP_VARIANT_IBM && + trackpoint_power_on_reset(&psmouse->ps2dev) == 0; + + error = trackpoint_sync(psmouse, was_reset); + if (error) + return error; return 0; } @@ -370,49 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse) int trackpoint_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; - unsigned char firmware_id; - unsigned char button_info; + struct trackpoint_data *tp; + u8 variant_id; + u8 firmware_id; + u8 button_info; int error; - if (trackpoint_start_protocol(psmouse, &firmware_id)) - return -1; + error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id); + if (error) + return error; if (!set_properties) return 0; - if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { - psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); - button_info = 0x33; - } else if (!button_info) { - psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n"); - button_info = 0x33; - } - - psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); - if (!psmouse->private) + tp = kzalloc(sizeof(*tp), GFP_KERNEL); + if (!tp) return -ENOMEM; - psmouse->vendor = "IBM"; + trackpoint_defaults(tp); + tp->variant_id = variant_id; + tp->firmware_id = firmware_id; + + psmouse->private = tp; + + psmouse->vendor = trackpoint_variants[variant_id]; psmouse->name = "TrackPoint"; psmouse->reconnect = trackpoint_reconnect; psmouse->disconnect = trackpoint_disconnect; + if (variant_id != TP_VARIANT_IBM) { + /* Newer variants do not support extended button query. */ + button_info = 0x33; + } else { + error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info); + if (error) { + psmouse_warn(psmouse, + "failed to get extended button data, assuming 3 buttons\n"); + button_info = 0x33; + } else if (!button_info) { + psmouse_warn(psmouse, + "got 0 in extended button data, assuming 3 buttons\n"); + button_info = 0x33; + } + } + if ((button_info & 0x0f) >= 3) - __set_bit(BTN_MIDDLE, psmouse->dev->keybit); + input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE); __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit); __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit); - trackpoint_defaults(psmouse->private); - - error = trackpoint_power_on_reset(ps2dev); - - /* Write defaults to TP only if reset fails. */ - if (error) + if (variant_id != TP_VARIANT_IBM || + trackpoint_power_on_reset(ps2dev) != 0) { + /* + * Write defaults to TP if we did not reset the trackpoint. 
+ */ trackpoint_sync(psmouse, false); + } - error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group); + error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group); if (error) { psmouse_err(psmouse, "failed to create sysfs attributes, error: %d\n", @@ -423,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties) } psmouse_info(psmouse, - "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n", - firmware_id, + "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n", + psmouse->vendor, firmware_id, (button_info & 0xf0) >> 4, button_info & 0x0f); return 0; diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 88055755f82e23..10a0391482343d 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -21,10 +21,16 @@ #define TP_COMMAND 0xE2 /* Commands start with this */ #define TP_READ_ID 0xE1 /* Sent for device identification */ -#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ - /* by the firmware ID */ - /* Firmware ID includes 0x1, 0x2, 0x3 */ +/* + * Valid first byte responses to the "Read Secondary ID" (0xE1) command. + * 0x01 was the original IBM trackpoint, others implement very limited + * subset of trackpoint features. + */ +#define TP_VARIANT_IBM 0x01 +#define TP_VARIANT_ALPS 0x02 +#define TP_VARIANT_ELAN 0x03 +#define TP_VARIANT_NXP 0x04 /* * Commands @@ -136,18 +142,20 @@ #define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd)) -struct trackpoint_data -{ - unsigned char sensitivity, speed, inertia, reach; - unsigned char draghys, mindrag; - unsigned char thresh, upthresh; - unsigned char ztime, jenks; - unsigned char drift_time; +struct trackpoint_data { + u8 variant_id; + u8 firmware_id; + + u8 sensitivity, speed, inertia, reach; + u8 draghys, mindrag; + u8 thresh, upthresh; + u8 ztime, jenks; + u8 drift_time; /* toggles */ - unsigned char press_to_select; - unsigned char skipback; - unsigned char ext_dev; + bool press_to_select; + bool skipback; + bool ext_dev; }; #ifdef CONFIG_MOUSE_PS2_TRACKPOINT From 5c7b881331f825fe97a47aed6bb4a32c542eca2d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 23 Jan 2018 15:17:05 -0500 Subject: [PATCH 1475/3326] Btrfs: fix stale entries in readdir commit e4fd493c0541d36953f7b9d3bfced67a1321792f upstream. In fixing the readdir+pagefault deadlock I accidentally introduced a stale entry regression in readdir. If we get close to full for the temporary buffer, and then skip a few delayed deletions, and then try to add another entry that won't fit, we will emit the entries we found and retry. Unfortunately we delete entries from our del_list as we find them, assuming we won't need them. However our pos will be with whatever our last entry was, which could be before the delayed deletions we skipped, so the next search will add the deleted entries back into our readdir buffer. So instead don't delete entries we find in our del_list so we can make sure we always find our delayed deletions. This is a slight perf hit for readdir with lots of pending deletions, but hopefully this isn't a common occurrence. If it is we can revist this and optimize it. 
Fixes: 23b5ec74943f ("btrfs: fix readdir deadlock with pagefault") Signed-off-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/delayed-inode.c | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 0c4b690cf761b9..04f39111fafb4b 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1677,28 +1677,18 @@ void btrfs_readdir_put_delayed_items(struct inode *inode, int btrfs_should_delete_dir_index(struct list_head *del_list, u64 index) { - struct btrfs_delayed_item *curr, *next; - int ret; - - if (list_empty(del_list)) - return 0; + struct btrfs_delayed_item *curr; + int ret = 0; - list_for_each_entry_safe(curr, next, del_list, readdir_list) { + list_for_each_entry(curr, del_list, readdir_list) { if (curr->key.offset > index) break; - - list_del(&curr->readdir_list); - ret = (curr->key.offset == index); - - if (refcount_dec_and_test(&curr->refs)) - kfree(curr); - - if (ret) - return 1; - else - continue; + if (curr->key.offset == index) { + ret = 1; + break; + } } - return 0; + return ret; } /* From d529ba9b270c8713440c63f64af6a7d522591990 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 22 Dec 2017 10:54:20 +0100 Subject: [PATCH 1476/3326] KVM: s390: add proper locking for CMMA migration bitmap commit 1de1ea7efeb9e8543212210e34518b4049ccd285 upstream. Some parts of the cmma migration bitmap is already protected with the kvm->lock (e.g. the migration start). On the other hand the read of the cmma bits is not protected against a concurrent free, neither is the emulation of the ESSA instruction. Let's extend the locking to all related ioctls by using the slots lock for - kvm_s390_vm_start_migration - kvm_s390_vm_stop_migration - kvm_s390_set_cmma_bits - kvm_s390_get_cmma_bits In addition to that, we use synchronize_srcu before freeing the migration structure as all users hold kvm->srcu for read. (e.g. the ESSA handler). Reported-by: David Hildenbrand Signed-off-by: Christian Borntraeger Fixes: 190df4a212a7 (KVM: s390: CMMA tracking, ESSA emulation, migration mode) Reviewed-by: Claudio Imbrenda Reviewed-by: David Hildenbrand Reviewed-by: Cornelia Huck Signed-off-by: Greg Kroah-Hartman --- arch/s390/kvm/kvm-s390.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index b87a930c2201bc..6c88cb18ace2e3 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -768,7 +768,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) /* * Must be called with kvm->srcu held to avoid races on memslots, and with - * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration. + * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration. */ static int kvm_s390_vm_start_migration(struct kvm *kvm) { @@ -824,7 +824,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) } /* - * Must be called with kvm->lock to avoid races with ourselves and + * Must be called with kvm->slots_lock to avoid races with ourselves and * kvm_s390_vm_start_migration. 
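A condensed sketch of the corrected lookup, simplified from the btrfs_should_delete_dir_index() hunk below:

	/* Scan the delayed-deletion list without removing entries, so a later
	 * readdir retry still sees every pending deletion. */
	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index)
			return 1;	/* this index is scheduled for deletion */
	}
	return 0;
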
*/ static int kvm_s390_vm_stop_migration(struct kvm *kvm) @@ -839,6 +839,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm) if (kvm->arch.use_cmma) { kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); + /* We have to wait for the essa emulation to finish */ + synchronize_srcu(&kvm->srcu); vfree(mgs->pgste_bitmap); } kfree(mgs); @@ -848,14 +850,12 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm) static int kvm_s390_vm_set_migration(struct kvm *kvm, struct kvm_device_attr *attr) { - int idx, res = -ENXIO; + int res = -ENXIO; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->slots_lock); switch (attr->attr) { case KVM_S390_VM_MIGRATION_START: - idx = srcu_read_lock(&kvm->srcu); res = kvm_s390_vm_start_migration(kvm); - srcu_read_unlock(&kvm->srcu, idx); break; case KVM_S390_VM_MIGRATION_STOP: res = kvm_s390_vm_stop_migration(kvm); @@ -863,7 +863,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm, default: break; } - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->slots_lock); return res; } @@ -1753,7 +1753,9 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&args, argp, sizeof(args))) break; + mutex_lock(&kvm->slots_lock); r = kvm_s390_get_cmma_bits(kvm, &args); + mutex_unlock(&kvm->slots_lock); if (!r) { r = copy_to_user(argp, &args, sizeof(args)); if (r) @@ -1767,7 +1769,9 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&args, argp, sizeof(args))) break; + mutex_lock(&kvm->slots_lock); r = kvm_s390_set_cmma_bits(kvm, &args); + mutex_unlock(&kvm->slots_lock); break; } default: From 326efb49e153fbb9ccacaed9858bd9da91204061 Mon Sep 17 00:00:00 2001 From: Martin Brandenburg Date: Thu, 25 Jan 2018 19:39:44 -0500 Subject: [PATCH 1477/3326] orangefs: fix deadlock; do not write i_size in read_iter commit 6793f1c450b1533a5e9c2493490de771d38b24f9 upstream. After do_readv_writev, the inode cache is invalidated anyway, so i_size will never be read. It will be fetched from the server which will also know about updates from other machines. Fixes deadlock on 32-bit SMP. See https://marc.info/?l=linux-fsdevel&m=151268557427760&w=2 Signed-off-by: Martin Brandenburg Cc: Al Viro Cc: Mike Marshall Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/orangefs/file.c | 7 ++----- fs/orangefs/orangefs-kernel.h | 11 ----------- 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c index e4a8e6a7eb17b6..962bf4824283d0 100644 --- a/fs/orangefs/file.c +++ b/fs/orangefs/file.c @@ -446,7 +446,7 @@ ssize_t orangefs_inode_read(struct inode *inode, static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; - loff_t pos = *(&iocb->ki_pos); + loff_t pos = iocb->ki_pos; ssize_t rc = 0; BUG_ON(iocb->private); @@ -486,9 +486,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite } } - if (file->f_pos > i_size_read(file->f_mapping->host)) - orangefs_i_size_write(file->f_mapping->host, file->f_pos); - rc = generic_write_checks(iocb, iter); if (rc <= 0) { @@ -502,7 +499,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite * pos to the end of the file, so we will wait till now to set * pos... 
*/ - pos = *(&iocb->ki_pos); + pos = iocb->ki_pos; rc = do_readv_writev(ORANGEFS_IO_WRITE, file, diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h index 004af348fb80ec..c244bbf494bc50 100644 --- a/fs/orangefs/orangefs-kernel.h +++ b/fs/orangefs/orangefs-kernel.h @@ -566,17 +566,6 @@ do { \ sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \ } while (0) -static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size) -{ -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) - inode_lock(inode); -#endif - i_size_write(inode, i_size); -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) - inode_unlock(inode); -#endif -} - static inline void orangefs_set_timeout(struct dentry *dentry) { unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; From 124047a81eae1fad08482aa122e683f602fe85dc Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 13 Jan 2018 11:35:15 +0000 Subject: [PATCH 1478/3326] ARM: net: bpf: avoid 'bx' instruction on non-Thumb capable CPUs commit e9062481824384f00299971f923fecf6b3668001 upstream. Avoid the 'bx' instruction on CPUs that have no support for Thumb and thus do not implement this instruction by moving the generation of this opcode to a separate function that selects between: bx reg and mov pc, reg according to the capabilities of the CPU. Fixes: 39c13c204bb1 ("arm: eBPF JIT compiler") Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/net/bpf_jit_32.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index c199990e12b620..4efb3743a89e05 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -285,16 +285,20 @@ static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx) emit_mov_i_no8m(rd, val, ctx); } -static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) +static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx) { - ctx->seen |= SEEN_CALL; -#if __LINUX_ARM_ARCH__ < 5 - emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); - if (elf_hwcap & HWCAP_THUMB) emit(ARM_BX(tgt_reg), ctx); else emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); +} + +static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) +{ + ctx->seen |= SEEN_CALL; +#if __LINUX_ARM_ARCH__ < 5 + emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); + emit_bx_r(tgt_reg, ctx); #else emit(ARM_BLX_R(tgt_reg), ctx); #endif @@ -997,7 +1001,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit_a32_mov_i(tmp2[1], off, false, ctx); emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx); emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); - emit(ARM_BX(tmp[1]), ctx); + emit_bx_r(tmp[1], ctx); /* out: */ if (out_offset == -1) @@ -1166,7 +1170,7 @@ static void build_epilogue(struct jit_ctx *ctx) emit(ARM_POP(reg_set), ctx); /* Return back to the callee function */ if (!(ctx->seen & SEEN_CALL)) - emit(ARM_BX(ARM_LR), ctx); + emit_bx_r(ARM_LR, ctx); #endif } From e7119caa7cd93ff4a01788ebd2fc95edeaa16da3 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 13 Jan 2018 11:39:54 +0000 Subject: [PATCH 1479/3326] ARM: net: bpf: fix tail call jumps commit f4483f2cc1fdc03488c8a1452e545545ae5bda93 upstream. When a tail call fails, it is documented that the tail call should continue execution at the following instruction. 
An example tail call sequence is: 12: (85) call bpf_tail_call#12 13: (b7) r0 = 0 14: (95) exit The ARM assembler for the tail call in this case ends up branching to instruction 14 instead of instruction 13, resulting in the BPF filter returning a non-zero value: 178: ldr r8, [sp, #588] ; insn 12 17c: ldr r6, [r8, r6] 180: ldr r8, [sp, #580] 184: cmp r8, r6 188: bcs 0x1e8 18c: ldr r6, [sp, #524] 190: ldr r7, [sp, #528] 194: cmp r7, #0 198: cmpeq r6, #32 19c: bhi 0x1e8 1a0: adds r6, r6, #1 1a4: adc r7, r7, #0 1a8: str r6, [sp, #524] 1ac: str r7, [sp, #528] 1b0: mov r6, #104 1b4: ldr r8, [sp, #588] 1b8: add r6, r8, r6 1bc: ldr r8, [sp, #580] 1c0: lsl r7, r8, #2 1c4: ldr r6, [r6, r7] 1c8: cmp r6, #0 1cc: beq 0x1e8 1d0: mov r8, #32 1d4: ldr r6, [r6, r8] 1d8: add r6, r6, #44 1dc: bx r6 1e0: mov r0, #0 ; insn 13 1e4: mov r1, #0 1e8: add sp, sp, #596 ; insn 14 1ec: pop {r4, r5, r6, r7, r8, sl, pc} For other sequences, the tail call could end up branching midway through the following BPF instructions, or maybe off the end of the function, leading to unknown behaviours. Fixes: 39c13c204bb1 ("arm: eBPF JIT compiler") Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/net/bpf_jit_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 4efb3743a89e05..ce36d2cab50ca9 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -949,7 +949,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) const u8 *tcc = bpf2a32[TCALL_CNT]; const int idx0 = ctx->idx; #define cur_offset (ctx->idx - idx0) -#define jmp_offset (out_offset - (cur_offset)) +#define jmp_offset (out_offset - (cur_offset) - 2) u32 off, lo, hi; /* if (index >= array->map.max_entries) From bfd2c2b9915cb3c82743251ab365b64ca2280e19 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 13 Jan 2018 16:10:07 +0000 Subject: [PATCH 1480/3326] ARM: net: bpf: fix stack alignment commit d1220efd23484c72c82d5471f05daeb35b5d1916 upstream. As per 2dede2d8e925 ("ARM EABI: stack pointer must be 64-bit aligned after a CPU exception") the stack should be aligned to a 64-bit boundary on EABI systems. Ensure that the eBPF JIT appropraitely aligns the stack. Fixes: 39c13c204bb1 ("arm: eBPF JIT compiler") Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/net/bpf_jit_32.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index ce36d2cab50ca9..d00a0eb0386ee9 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -179,8 +179,13 @@ static void jit_fill_hole(void *area, unsigned int size) *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); } -/* Stack must be multiples of 16 Bytes */ -#define STACK_ALIGN(sz) (((sz) + 3) & ~3) +#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) +/* EABI requires the stack to be aligned to 64-bit boundaries */ +#define STACK_ALIGNMENT 8 +#else +/* Stack must be aligned to 32-bit boundaries */ +#define STACK_ALIGNMENT 4 +#endif /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, @@ -194,7 +199,7 @@ static void jit_fill_hole(void *area, unsigned int size) + SCRATCH_SIZE + \ + 4 /* extra for skb_copy_bits buffer */) -#define STACK_SIZE STACK_ALIGN(_STACK_SIZE) +#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) /* Get the offset of eBPF REGISTERs stored on scratch space. 
*/ #define STACK_VAR(off) (STACK_SIZE-off-4) From 403f4c6ae9d15afd0776316728db18f20600dba6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 13 Jan 2018 21:26:14 +0000 Subject: [PATCH 1481/3326] ARM: net: bpf: move stack documentation commit 70ec3a6c2c11e4b0e107a65de943a082f9aff351 upstream. Move the stack documentation towards the top of the file, where it's relevant for things like the register layout. Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/net/bpf_jit_32.c | 42 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index d00a0eb0386ee9..e90229d58c77fb 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -27,6 +27,27 @@ int bpf_jit_enable __read_mostly; +/* + * eBPF prog stack layout + * + * high + * original ARM_SP => +-----+ eBPF prologue + * |FP/LR| + * current ARM_FP => +-----+ + * | ... | callee saved registers + * eBPF fp register => +-----+ <= (BPF_FP) + * | ... | eBPF JIT scratch space + * | | eBPF prog stack + * +-----+ + * |RSVD | JIT scratchpad + * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE) + * | | + * | ... | Function call stack + * | | + * +-----+ + * low + */ + #define STACK_OFFSET(k) (k) #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ @@ -1091,27 +1112,6 @@ static void build_prologue(struct jit_ctx *ctx) u16 reg_set = 0; - /* - * eBPF prog stack layout - * - * high - * original ARM_SP => +-----+ eBPF prologue - * |FP/LR| - * current ARM_FP => +-----+ - * | ... | callee saved registers - * eBPF fp register => +-----+ <= (BPF_FP) - * | ... | eBPF JIT scratch space - * | | eBPF prog stack - * +-----+ - * |RSVD | JIT scratchpad - * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE) - * | | - * | ... | Function call stack - * | | - * +-----+ - * low - */ - /* Save callee saved registers. */ reg_set |= (1< Date: Sat, 13 Jan 2018 22:51:27 +0000 Subject: [PATCH 1482/3326] ARM: net: bpf: correct stack layout documentation commit 0005e55a79cfda88199e41a406a829c88d708c67 upstream. The stack layout documentation incorrectly suggests that the BPF JIT scratch space starts immediately below BPF_FP. This is not correct, so let's fix the documentation to reflect reality. Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/net/bpf_jit_32.c | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index e90229d58c77fb..dcb3181e85f39b 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -28,24 +28,43 @@ int bpf_jit_enable __read_mostly; /* - * eBPF prog stack layout + * eBPF prog stack layout: * * high - * original ARM_SP => +-----+ eBPF prologue - * |FP/LR| - * current ARM_FP => +-----+ - * | ... | callee saved registers - * eBPF fp register => +-----+ <= (BPF_FP) + * original ARM_SP => +-----+ + * | | callee saved registers + * +-----+ <= (BPF_FP + SCRATCH_SIZE) * | ... | eBPF JIT scratch space - * | | eBPF prog stack + * eBPF fp register => +-----+ + * (BPF_FP) | ... | eBPF prog stack * +-----+ * |RSVD | JIT scratchpad - * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE) + * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE) * | | * | ... | Function call stack * | | * +-----+ * low + * + * The callee saved registers depends on whether frame pointers are enabled. 
+ * With frame pointers (to be compliant with the ABI): + * + * high + * original ARM_SP => +------------------+ \ + * | pc | | + * current ARM_FP => +------------------+ } callee saved registers + * |r4-r8,r10,fp,ip,lr| | + * +------------------+ / + * low + * + * Without frame pointers: + * + * high + * original ARM_SP => +------------------+ + * | lr | (optional) + * | r4-r8,r10 | callee saved registers + * +------------------+ + * low */ #define STACK_OFFSET(k) (k) From 0da4a4d0c7f941638e2e7e515c2d758de8976418 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 13 Jan 2018 22:38:18 +0000 Subject: [PATCH 1483/3326] ARM: net: bpf: fix register saving commit 02088d9b392f605c892894b46aa8c83e3abd0115 upstream. When an eBPF program tail-calls another eBPF program, it enters it after the prologue to avoid having complex stack manipulations. This can lead to kernel oopses, and similar. Resolve this by always using a fixed stack layout, a CPU register frame pointer, and using this when reloading registers before returning. Fixes: 39c13c204bb1 ("arm: eBPF JIT compiler") Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/net/bpf_jit_32.c | 80 +++++++++++---------------------------- 1 file changed, 22 insertions(+), 58 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index dcb3181e85f39b..95bb3f896c8f42 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -61,20 +61,24 @@ int bpf_jit_enable __read_mostly; * * high * original ARM_SP => +------------------+ - * | lr | (optional) - * | r4-r8,r10 | callee saved registers - * +------------------+ + * | r4-r8,r10,fp,lr | callee saved registers + * current ARM_FP => +------------------+ * low + * + * When popping registers off the stack at the end of a BPF function, we + * reference them via the current ARM_FP register. */ +#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \ + 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \ + 1 << ARM_FP) +#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR) +#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC) #define STACK_OFFSET(k) (k) #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ #define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ -/* Flags used for JIT optimization */ -#define SEEN_CALL (1 << 0) - #define FLAG_IMM_OVERFLOW (1 << 0) /* @@ -135,7 +139,6 @@ static const u8 bpf2a32[][2] = { * idx : index of current last JITed instruction. * prologue_bytes : bytes used in prologue. * epilogue_offset : offset of epilogue starting. - * seen : bit mask used for JIT optimization. * offsets : array of eBPF instruction offsets in * JITed code. * target : final JITed code. @@ -150,7 +153,6 @@ struct jit_ctx { unsigned int idx; unsigned int prologue_bytes; unsigned int epilogue_offset; - u32 seen; u32 flags; u32 *offsets; u32 *target; @@ -340,7 +342,6 @@ static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx) static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) { - ctx->seen |= SEEN_CALL; #if __LINUX_ARM_ARCH__ < 5 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); emit_bx_r(tgt_reg, ctx); @@ -403,7 +404,6 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) } /* Call appropriate function */ - ctx->seen |= SEEN_CALL; emit_mov_i(ARM_IP, op == BPF_DIV ? 
(u32)jit_udiv32 : (u32)jit_mod32, ctx); emit_blx_r(ARM_IP, ctx); @@ -669,8 +669,6 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk, /* Do LSH operation */ emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); emit(ARM_RSB_I(tmp2[0], rt, 32), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx); emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); @@ -705,8 +703,6 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, /* Do the ARSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); _emit(ARM_COND_MI, ARM_B(0), ctx); @@ -741,8 +737,6 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, /* Do LSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); @@ -877,8 +871,6 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk, /* Do Multiplication */ emit(ARM_MUL(ARM_IP, rd, rn), ctx); emit(ARM_MUL(ARM_LR, rm, rt), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx); emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); @@ -955,7 +947,6 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, const u8 rn, struct jit_ctx *ctx, u8 op) { switch (op) { case BPF_JSET: - ctx->seen |= SEEN_CALL; emit(ARM_AND_R(ARM_IP, rt, rn), ctx); emit(ARM_AND_R(ARM_LR, rd, rm), ctx); emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); @@ -1119,33 +1110,22 @@ static void build_prologue(struct jit_ctx *ctx) const u8 r2 = bpf2a32[BPF_REG_1][1]; const u8 r3 = bpf2a32[BPF_REG_1][0]; const u8 r4 = bpf2a32[BPF_REG_6][1]; - const u8 r5 = bpf2a32[BPF_REG_6][0]; - const u8 r6 = bpf2a32[TMP_REG_1][1]; - const u8 r7 = bpf2a32[TMP_REG_1][0]; - const u8 r8 = bpf2a32[TMP_REG_2][1]; - const u8 r10 = bpf2a32[TMP_REG_2][0]; const u8 fplo = bpf2a32[BPF_REG_FP][1]; const u8 fphi = bpf2a32[BPF_REG_FP][0]; - const u8 sp = ARM_SP; const u8 *tcc = bpf2a32[TCALL_CNT]; - u16 reg_set = 0; - /* Save callee saved registers. */ - reg_set |= (1<seen & SEEN_CALL) - reg_set |= (1<stack_size = imm8m(STACK_SIZE); @@ -1168,33 +1148,19 @@ static void build_prologue(struct jit_ctx *ctx) /* end of prologue */ } +/* restore callee saved registers. */ static void build_epilogue(struct jit_ctx *ctx) { - const u8 r4 = bpf2a32[BPF_REG_6][1]; - const u8 r5 = bpf2a32[BPF_REG_6][0]; - const u8 r6 = bpf2a32[TMP_REG_1][1]; - const u8 r7 = bpf2a32[TMP_REG_1][0]; - const u8 r8 = bpf2a32[TMP_REG_2][1]; - const u8 r10 = bpf2a32[TMP_REG_2][0]; - u16 reg_set = 0; - - /* unwind function call stack */ - emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx); - - /* restore callee saved registers. 
*/ - reg_set |= (1<seen & SEEN_CALL) - reg_set |= (1<seen & SEEN_CALL)) - emit_bx_r(ARM_LR, ctx); + emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx); + emit(ARM_POP(CALLEE_POP_MASK), ctx); #endif } @@ -1422,8 +1388,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) emit_rev32(rt, rt, ctx); goto emit_bswap_uxt; case 64: - /* Because of the usage of ARM_LR */ - ctx->seen |= SEEN_CALL; emit_rev32(ARM_LR, rt, ctx); emit_rev32(rt, rd, ctx); emit(ARM_MOV_R(rd, ARM_LR), ctx); From 5911dd3f92aefd86b4a86286c94398681c453802 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 13 Jan 2018 21:06:16 +0000 Subject: [PATCH 1484/3326] ARM: net: bpf: fix LDX instructions commit ec19e02b343db991d2d1610c409efefebf4e2ca9 upstream. When the source and destination register are identical, our JIT does not generate correct code, which leads to kernel oopses. Fix this by (a) generating more efficient code, and (b) making use of the temporary earlier if we will overwrite the address register. Fixes: 39c13c204bb1 ("arm: eBPF JIT compiler") Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/net/bpf_jit_32.c | 61 +++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 95bb3f896c8f42..715e7250de862b 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -913,33 +913,53 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk, } /* dst = *(size*)(src + off) */ -static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk, - const s32 off, struct jit_ctx *ctx, const u8 sz){ +static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk, + s32 off, struct jit_ctx *ctx, const u8 sz){ const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rd = dstk ? tmp[1] : dst; + const u8 *rd = dstk ? tmp : dst; u8 rm = src; + s32 off_max; - if (off) { + if (sz == BPF_H) + off_max = 0xff; + else + off_max = 0xfff; + + if (off < 0 || off > off_max) { emit_a32_mov_i(tmp[0], off, false, ctx); emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); rm = tmp[0]; + off = 0; + } else if (rd[1] == rm) { + emit(ARM_MOV_R(tmp[0], rm), ctx); + rm = tmp[0]; } switch (sz) { - case BPF_W: - /* Load a Word */ - emit(ARM_LDR_I(rd, rm, 0), ctx); + case BPF_B: + /* Load a Byte */ + emit(ARM_LDRB_I(rd[1], rm, off), ctx); + emit_a32_mov_i(dst[0], 0, dstk, ctx); break; case BPF_H: /* Load a HalfWord */ - emit(ARM_LDRH_I(rd, rm, 0), ctx); + emit(ARM_LDRH_I(rd[1], rm, off), ctx); + emit_a32_mov_i(dst[0], 0, dstk, ctx); break; - case BPF_B: - /* Load a Byte */ - emit(ARM_LDRB_I(rd, rm, 0), ctx); + case BPF_W: + /* Load a Word */ + emit(ARM_LDR_I(rd[1], rm, off), ctx); + emit_a32_mov_i(dst[0], 0, dstk, ctx); + break; + case BPF_DW: + /* Load a Double Word */ + emit(ARM_LDR_I(rd[1], rm, off), ctx); + emit(ARM_LDR_I(rd[0], rm, off + 4), ctx); break; } if (dstk) - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx); + if (dstk && sz == BPF_DW) + emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx); } /* Arithmatic Operation */ @@ -1440,22 +1460,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) rn = sstk ? 
tmp2[1] : src_lo; if (sstk) emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); - switch (BPF_SIZE(code)) { - case BPF_W: - /* Load a Word */ - case BPF_H: - /* Load a Half-Word */ - case BPF_B: - /* Load a Byte */ - emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code)); - emit_a32_mov_i(dst_hi, 0, dstk, ctx); - break; - case BPF_DW: - /* Load a double word */ - emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W); - emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W); - break; - } + emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code)); break; /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ case BPF_LD | BPF_ABS | BPF_W: From dd55bfca560733d37f53a04cf628c5d6a7d5d0da Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 13 Jan 2018 12:11:26 +0000 Subject: [PATCH 1485/3326] ARM: net: bpf: clarify tail_call index commit 091f02483df7b56615b524491f404e574c5e0668 upstream. As per 90caccdd8cc0 ("bpf: fix bpf_tail_call() x64 JIT"), the index used for array lookup is defined to be 32-bit wide. Update a misleading comment that suggests it is 64-bit wide. Fixes: 39c13c204bb1 ("arm: eBPF JIT compiler") Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/net/bpf_jit_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 715e7250de862b..323a4df59a6c0e 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1016,7 +1016,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit_a32_mov_i(tmp[1], off, false, ctx); emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx); - /* index (64 bit) */ + /* index is 32-bit for arrays */ emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); /* index >= array->map.max_entries */ emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx); From 9ad970c8a13595e38d3af98114bcbbec6d8a5be4 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Thu, 18 Jan 2018 15:58:21 +0100 Subject: [PATCH 1486/3326] drm/vc4: Fix NULL pointer dereference in vc4_save_hang_state() commit 17b11b76b87afe9f8be199d7a5f442497133e2b0 upstream. When saving BOs in the hang state we skip one entry of the kernel_state->bo[] array, thus leaving it to NULL. This leads to a NULL pointer dereference when, later in this function, we iterate over all BOs to check their ->madv state. 
Fixes: ca26d28bbaa3 ("drm/vc4: improve throughput by pipelining binning and rendering jobs") Signed-off-by: Boris Brezillon Signed-off-by: Eric Anholt Reviewed-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/20180118145821.22344-1-boris.brezillon@free-electrons.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vc4/vc4_gem.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index d0c6bfb68c4ee9..c50debb1986f06 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev) struct vc4_exec_info *exec[2]; struct vc4_bo *bo; unsigned long irqflags; - unsigned int i, j, unref_list_count, prev_idx; + unsigned int i, j, k, unref_list_count; kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL); if (!kernel_state) @@ -182,24 +182,24 @@ vc4_save_hang_state(struct drm_device *dev) return; } - prev_idx = 0; + k = 0; for (i = 0; i < 2; i++) { if (!exec[i]) continue; for (j = 0; j < exec[i]->bo_count; j++) { drm_gem_object_get(&exec[i]->bo[j]->base); - kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base; + kernel_state->bo[k++] = &exec[i]->bo[j]->base; } list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { drm_gem_object_get(&bo->base.base); - kernel_state->bo[j + prev_idx] = &bo->base.base; - j++; + kernel_state->bo[k++] = &bo->base.base; } - prev_idx = j + 1; } + WARN_ON_ONCE(k != state->bo_count); + if (exec[0]) state->start_bin = exec[0]->ct0ca; if (exec[1]) From f703437099b4ec09a643ad0ae2839ca292601166 Mon Sep 17 00:00:00 2001 From: Jim Westfall Date: Sun, 14 Jan 2018 04:18:50 -0800 Subject: [PATCH 1487/3326] net: Allow neigh contructor functions ability to modify the primary_key [ Upstream commit 096b9854c04df86f03b38a97d40b6506e5730919 ] Use n->primary_key instead of pkey to account for the possibility that a neigh constructor function may have modified the primary_key value. Signed-off-by: Jim Westfall Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/core/neighbour.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 16a1a4c4eb57fa..741ae2554190a8 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -532,7 +532,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) nht = neigh_hash_grow(tbl, nht->hash_shift + 1); - hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); + hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); if (n->parms->dead) { rc = ERR_PTR(-EINVAL); @@ -544,7 +544,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, n1 != NULL; n1 = rcu_dereference_protected(n1->next, lockdep_is_held(&tbl->lock))) { - if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) { + if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) { if (want_ref) neigh_hold(n1); rc = n1; From 42d68bf2a42381642ea5ae460c6a5d86a56213f0 Mon Sep 17 00:00:00 2001 From: Jim Westfall Date: Sun, 14 Jan 2018 04:18:51 -0800 Subject: [PATCH 1488/3326] ipv4: Make neigh lookup keys for loopback/point-to-point devices be INADDR_ANY [ Upstream commit cd9ff4de0107c65d69d02253bb25d6db93c3dbc1 ] Map all lookup neigh keys to INADDR_ANY for loopback/point-to-point devices to avoid making an entry for every remote ip the device needs to talk to. This used the be the old behavior but became broken in a263b3093641f (ipv4: Make neigh lookups directly in output packet path) and later removed in 0bb4087cbec0 (ipv4: Fix neigh lookup keying over loopback/point-to-point devices) because it was broken. Signed-off-by: Jim Westfall Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/net/arp.h | 3 +++ net/ipv4/arp.c | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/net/arp.h b/include/net/arp.h index dc8cd47f883b8b..977aabfcdc03bf 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -20,6 +20,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) { + if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) + key = INADDR_ANY; + return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 7c45b889670981..a1d1f50e0e19c0 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey) static int arp_constructor(struct neighbour *neigh) { - __be32 addr = *(__be32 *)neigh->primary_key; + __be32 addr; struct net_device *dev = neigh->dev; struct in_device *in_dev; struct neigh_parms *parms; + u32 inaddr_any = INADDR_ANY; + if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) + memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len); + + addr = *(__be32 *)neigh->primary_key; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (!in_dev) { From c277f3420a638d455162a8bfccb40ebe22ba57b4 Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Fri, 26 Jan 2018 15:14:16 +0300 Subject: [PATCH 1489/3326] dccp: don't restart ccid2_hc_tx_rto_expire() if sk in closed state [ Upstream commit dd5684ecae3bd8e44b644f50e2c12c7e57fdfef5 ] ccid2_hc_tx_rto_expire() timer callback always restarts the timer again and can run indefinitely (unless it is stopped outside), and after commit 120e9dabaf55 ("dccp: defer ccid_hc_tx_delete() at dismantle time"), which moved ccid_hc_tx_delete() (also includes sk_stop_timer()) from dccp_destroy_sock() to sk_destruct(), this started to happen quite often. The timer prevents releasing the socket, as a result, sk_destruct() won't be called. Found with LTP/dccp_ipsec tests running on the bonding device, which later couldn't be unloaded after the tests were completed: unregister_netdevice: waiting for bond0 to become free. Usage count = 148 Fixes: 2a91aa396739 ("[DCCP] CCID2: Initial CCID2 (TCP-Like) implementation") Signed-off-by: Alexey Kodanev Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/dccp/ccids/ccid2.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index e1295d5f2c562e..97791b0b1b5141 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c @@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data) ccid2_pr_debug("RTO_EXPIRE\n"); + if (sk->sk_state == DCCP_CLOSED) + goto out; + /* back-off timer */ hc->tx_rto <<= 1; if (hc->tx_rto > DCCP_RTO_MAX) From 2295b3dd543f9a5a1834e4265d506a5bc0819983 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 22 Jan 2018 20:06:42 +0000 Subject: [PATCH 1490/3326] ipv6: Fix getsockopt() for sockets with default IPV6_AUTOFLOWLABEL [ Upstream commit e9191ffb65d8e159680ce0ad2224e1acbde6985c ] Commit 513674b5a2c9 ("net: reevalulate autoflowlabel setting after sysctl setting") removed the initialisation of ipv6_pinfo::autoflowlabel and added a second flag to indicate whether this field or the net namespace default should be used. 
The getsockopt() handling for this case was not updated, so it currently returns 0 for all sockets for which IPV6_AUTOFLOWLABEL is not explicitly enabled. Fix it to return the effective value, whether that has been set at the socket or net namespace level. Fixes: 513674b5a2c9 ("net: reevalulate autoflowlabel setting after sysctl ...") Signed-off-by: Ben Hutchings Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/net/ipv6.h | 1 + net/ipv6/ip6_output.c | 2 +- net/ipv6/ipv6_sockglue.c | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 35e9dd2d18baa4..9596aa93d6ef3e 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -291,6 +291,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, int flags); int ip6_flowlabel_init(void); void ip6_flowlabel_cleanup(void); +bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np); static inline void fl6_sock_release(struct ip6_flowlabel *fl) { diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 688ba5f7516b37..08446e0ca411fe 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -166,7 +166,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } -static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np) +bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np) { if (!np->autoflowlabel_set) return ip6_default_np_autolabel(net); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 90dbfa78a39075..3b251760cb8c34 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -1324,7 +1324,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, break; case IPV6_AUTOFLOWLABEL: - val = np->autoflowlabel; + val = ip6_autoflowlabel(sock_net(sk), np); break; case IPV6_RECVFRAGSIZE: From 8278804e05f6bcfe3fdfea4a404020752ead15a6 Mon Sep 17 00:00:00 2001 From: Mike Maloney Date: Wed, 10 Jan 2018 12:45:10 -0500 Subject: [PATCH 1491/3326] ipv6: fix udpv6 sendmsg crash caused by too small MTU [ Upstream commit 749439bfac6e1a2932c582e2699f91d329658196 ] The logic in __ip6_append_data() assumes that the MTU is at least large enough for the headers. A device's MTU may be adjusted after being added while sendmsg() is processing data, resulting in __ip6_append_data() seeing any MTU. For an mtu smaller than the size of the fragmentation header, the math results in a negative 'maxfraglen', which causes problems when refragmenting any previous skb in the skb_write_queue, leaving it possibly malformed. Instead sendmsg returns EINVAL when the mtu is calculated to be less than IPV6_MIN_MTU. Found by syzkaller: kernel BUG at ./include/linux/skbuff.h:2064! 
invalid opcode: 0000 [#1] SMP KASAN Dumping ftrace buffer: (ftrace buffer empty) Modules linked in: CPU: 1 PID: 14216 Comm: syz-executor5 Not tainted 4.13.0-rc4+ #2 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 task: ffff8801d0b68580 task.stack: ffff8801ac6b8000 RIP: 0010:__skb_pull include/linux/skbuff.h:2064 [inline] RIP: 0010:__ip6_make_skb+0x18cf/0x1f70 net/ipv6/ip6_output.c:1617 RSP: 0018:ffff8801ac6bf570 EFLAGS: 00010216 RAX: 0000000000010000 RBX: 0000000000000028 RCX: ffffc90003cce000 RDX: 00000000000001b8 RSI: ffffffff839df06f RDI: ffff8801d9478ca0 RBP: ffff8801ac6bf780 R08: ffff8801cc3f1dbc R09: 0000000000000000 R10: ffff8801ac6bf7a0 R11: 43cb4b7b1948a9e7 R12: ffff8801cc3f1dc8 R13: ffff8801cc3f1d40 R14: 0000000000001036 R15: dffffc0000000000 FS: 00007f43d740c700(0000) GS:ffff8801dc100000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f7834984000 CR3: 00000001d79b9000 CR4: 00000000001406e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ip6_finish_skb include/net/ipv6.h:911 [inline] udp_v6_push_pending_frames+0x255/0x390 net/ipv6/udp.c:1093 udpv6_sendmsg+0x280d/0x31a0 net/ipv6/udp.c:1363 inet_sendmsg+0x11f/0x5e0 net/ipv4/af_inet.c:762 sock_sendmsg_nosec net/socket.c:633 [inline] sock_sendmsg+0xca/0x110 net/socket.c:643 SYSC_sendto+0x352/0x5a0 net/socket.c:1750 SyS_sendto+0x40/0x50 net/socket.c:1718 entry_SYSCALL_64_fastpath+0x1f/0xbe RIP: 0033:0x4512e9 RSP: 002b:00007f43d740bc08 EFLAGS: 00000216 ORIG_RAX: 000000000000002c RAX: ffffffffffffffda RBX: 00000000007180a8 RCX: 00000000004512e9 RDX: 000000000000002e RSI: 0000000020d08000 RDI: 0000000000000005 RBP: 0000000000000086 R08: 00000000209c1000 R09: 000000000000001c R10: 0000000000040800 R11: 0000000000000216 R12: 00000000004b9c69 R13: 00000000ffffffff R14: 0000000000000005 R15: 00000000202c2000 Code: 9e 01 fe e9 c5 e8 ff ff e8 7f 9e 01 fe e9 4a ea ff ff 48 89 f7 e8 52 9e 01 fe e9 aa eb ff ff e8 a8 b6 cf fd 0f 0b e8 a1 b6 cf fd <0f> 0b 49 8d 45 78 4d 8d 45 7c 48 89 85 78 fe ff ff 49 8d 85 ba RIP: __skb_pull include/linux/skbuff.h:2064 [inline] RSP: ffff8801ac6bf570 RIP: __ip6_make_skb+0x18cf/0x1f70 net/ipv6/ip6_output.c:1617 RSP: ffff8801ac6bf570 Reported-by: syzbot Signed-off-by: Mike Maloney Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_output.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 08446e0ca411fe..7ef142706d1439 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1206,14 +1206,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, v6_cork->tclass = ipc6->tclass; if (rt->dst.flags & DST_XFRM_TUNNEL) mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? - rt->dst.dev->mtu : dst_mtu(&rt->dst); + READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst); else mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 
- rt->dst.dev->mtu : dst_mtu(rt->dst.path); + READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path); if (np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; } + if (mtu < IPV6_MIN_MTU) + return -EINVAL; cork->base.fragsize = mtu; if (dst_allfrag(rt->dst.path)) cork->base.flags |= IPCORK_ALLFRAG; From 3472170784d849018356e0bcb7d5c993ffc65699 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 11 Jan 2018 22:31:18 -0800 Subject: [PATCH 1492/3326] ipv6: ip6_make_skb() needs to clear cork.base.dst [ Upstream commit 95ef498d977bf44ac094778fd448b98af158a3e6 ] In my last patch, I missed fact that cork.base.dst was not initialized in ip6_make_skb() : If ip6_setup_cork() returns an error, we might attempt a dst_release() on some random pointer. Fixes: 862c03ee1deb ("ipv6: fix possible mem leaks in ipv6_make_skb()") Signed-off-by: Eric Dumazet Reported-by: syzbot Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_output.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 7ef142706d1439..3763dc01e37477 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1735,6 +1735,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, cork.base.flags = 0; cork.base.addr = 0; cork.base.opt = NULL; + cork.base.dst = NULL; v6_cork.opt = NULL; err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6); if (err) { From 2afdce2c76b2ccc2c882b5f5eefc8521c8877dee Mon Sep 17 00:00:00 2001 From: Yuiko Oshino Date: Mon, 15 Jan 2018 13:24:28 -0500 Subject: [PATCH 1493/3326] lan78xx: Fix failure in USB Full Speed [ Upstream commit a5b1379afbfabf91e3a689e82ac619a7157336b3 ] Fix initialize the uninitialized tx_qlen to an appropriate value when USB Full Speed is used. Fixes: 55d7de9de6c3 ("Microchip's LAN7800 family USB 2/3 to 10/100/1000 Ethernet device driver") Signed-off-by: Yuiko Oshino Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/lan78xx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 0161f77641fac8..a8dd1c7a08cbe8 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev) buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; dev->rx_qlen = 4; + dev->tx_qlen = 4; } ret = lan78xx_write_reg(dev, BURST_CAP, buf); From d9bee33e397150b43be9ac2271b813552a94345f Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 19 Jan 2018 11:50:46 +0100 Subject: [PATCH 1494/3326] net: igmp: fix source address check for IGMPv3 reports [ Upstream commit ad23b750933ea7bf962678972a286c78a8fa36aa ] Commit "net: igmp: Use correct source address on IGMPv3 reports" introduced a check to validate the source address of locally generated IGMPv3 packets. Instead of checking the local interface address directly, it uses inet_ifa_match(fl4->saddr, ifa), which checks if the address is on the local subnet (or equal to the point-to-point address if used). This breaks for point-to-point interfaces, so check against ifa->ifa_local directly. Cc: Kevin Cernekee Fixes: a46182b00290 ("net: igmp: Use correct source address on IGMPv3 reports") Reported-by: Sebastian Gottschall Signed-off-by: Felix Fietkau Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/igmp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index c621266e0306df..013fed55b610f3 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev, return htonl(INADDR_ANY); for_ifa(in_dev) { - if (inet_ifa_match(fl4->saddr, ifa)) + if (fl4->saddr == ifa->ifa_local) return fl4->saddr; } endfor_ifa(in_dev); From 450449fff22687db76d7611acb48f94b685bc762 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 18 Jan 2018 19:59:19 -0800 Subject: [PATCH 1495/3326] net: qdisc_pkt_len_init() should be more robust [ Upstream commit 7c68d1a6b4db9012790af7ac0f0fdc0d2083422a ] Without proper validation of DODGY packets, we might very well feed qdisc_pkt_len_init() with invalid GSO packets. tcp_hdrlen() might access out-of-bound data, so let's use skb_header_pointer() and proper checks. Whole story is described in commit d0c081b49137 ("flow_dissector: properly cap thoff field") We have the goal of validating DODGY packets earlier in the stack, so we might very well revert this fix in the future. Signed-off-by: Eric Dumazet Cc: Willem de Bruijn Cc: Jason Wang Reported-by: syzbot+9da69ebac7dddd804552@syzkaller.appspotmail.com Acked-by: Jason Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/dev.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 27357fc1730b93..ffee085f035713 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3128,10 +3128,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb) hdr_len = skb_transport_header(skb) - skb_mac_header(skb); /* + transport layer */ - if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) - hdr_len += tcp_hdrlen(skb); - else - hdr_len += sizeof(struct udphdr); + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { + const struct tcphdr *th; + struct tcphdr _tcphdr; + + th = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_tcphdr), &_tcphdr); + if (likely(th)) + hdr_len += __tcp_hdrlen(th); + } else { + struct udphdr _udphdr; + + if (skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_udphdr), &_udphdr)) + hdr_len += sizeof(struct udphdr); + } if (shinfo->gso_type & SKB_GSO_DODGY) gso_segs = DIV_ROUND_UP(skb->len - hdr_len, From 32e57f8c557faed0cd976abfb800a2f363e6972a Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 18 Jan 2018 16:14:26 -0500 Subject: [PATCH 1496/3326] net: tcp: close sock if net namespace is exiting [ Upstream commit 4ee806d51176ba7b8ff1efd81f271d7252e03a1d ] When a tcp socket is closed, if it detects that its net namespace is exiting, close immediately and do not wait for FIN sequence. For normal sockets, a reference is taken to their net namespace, so it will never exit while the socket is open. However, kernel sockets do not take a reference to their net namespace, so it may begin exiting while the kernel socket is still open. In this case if the kernel socket is a tcp socket, it will stay open trying to complete its close sequence. The sock's dst(s) hold a reference to their interface, which are all transferred to the namespace's loopback interface when the real interfaces are taken down. When the namespace tries to take down its loopback interface, it hangs waiting for all references to the loopback interface to release, which results in messages like: unregister_netdevice: waiting for lo to become free. 
Usage count = 1 These messages continue until the socket finally times out and closes. Since the net namespace cleanup holds the net_mutex while calling its registered pernet callbacks, any new net namespace initialization is blocked until the current net namespace finishes exiting. After this change, the tcp socket notices the exiting net namespace, and closes immediately, releasing its dst(s) and their reference to the loopback interface, which lets the net namespace continue exiting. Link: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1711407 Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=97811 Signed-off-by: Dan Streetman Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/net/net_namespace.h | 10 ++++++++++ net/ipv4/tcp.c | 3 +++ net/ipv4/tcp_timer.c | 15 +++++++++++++++ 3 files changed, 28 insertions(+) diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 10f99dafd5acb1..049008493faf67 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -223,6 +223,11 @@ int net_eq(const struct net *net1, const struct net *net2) return net1 == net2; } +static inline int check_net(const struct net *net) +{ + return atomic_read(&net->count) != 0; +} + void net_drop_ns(void *); #else @@ -247,6 +252,11 @@ int net_eq(const struct net *net1, const struct net *net2) return 1; } +static inline int check_net(const struct net *net) +{ + return 1; +} + #define net_drop_ns NULL #endif diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index a0c72b09cefcdd..2a65d806b562b9 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2273,6 +2273,9 @@ void tcp_close(struct sock *sk, long timeout) tcp_send_active_reset(sk, GFP_ATOMIC); __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); + } else if (!check_net(sock_net(sk))) { + /* Not possible to send reset; just close */ + tcp_set_state(sk, TCP_CLOSE); } } diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index e9af1879cd5360..14ac7df9538013 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -50,11 +50,19 @@ static void tcp_write_err(struct sock *sk) * to prevent DoS attacks. It is called when a retransmission timeout * or zero probe timeout occurs on orphaned socket. * + * Also close if our net namespace is exiting; in that case there is no + * hope of ever communicating again since all netns interfaces are already + * down (or about to be down), and we need to release our dst references, + * which have been moved to the netns loopback interface, so the namespace + * can finish exiting. This condition is only possible if we are a kernel + * socket, as those do not hold references to the namespace. + * * Criteria is still not confirmed experimentally and may change. * We kill the socket, if: * 1. If number of orphaned sockets exceeds an administratively configured * limit. * 2. If we have strong memory pressure. + * 3. If our net namespace is exiting. 
*/ static int tcp_out_of_resources(struct sock *sk, bool do_reset) { @@ -83,6 +91,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset) __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); return 1; } + + if (!check_net(sock_net(sk))) { + /* Not possible to send reset; just close */ + tcp_done(sk); + return 1; + } + return 0; } From d3048a12f3eccc00d62db373df4cd50b1218f6f1 Mon Sep 17 00:00:00 2001 From: "r.hering@avm.de" Date: Fri, 12 Jan 2018 15:42:06 +0100 Subject: [PATCH 1497/3326] net/tls: Fix inverted error codes to avoid endless loop [ Upstream commit 30be8f8dba1bd2aff73e8447d59228471233a3d4 ] sendfile() calls can hang endless with using Kernel TLS if a socket error occurs. Socket error codes must be inverted by Kernel TLS before returning because they are stored with positive sign. If returned non-inverted they are interpreted as number of bytes sent, causing endless looping of the splice mechanic behind sendfile(). Signed-off-by: Robert Hering Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/net/tls.h | 2 +- net/tls/tls_sw.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/net/tls.h b/include/net/tls.h index c06db1eadac217..df950383b8c187 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -168,7 +168,7 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx) static inline void tls_err_abort(struct sock *sk) { - sk->sk_err = -EBADMSG; + sk->sk_err = EBADMSG; sk->sk_error_report(sk); } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index f00383a3762205..81cc4325682fd9 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -407,7 +407,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) while (msg_data_left(msg)) { if (sk->sk_err) { - ret = sk->sk_err; + ret = -sk->sk_err; goto send_end; } @@ -560,7 +560,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page, size_t copy, required_size; if (sk->sk_err) { - ret = sk->sk_err; + ret = -sk->sk_err; goto sendpage_end; } From 6ea6b86ae73e1b36ddba3015739cedd24b062523 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 24 Jan 2018 19:37:37 -0800 Subject: [PATCH 1498/3326] net: vrf: Add support for sends to local broadcast address [ Upstream commit 1e19c4d689dc1e95bafd23ef68fbc0c6b9e05180 ] Sukumar reported that sends to the local broadcast address (255.255.255.255) are broken. Check for the address in vrf driver and do not redirect to the VRF device - similar to multicast packets. With this change sockets can use SO_BINDTODEVICE to specify an egress interface and receive responses. Note: the egress interface can not be a VRF device but needs to be the enslaved device. https://bugzilla.kernel.org/show_bug.cgi?id=198521 Reported-by: Sukumar Gopalakrishnan Signed-off-by: David Ahern Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/vrf.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7dc3bcac350666..67ecf2425b889c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -674,8 +674,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, struct sock *sk, struct sk_buff *skb) { - /* don't divert multicast */ - if (ipv4_is_multicast(ip_hdr(skb)->daddr)) + /* don't divert multicast or local broadcast */ + if (ipv4_is_multicast(ip_hdr(skb)->daddr) || + ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; if (qdisc_tx_is_default(vrf_dev)) From d32e5740001972c1bb193dd60af02721d047a17e Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 22 Jan 2018 18:06:37 +0100 Subject: [PATCH 1499/3326] pppoe: take ->needed_headroom of lower device into account on xmit [ Upstream commit 02612bb05e51df8489db5e94d0cf8d1c81f87b0c ] In pppoe_sendmsg(), reserving dev->hard_header_len bytes of headroom was probably fine before the introduction of ->needed_headroom in commit f5184d267c1a ("net: Allow netdevices to specify needed head/tailroom"). But now, virtual devices typically advertise the size of their overhead in dev->needed_headroom, so we must also take it into account in skb_reserve(). Allocation size of skb is also updated to take dev->needed_tailroom into account and replace the arbitrary 32 bytes with the real size of a PPPoE header. This issue was discovered by syzbot, who connected a pppoe socket to a gre device which had dev->header_ops->create == ipgre_header and dev->hard_header_len == 0. Therefore, PPPoE didn't reserve any headroom, and dev_hard_header() crashed when ipgre_header() tried to prepend its header to skb->data. skbuff: skb_under_panic: text:000000001d390b3a len:31 put:24 head:00000000d8ed776f data:000000008150e823 tail:0x7 end:0xc0 dev:gre0 ------------[ cut here ]------------ kernel BUG at net/core/skbuff.c:104! 
invalid opcode: 0000 [#1] SMP KASAN Dumping ftrace buffer: (ftrace buffer empty) Modules linked in: CPU: 1 PID: 3670 Comm: syzkaller801466 Not tainted 4.15.0-rc7-next-20180115+ #97 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 RIP: 0010:skb_panic+0x162/0x1f0 net/core/skbuff.c:100 RSP: 0018:ffff8801d9bd7840 EFLAGS: 00010282 RAX: 0000000000000083 RBX: ffff8801d4f083c0 RCX: 0000000000000000 RDX: 0000000000000083 RSI: 1ffff1003b37ae92 RDI: ffffed003b37aefc RBP: ffff8801d9bd78a8 R08: 1ffff1003b37ae8a R09: 0000000000000000 R10: 0000000000000001 R11: 0000000000000000 R12: ffffffff86200de0 R13: ffffffff84a981ad R14: 0000000000000018 R15: ffff8801d2d34180 FS: 00000000019c4880(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000208bc000 CR3: 00000001d9111001 CR4: 00000000001606e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: skb_under_panic net/core/skbuff.c:114 [inline] skb_push+0xce/0xf0 net/core/skbuff.c:1714 ipgre_header+0x6d/0x4e0 net/ipv4/ip_gre.c:879 dev_hard_header include/linux/netdevice.h:2723 [inline] pppoe_sendmsg+0x58e/0x8b0 drivers/net/ppp/pppoe.c:890 sock_sendmsg_nosec net/socket.c:630 [inline] sock_sendmsg+0xca/0x110 net/socket.c:640 sock_write_iter+0x31a/0x5d0 net/socket.c:909 call_write_iter include/linux/fs.h:1775 [inline] do_iter_readv_writev+0x525/0x7f0 fs/read_write.c:653 do_iter_write+0x154/0x540 fs/read_write.c:932 vfs_writev+0x18a/0x340 fs/read_write.c:977 do_writev+0xfc/0x2a0 fs/read_write.c:1012 SYSC_writev fs/read_write.c:1085 [inline] SyS_writev+0x27/0x30 fs/read_write.c:1082 entry_SYSCALL_64_fastpath+0x29/0xa0 Admittedly PPPoE shouldn't be allowed to run on non Ethernet-like interfaces, but reserving space for ->needed_headroom is a more fundamental issue that needs to be addressed first. Same problem exists for __pppoe_xmit(), which also needs to take dev->needed_headroom into account in skb_cow_head(). Fixes: f5184d267c1a ("net: Allow netdevices to specify needed head/tailroom") Reported-by: syzbot+ed0838d0fa4c4f2b528e20286e6dc63effc7c14d@syzkaller.appspotmail.com Signed-off-by: Guillaume Nault Reviewed-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ppp/pppoe.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 4e1da1645b154d..5aa59f41bf8c39 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, struct pppoe_hdr *ph; struct net_device *dev; char *start; + int hlen; lock_sock(sk); if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { @@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, if (total_len > (dev->mtu + dev->hard_header_len)) goto end; - - skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, - 0, GFP_KERNEL); + hlen = LL_RESERVED_SPACE(dev); + skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len + + dev->needed_tailroom, 0, GFP_KERNEL); if (!skb) { error = -ENOMEM; goto end; } /* Reserve space for headers. */ - skb_reserve(skb, dev->hard_header_len); + skb_reserve(skb, hlen); skb_reset_network_header(skb); skb->dev = dev; @@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) /* Copy the data if there is no space for the header or if it's * read-only. 
*/ - if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) + if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph))) goto abort; __skb_push(skb, sizeof(*ph)); From 72d4f3abd6d3521f5cd978b63f9301051f127812 Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Fri, 26 Jan 2018 01:53:26 +0100 Subject: [PATCH 1500/3326] r8169: fix memory corruption on retrieval of hardware statistics. [ Upstream commit a78e93661c5fd30b9e1dee464b2f62f966883ef7 ] Hardware statistics retrieval hurts in tight invocation loops. Avoid extraneous write and enforce strict ordering of writes targeted to the tally counters dump area address registers. Signed-off-by: Francois Romieu Tested-by: Oliver Freyermuth Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/realtek/r8169.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 9541465e43e975..958ff931e790a9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2239,19 +2239,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) void __iomem *ioaddr = tp->mmio_addr; dma_addr_t paddr = tp->counters_phys_addr; u32 cmd; - bool ret; RTL_W32(CounterAddrHigh, (u64)paddr >> 32); + RTL_R32(CounterAddrHigh); cmd = (u64)paddr & DMA_BIT_MASK(32); RTL_W32(CounterAddrLow, cmd); RTL_W32(CounterAddrLow, cmd | counter_cmd); - ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); - - RTL_W32(CounterAddrLow, 0); - RTL_W32(CounterAddrHigh, 0); - - return ret; + return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); } static bool rtl8169_reset_counters(struct net_device *dev) From f2e957097905dd53a9ca513702fe0c9eecb73ddc Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 15 Jan 2018 17:02:00 +0800 Subject: [PATCH 1501/3326] sctp: do not allow the v4 socket to bind a v4mapped v6 address [ Upstream commit c5006b8aa74599ce19104b31d322d2ea9ff887cc ] The check in sctp_sockaddr_af is not robust enough to forbid binding a v4mapped v6 addr on a v4 socket. The worse thing is that v4 socket's bind_verify would not convert this v4mapped v6 addr to a v4 addr. syzbot even reported a crash as the v4 socket bound a v6 addr. This patch is to fix it by doing the common sa.sa_family check first, then AF_INET check for v4mapped v6 addrs. Fixes: 7dab83de50c7 ("sctp: Support ipv6only AF_INET6 sockets.") Reported-by: syzbot+7b7b518b1228d2743963@syzkaller.appspotmail.com Acked-by: Neil Horman Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index df806b8819aa4c..7b258bf60eca9a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -334,16 +334,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, if (len < sizeof (struct sockaddr)) return NULL; + if (!opt->pf->af_supported(addr->sa.sa_family, opt)) + return NULL; + /* V4 mapped address are really of AF_INET family */ if (addr->sa.sa_family == AF_INET6 && - ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { - if (!opt->pf->af_supported(AF_INET, opt)) - return NULL; - } else { - /* Does this PF support this AF? 
*/ - if (!opt->pf->af_supported(addr->sa.sa_family, opt)) - return NULL; - } + ipv6_addr_v4mapped(&addr->v6.sin6_addr) && + !opt->pf->af_supported(AF_INET, opt)) + return NULL; /* If we get this far, af is valid. */ af = sctp_get_af_specific(addr->sa.sa_family); From a940c196461b8430dd21024203d3916032096839 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 15 Jan 2018 17:01:36 +0800 Subject: [PATCH 1502/3326] sctp: return error if the asoc has been peeled off in sctp_wait_for_sndbuf [ Upstream commit a0ff660058b88d12625a783ce9e5c1371c87951f ] After commit cea0cc80a677 ("sctp: use the right sk after waking up from wait_buf sleep"), it may change to lock another sk if the asoc has been peeled off in sctp_wait_for_sndbuf. However, the asoc's new sk could be already closed elsewhere, as it's in the sendmsg context of the old sk that can't avoid the new sk's closing. If the sk's last one refcnt is held by this asoc, later on after putting this asoc, the new sk will be freed, while under it's own lock. This patch is to revert that commit, but fix the old issue by returning error under the old sk's lock. Fixes: cea0cc80a677 ("sctp: use the right sk after waking up from wait_buf sleep") Reported-by: syzbot+ac6ea7baa4432811eb50@syzkaller.appspotmail.com Signed-off-by: Xin Long Acked-by: Neil Horman Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 7b258bf60eca9a..7d432266645c32 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -84,7 +84,7 @@ static int sctp_writeable(struct sock *sk); static void sctp_wfree(struct sk_buff *skb); static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, - size_t msg_len, struct sock **orig_sk); + size_t msg_len); static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); static int sctp_wait_for_accept(struct sock *sk, long timeo); @@ -1961,7 +1961,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); if (!sctp_wspace(asoc)) { /* sk can be changed by peel off when waiting for buf. */ - err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk); + err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); if (err) { if (err == -ESRCH) { /* asoc is already dead. */ @@ -7825,12 +7825,12 @@ void sctp_sock_rfree(struct sk_buff *skb) /* Helper function to wait for space in the sndbuf. */ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, - size_t msg_len, struct sock **orig_sk) + size_t msg_len) { struct sock *sk = asoc->base.sk; - int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); + int err = 0; pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, *timeo_p, msg_len); @@ -7859,17 +7859,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, release_sock(sk); current_timeo = schedule_timeout(current_timeo); lock_sock(sk); - if (sk != asoc->base.sk) { - release_sock(sk); - sk = asoc->base.sk; - lock_sock(sk); - } + if (sk != asoc->base.sk) + goto do_error; *timeo_p = current_timeo; } out: - *orig_sk = sk; finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. 
*/ From 8de7fb3dfb0d381358fbfc8632d0818d8a1fec7e Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Wed, 10 Jan 2018 12:50:25 -0800 Subject: [PATCH 1503/3326] tipc: fix a memory leak in tipc_nl_node_get_link() [ Upstream commit 59b36613e85fb16ebf9feaf914570879cd5c2a21 ] When tipc_node_find_by_name() fails, the nlmsg is not freed. While on it, switch to a goto label to properly free it. Fixes: be9c086715c ("tipc: narrow down exposure of struct tipc_node") Reported-by: Dmitry Vyukov Cc: Jon Maloy Cc: Ying Xue Signed-off-by: Cong Wang Acked-by: Ying Xue Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/tipc/node.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/net/tipc/node.c b/net/tipc/node.c index 198dbc7adbe126..f6c5743c170e74 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1848,36 +1848,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) if (strcmp(name, tipc_bclink_name) == 0) { err = tipc_nl_add_bc_link(net, &msg); - if (err) { - nlmsg_free(msg.skb); - return err; - } + if (err) + goto err_free; } else { int bearer_id; struct tipc_node *node; struct tipc_link *link; node = tipc_node_find_by_name(net, name, &bearer_id); - if (!node) - return -EINVAL; + if (!node) { + err = -EINVAL; + goto err_free; + } tipc_node_read_lock(node); link = node->links[bearer_id].link; if (!link) { tipc_node_read_unlock(node); - nlmsg_free(msg.skb); - return -EINVAL; + err = -EINVAL; + goto err_free; } err = __tipc_nl_add_link(net, &msg, link, 0); tipc_node_read_unlock(node); - if (err) { - nlmsg_free(msg.skb); - return err; - } + if (err) + goto err_free; } return genlmsg_reply(msg.skb, info); + +err_free: + nlmsg_free(msg.skb); + return err; } int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info) From 2ac1797da0f79013ef166df6a5124db267b9e1b6 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Tue, 9 Jan 2018 11:41:10 +0200 Subject: [PATCH 1504/3326] {net,ib}/mlx5: Don't disable local loopback multicast traffic when needed [ Upstream commit 8978cc921fc7fad3f4d6f91f1da01352aeeeff25 ] There are systems platform information management interfaces (such as HOST2BMC) for which we cannot disable local loopback multicast traffic. Separate disable_local_lb_mc and disable_local_lb_uc capability bits so driver will not disable multicast loopback traffic if not supported. (It is expected that Firmware will not set disable_local_lb_mc if HOST2BMC is running for example.) Function mlx5_nic_vport_update_local_lb will do best effort to disable/enable UC/MC loopback traffic and return success only in case it succeeded to changed all allowed by Firmware. Adapt mlx5_ib and mlx5e to support the new cap bits. 
Fixes: 2c43c5a036be ("net/mlx5e: Enable local loopback in loopback selftest") Fixes: c85023e153e3 ("IB/mlx5: Add raw ethernet local loopback support") Fixes: bded747bb432 ("net/mlx5: Add raw ethernet local loopback firmware command") Signed-off-by: Eran Ben Elisha Cc: kernel-team@fb.com Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/mlx5/main.c | 9 ++++--- .../ethernet/mellanox/mlx5/core/en_selftest.c | 27 ++++++++++++------- .../net/ethernet/mellanox/mlx5/core/main.c | 3 +-- .../net/ethernet/mellanox/mlx5/core/vport.c | 22 ++++++++++----- include/linux/mlx5/mlx5_ifc.h | 5 ++-- 5 files changed, 44 insertions(+), 22 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 30d479f87cb8ae..fb5302ee57c755 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1276,7 +1276,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn) return err; if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || - !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) + (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && + !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) return err; mutex_lock(&dev->lb_mutex); @@ -1294,7 +1295,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn) mlx5_core_dealloc_transport_domain(dev->mdev, tdn); if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || - !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) + (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && + !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) return; mutex_lock(&dev->lb_mutex); @@ -4161,7 +4163,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) } if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && - MLX5_CAP_GEN(mdev, disable_local_lb)) + (MLX5_CAP_GEN(mdev, disable_local_lb_uc) || + MLX5_CAP_GEN(mdev, disable_local_lb_mc))) mutex_init(&dev->lb_mutex); dev->ib_active = true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 1f1f8af87d4df8..5a4608281f38d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, int err = 0; /* Temporarily enable local_lb */ - if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { - mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb); - if (!lbtp->local_lb) - mlx5_nic_vport_update_local_lb(priv->mdev, true); + err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb); + if (err) + return err; + + if (!lbtp->local_lb) { + err = mlx5_nic_vport_update_local_lb(priv->mdev, true); + if (err) + return err; } err = mlx5e_refresh_tirs(priv, true); if (err) - return err; + goto out; lbtp->loopback_ok = false; init_completion(&lbtp->comp); @@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, lbtp->pt.dev = priv->netdev; lbtp->pt.af_packet_priv = lbtp; dev_add_pack(&lbtp->pt); + + return 0; + +out: + if (!lbtp->local_lb) + mlx5_nic_vport_update_local_lb(priv->mdev, false); + return err; } static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, struct mlx5e_lbt_priv *lbtp) { - if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { - if (!lbtp->local_lb) - mlx5_nic_vport_update_local_lb(priv->mdev, false); - } + if (!lbtp->local_lb) + mlx5_nic_vport_update_local_lb(priv->mdev, false); dev_remove_pack(&lbtp->pt); 
mlx5e_refresh_tirs(priv, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8bfc37e4ec873d..4ddd632d10f995 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -577,8 +577,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev) int ret = 0; /* Disable local_lb by default */ - if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && - MLX5_CAP_GEN(dev, disable_local_lb)) + if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) ret = mlx5_nic_vport_update_local_lb(dev, false); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index d653b0025b13ea..a1296a62497dab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -908,23 +908,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable) void *in; int err; - mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable"); + if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) && + !MLX5_CAP_GEN(mdev, disable_local_lb_uc)) + return 0; + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; - MLX5_SET(modify_nic_vport_context_in, in, - field_select.disable_mc_local_lb, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.disable_mc_local_lb, !enable); - - MLX5_SET(modify_nic_vport_context_in, in, - field_select.disable_uc_local_lb, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.disable_uc_local_lb, !enable); + if (MLX5_CAP_GEN(mdev, disable_local_lb_mc)) + MLX5_SET(modify_nic_vport_context_in, in, + field_select.disable_mc_local_lb, 1); + + if (MLX5_CAP_GEN(mdev, disable_local_lb_uc)) + MLX5_SET(modify_nic_vport_context_in, in, + field_select.disable_uc_local_lb, 1); + err = mlx5_modify_nic_vport_context(mdev, in, inlen); + if (!err) + mlx5_core_dbg(mdev, "%s local_lb\n", + enable ? "enable" : "disable"); + kvfree(in); return err; } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index c8091f06eaa4c1..f3765155fa4d9a 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1023,8 +1023,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; - u8 disable_local_lb[0x1]; - u8 reserved_at_3e2[0x9]; + u8 disable_local_lb_uc[0x1]; + u8 disable_local_lb_mc[0x1]; + u8 reserved_at_3e3[0x8]; u8 log_max_vlan_list[0x5]; u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; From ce1e51d842baa8cf55a05a26933402e55d9b6217 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 4 Jan 2018 04:35:51 +0200 Subject: [PATCH 1505/3326] net/mlx5: Fix get vector affinity helper function [ Upstream commit 05e0cc84e00c54fb152d1f4b86bc211823a83d0c ] mlx5_get_vector_affinity used to call pci_irq_get_affinity and after reverting the patch that sets the device affinity via PCI_IRQ_AFFINITY API, calling pci_irq_get_affinity becomes useless and it breaks RDMA mlx5 users. To fix this, this patch provides an alternative way to retrieve IRQ vector affinity using legacy IRQ API, following smp_affinity read procfs implementation. 
Fixes: 231243c82793 ("Revert mlx5: move affinity hints assignments to generic code") Fixes: a435393acafb ("mlx5: move affinity hints assignments to generic code") Cc: Sagi Grimberg Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- include/linux/mlx5/driver.h | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 8b3d0103c03aae..a13525daf09b35 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -1194,7 +1195,23 @@ enum { static inline const struct cpumask * mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) { - return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector); + const struct cpumask *mask; + struct irq_desc *desc; + unsigned int irq; + int eqn; + int err; + + err = mlx5_vector2eqn(dev, vector, &eqn, &irq); + if (err) + return NULL; + + desc = irq_to_desc(irq); +#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK + mask = irq_data_get_effective_affinity_mask(&desc->irq_data); +#else + mask = desc->irq_common_data.affinity; +#endif + return mask; } #endif /* MLX5_DRIVER_H */ From 759cd103ddb1df8ca0cb46e59e1c2cabb139fa27 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Wed, 10 Jan 2018 16:24:45 +0100 Subject: [PATCH 1506/3326] ppp: unlock all_ppp_mutex before registering device [ Upstream commit 0171c41835591e9aa2e384b703ef9a6ae367c610 ] ppp_dev_uninit(), which is the .ndo_uninit() handler of PPP devices, needs to lock pn->all_ppp_mutex. Therefore we mustn't call register_netdevice() with pn->all_ppp_mutex already locked, or we'd deadlock in case register_netdevice() fails and calls .ndo_uninit(). Fortunately, we can unlock pn->all_ppp_mutex before calling register_netdevice(). This lock protects pn->units_idr, which isn't used in the device registration process. However, keeping pn->all_ppp_mutex locked during device registration did ensure that no device in transient state would be published in pn->units_idr. In practice, unlocking it before calling register_netdevice() doesn't change this property: ppp_unit_register() is called with 'ppp_mutex' locked and all searches done in pn->units_idr hold this lock too. Fixes: 8cb775bc0a34 ("ppp: fix device unregistration upon netns deletion") Reported-and-tested-by: syzbot+367889b9c9e279219175@syzkaller.appspotmail.com Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ppp/ppp_generic.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index bf14c51f35e17d..8c6b8918ec318a 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1003,17 +1003,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set) if (!ifname_is_set) snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); + mutex_unlock(&pn->all_ppp_mutex); + ret = register_netdevice(ppp->dev); if (ret < 0) goto err_unit; atomic_inc(&ppp_unit_count); - mutex_unlock(&pn->all_ppp_mutex); - return 0; err_unit: + mutex_lock(&pn->all_ppp_mutex); unit_put(&pn->units_idr, ppp->file.index); err: mutex_unlock(&pn->all_ppp_mutex); From ace99769a1f9f94a204ebaf5d946b4cf1f049153 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Fri, 19 Jan 2018 20:23:50 +0100 Subject: [PATCH 1507/3326] be2net: restore properly promisc mode after queues reconfiguration [ Upstream commit 52acf06451930eb4cefabd5ecea56e2d46c32f76 ] The commit 622190669403 ("be2net: Request RSS capability of Rx interface depending on number of Rx rings") modified be_update_queues() so the IFACE (HW representation of the netdevice) is destroyed and then re-created. This causes a regression because potential promiscuous mode is not restored properly during be_open() because the driver thinks that the HW has promiscuous mode already enabled. Note that Lancer is not affected by this bug because RX-filter flags are disabled during be_close() for this chipset. Cc: Sathya Perla Cc: Ajit Khaparde Cc: Sriharsha Basavapatna Cc: Somnath Kotur Fixes: 622190669403 ("be2net: Request RSS capability of Rx interface depending on number of Rx rings") Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/emulex/benet/be_main.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0e3d9f39a80756..1b03c32afc1f00 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter) be_schedule_worker(adapter); + /* + * The IF was destroyed and re-created. We need to clear + * all promiscuous flags valid for the destroyed IF. + * Without this promisc mode is not restored during + * be_open() because the driver thinks that it is + * already enabled in HW. + */ + adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS; + if (netif_running(netdev)) status = be_open(netdev); From 8af27b14b9c9028ad486b7f73c7466164d20ae40 Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Thu, 18 Jan 2018 20:51:12 +0300 Subject: [PATCH 1508/3326] ip6_gre: init dev->mtu and dev->hard_header_len correctly [ Upstream commit 128bb975dc3c25d00de04e503e2fe0a780d04459 ] Commit b05229f44228 ("gre6: Cleanup GREv6 transmit path, call common GRE functions") moved dev->mtu initialization from ip6gre_tunnel_setup() to ip6gre_tunnel_init(), as a result, the previously set values, before ndo_init(), are reset in the following cases: * rtnl_create_link() can update dev->mtu from IFLA_MTU parameter. * ip6gre_tnl_link_config() is invoked before ndo_init() in netlink and ioctl setup, so ndo_init() can reset MTU adjustments with the lower device MTU as well, dev->mtu and dev->hard_header_len. 
Not applicable for ip6gretap because it has one more call to ip6gre_tnl_link_config(tunnel, 1) in ip6gre_tap_init(). Fix the first case by updating dev->mtu with 'tb[IFLA_MTU]' parameter if a user sets it manually on a device creation, and fix the second one by moving ip6gre_tnl_link_config() call after register_netdevice(). Fixes: b05229f44228 ("gre6: Cleanup GREv6 transmit path, call common GRE functions") Fixes: db2ec95d1ba4 ("ip6_gre: Fix MTU setting") Signed-off-by: Alexey Kodanev Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_gre.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 7a2df6646486ae..5b4870caf26809 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -337,11 +337,12 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, nt->dev = dev; nt->net = dev_net(dev); - ip6gre_tnl_link_config(nt, 1); if (register_netdevice(dev) < 0) goto failed_free; + ip6gre_tnl_link_config(nt, 1); + /* Can use a lockless transmit, unless we generate output sequences */ if (!(nt->parms.o_flags & TUNNEL_SEQ)) dev->features |= NETIF_F_LLTX; @@ -1307,7 +1308,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[], static int ip6gre_tap_init(struct net_device *dev) { - struct ip6_tnl *tunnel; int ret; ret = ip6gre_tunnel_init_common(dev); @@ -1316,10 +1316,6 @@ static int ip6gre_tap_init(struct net_device *dev) dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - tunnel = netdev_priv(dev); - - ip6gre_tnl_link_config(tunnel, 1); - return 0; } @@ -1411,12 +1407,16 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev, nt->dev = dev; nt->net = dev_net(dev); - ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); err = register_netdevice(dev); if (err) goto out; + ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); + + if (tb[IFLA_MTU]) + ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); + dev_hold(dev); ip6gre_tunnel_link(ign, nt); From dd7e1cbd26183e4b8c816fb8b9ecc6356b0cb19d Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 19 Jan 2018 09:29:18 -0500 Subject: [PATCH 1509/3326] gso: validate gso_type in GSO handlers [ Upstream commit 121d57af308d0cf943f08f4738d24d3966c38cd9 ] Validate gso_type during segmentation as SKB_GSO_DODGY sources may pass packets where the gso_type does not match the contents. Syzkaller was able to enter the SCTP gso handler with a packet of gso_type SKB_GSO_TCPV4. On entry of transport layer gso handlers, verify that the gso_type matches the transport protocol. Fixes: 90017accff61 ("sctp: Add GSO support") Link: http://lkml.kernel.org/r/<001a1137452496ffc305617e5fe0@google.com> Reported-by: syzbot+fee64147a25aecd48055@syzkaller.appspotmail.com Signed-off-by: Willem de Bruijn Acked-by: Jason Wang Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/esp4_offload.c | 3 +++ net/ipv4/tcp_offload.c | 3 +++ net/ipv4/udp_offload.c | 3 +++ net/ipv6/esp6_offload.c | 3 +++ net/ipv6/tcpv6_offload.c | 3 +++ net/ipv6/udp_offload.c | 3 +++ net/sctp/offload.c | 3 +++ 7 files changed, 21 insertions(+) diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index f8b918c766b0af..56c49623bb9d73 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -121,6 +121,9 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb, if (!xo) goto out; + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP)) + goto out; + seq = xo->seq.low; x = skb->sp->xvec[skb->sp->len - 1]; diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index b6a2aa1dcf56cb..4d58e2ce0b5b18 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, netdev_features_t features) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)) + return ERR_PTR(-EINVAL); + if (!pskb_may_pull(skb, sizeof(struct tcphdr))) return ERR_PTR(-EINVAL); diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 01801b77bd0da4..ea6e6e7df0eec8 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, goto out; } + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP)) + goto out; + if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto out; diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 333a478aa16104..1ea9d794447e78 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -148,6 +148,9 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb, if (!xo) goto out; + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP)) + goto out; + seq = xo->seq.low; x = skb->sp->xvec[skb->sp->len - 1]; diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index d883c9204c01d5..278e49cd67d4e2 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb, { struct tcphdr *th; + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) + return ERR_PTR(-EINVAL); + if (!pskb_may_pull(skb, sizeof(*th))) return ERR_PTR(-EINVAL); diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index a0f89ad76f9d22..2a04dc9c781b5f 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, const struct ipv6hdr *ipv6h; struct udphdr *uh; + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP)) + goto out; + if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto out; diff --git a/net/sctp/offload.c b/net/sctp/offload.c index 275925b93b2909..35bc7106d1827a 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c @@ -45,6 +45,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb, struct sk_buff *segs = ERR_PTR(-EINVAL); struct sctphdr *sh; + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)) + goto out; + sh = sctp_hdr(skb); if (!pskb_may_pull(skb, sizeof(*sh))) goto out; From 8496f7dbb1e5210e272b3a0e6f1fd20c28c8890e Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 24 Jan 2018 10:02:09 +0100 Subject: [PATCH 1510/3326] mlxsw: spectrum_router: Don't log an error on missing neighbor [ Upstream commit 1ecdaea02ca6bfacf2ecda500dc1af51e9780c42 ] Driver periodically samples all neighbors configured in device in order to update 
the kernel regarding their state. When finding an entry configured in HW that doesn't show in neigh_lookup() driver logs an error message. This introduces a race when removing multiple neighbors - it's possible that a given entry would still be configured in HW as its removal is still being processed but is already removed from the kernel's neighbor tables. Simply remove the error message and gracefully accept such events. Fixes: c723c735fa6b ("mlxsw: spectrum_router: Periodically update the kernel's neigh table") Fixes: 60f040ca11b9 ("mlxsw: spectrum_router: Periodically dump active IPv6 neighbours") Signed-off-by: Yuval Mintz Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index c23cc51bb5a5ba..7bef8067646438 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1531,11 +1531,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, dipn = htonl(dip); dev = mlxsw_sp->router->rifs[rif]->dev; n = neigh_lookup(&arp_tbl, &dipn, dev); - if (!n) { - netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n", - &dip); + if (!n) return; - } netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip); neigh_event_send(n, NULL); @@ -1562,11 +1559,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, dev = mlxsw_sp->router->rifs[rif]->dev; n = neigh_lookup(&nd_tbl, &dip, dev); - if (!n) { - netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n", - &dip); + if (!n) return; - } netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); neigh_event_send(n, NULL); From 51c1f513fe965c5136be85d8af46d99166e0b121 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Mon, 15 Jan 2018 11:37:29 -0800 Subject: [PATCH 1511/3326] tun: fix a memory leak for tfile->tx_array [ Upstream commit 4df0bfc79904b7169dc77dcce44598b1545721f9 ] tfile->tun could be detached before we close the tun fd, via tun_detach_all(), so it should not be used to check for tfile->tx_array. As Jason suggested, we probably have to clean it up unconditionally both in __tun_deatch() and tun_detach_all(), but this requires to check if it is initialized or not. Currently skb_array_cleanup() doesn't have such a check, so I check it in the caller and introduce a helper function, it is a bit ugly but we can always improve it in net-next. Reported-by: Dmitry Vyukov Fixes: 1576d9860599 ("tun: switch to use skb array for tx") Cc: Jason Wang Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/tun.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index c91b110f21699b..fa51b7b0e9eaba 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -534,6 +534,14 @@ static void tun_queue_purge(struct tun_file *tfile) skb_queue_purge(&tfile->sk.sk_error_queue); } +static void tun_cleanup_tx_array(struct tun_file *tfile) +{ + if (tfile->tx_array.ring.queue) { + skb_array_cleanup(&tfile->tx_array); + memset(&tfile->tx_array, 0, sizeof(tfile->tx_array)); + } +} + static void __tun_detach(struct tun_file *tfile, bool clean) { struct tun_file *ntfile; @@ -575,8 +583,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); } - if (tun) - skb_array_cleanup(&tfile->tx_array); + tun_cleanup_tx_array(tfile); sock_put(&tfile->sk); } } @@ -616,11 +623,13 @@ static void tun_detach_all(struct net_device *dev) /* Drop read queue */ tun_queue_purge(tfile); sock_put(&tfile->sk); + tun_cleanup_tx_array(tfile); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_enable_queue(tfile); tun_queue_purge(tfile); sock_put(&tfile->sk); + tun_cleanup_tx_array(tfile); } BUG_ON(tun->numdisabled != 0); @@ -2624,6 +2633,8 @@ static int tun_chr_open(struct inode *inode, struct file * file) sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); + memset(&tfile->tx_array, 0, sizeof(tfile->tx_array)); + return 0; } From 80f327285cab6ab8e10c3ea03278add5dd99fb02 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jan 2018 14:21:13 -0800 Subject: [PATCH 1512/3326] flow_dissector: properly cap thoff field [ Upstream commit d0c081b49137cd3200f2023c0875723be66e7ce5 ] syzbot reported yet another crash [1] that is caused by insufficient validation of DODGY packets. Two bugs are happening here to trigger the crash. 1) Flow dissection leaves with incorrect thoff field. 2) skb_probe_transport_header() sets transport header to this invalid thoff, even if pointing after skb valid data. 3) qdisc_pkt_len_init() reads out-of-bound data because it trusts tcp_hdrlen(skb) Possible fixes : - Full flow dissector validation before injecting bad DODGY packets in the stack. This approach was attempted here : https://patchwork.ozlabs.org/patch/ 861874/ - Have more robust functions in the core. This might be needed anyway for stable versions. This patch fixes the flow dissection issue. 
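The essence of the fix is that a transport-header offset coming from an untrusted (DODGY) source must be clamped to the amount of data that actually exists before anything parses a header at that offset. A minimal userspace sketch of that clamping idea, with cap_thoff() as a purely illustrative helper rather than a kernel function:

	#include <stdint.h>
	#include <stdio.h>

	/* Clamp an untrusted transport-header offset to the data we really have. */
	static uint16_t cap_thoff(uint16_t thoff, uint16_t data_len)
	{
		return thoff < data_len ? thoff : data_len;
	}

	int main(void)
	{
		uint8_t pkt[64] = { 0 };
		uint16_t bogus_thoff = 4000;	/* claimed by untrusted metadata */
		uint16_t thoff = cap_thoff(bogus_thoff, sizeof(pkt));

		/* Any later "parse the header at thoff" now stays inside pkt[]. */
		printf("capped thoff: %u (buffer is %zu bytes)\n",
		       (unsigned int)thoff, sizeof(pkt));
		return 0;
	}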
[1] CPU: 1 PID: 3144 Comm: syzkaller271204 Not tainted 4.15.0-rc4-mm1+ #49 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x257 lib/dump_stack.c:53 print_address_description+0x73/0x250 mm/kasan/report.c:256 kasan_report_error mm/kasan/report.c:355 [inline] kasan_report+0x23b/0x360 mm/kasan/report.c:413 __asan_report_load2_noabort+0x14/0x20 mm/kasan/report.c:432 __tcp_hdrlen include/linux/tcp.h:35 [inline] tcp_hdrlen include/linux/tcp.h:40 [inline] qdisc_pkt_len_init net/core/dev.c:3160 [inline] __dev_queue_xmit+0x20d3/0x2200 net/core/dev.c:3465 dev_queue_xmit+0x17/0x20 net/core/dev.c:3554 packet_snd net/packet/af_packet.c:2943 [inline] packet_sendmsg+0x3ad5/0x60a0 net/packet/af_packet.c:2968 sock_sendmsg_nosec net/socket.c:628 [inline] sock_sendmsg+0xca/0x110 net/socket.c:638 sock_write_iter+0x31a/0x5d0 net/socket.c:907 call_write_iter include/linux/fs.h:1776 [inline] new_sync_write fs/read_write.c:469 [inline] __vfs_write+0x684/0x970 fs/read_write.c:482 vfs_write+0x189/0x510 fs/read_write.c:544 SYSC_write fs/read_write.c:589 [inline] SyS_write+0xef/0x220 fs/read_write.c:581 entry_SYSCALL_64_fastpath+0x1f/0x96 Fixes: 34fad54c2537 ("net: __skb_flow_dissect() must cap its return value") Fixes: a6e544b0a88b ("flow_dissector: Jump to exit code in __skb_flow_dissect") Signed-off-by: Eric Dumazet Cc: Willem de Bruijn Reported-by: syzbot Acked-by: Jason Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/flow_dissector.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 0a977373d0033a..f950b80c0dd1b4 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -876,8 +876,8 @@ bool __skb_flow_dissect(const struct sk_buff *skb, out_good: ret = true; - key_control->thoff = (u16)nhoff; out: + key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); key_basic->n_proto = proto; key_basic->ip_proto = ip_proto; @@ -885,7 +885,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb, out_bad: ret = false; - key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); goto out; } EXPORT_SYMBOL(__skb_flow_dissect); From 3c6e5f2f5ef11bdba41e8158cc3c3e8ee6cfd198 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 15 Jan 2018 17:01:19 +0800 Subject: [PATCH 1513/3326] sctp: reinit stream if stream outcnt has been change by sinit in sendmsg [ Upstream commit 625637bf4afa45204bd87e4218645182a919485a ] After introducing sctp_stream structure, sctp uses stream->outcnt as the out stream nums instead of c.sinit_num_ostreams. However when users use sinit in cmsg, it only updates c.sinit_num_ostreams in sctp_sendmsg. At that moment, stream->outcnt is still using previous value. If it's value is not updated, the sinit_num_ostreams of sinit could not really work. This patch is to fix it by updating stream->outcnt and reiniting stream if stream outcnt has been change by sinit in sendmsg. Fixes: a83863174a61 ("sctp: prepare asoc stream for stream reconf") Signed-off-by: Xin Long Acked-by: Neil Horman Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 7d432266645c32..1c08d86efe9494 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1880,8 +1880,14 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) */ if (sinit) { if (sinit->sinit_num_ostreams) { - asoc->c.sinit_num_ostreams = - sinit->sinit_num_ostreams; + __u16 outcnt = sinit->sinit_num_ostreams; + + asoc->c.sinit_num_ostreams = outcnt; + /* outcnt has been changed, so re-init stream */ + err = sctp_stream_init(&asoc->stream, outcnt, 0, + GFP_KERNEL); + if (err) + goto out_free; } if (sinit->sinit_max_instreams) { asoc->c.sinit_max_instreams = From 3eae0ba8c9b51d83d2293249fb9c82e90fc6dc31 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 10 Jan 2018 13:00:39 -0800 Subject: [PATCH 1514/3326] netlink: extack needs to be reset each time through loop [ Upstream commit cbbdf8433a5f117b1a2119ea30fc651b61ef7570 ] syzbot triggered the WARN_ON in netlink_ack testing the bad_attr value. The problem is that netlink_rcv_skb loops over the skb repeatedly invoking the callback and without resetting the extack leaving potentially stale data. Initializing each time through avoids the WARN_ON. Fixes: 2d4bc93368f5a ("netlink: extended ACK reporting") Reported-by: syzbot+315fa6766d0f7c359327@syzkaller.appspotmail.com Signed-off-by: David Ahern Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/netlink/af_netlink.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index aac9d68b463604..f4eb3fe59fe585 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2393,7 +2393,7 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *)) { - struct netlink_ext_ack extack = {}; + struct netlink_ext_ack extack; struct nlmsghdr *nlh; int err; @@ -2414,6 +2414,7 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, if (nlh->nlmsg_type < NLMSG_MIN_TYPE) goto ack; + memset(&extack, 0, sizeof(extack)); err = cb(skb, nlh, &extack); if (err == -EINTR) goto skip; From 4f97adffa33f41f5b9d4b9b01291f12bb74afe22 Mon Sep 17 00:00:00 2001 From: Talat Batheesh Date: Sun, 21 Jan 2018 05:30:42 +0200 Subject: [PATCH 1515/3326] net/mlx5e: Fix fixpoint divide exception in mlx5e_am_stats_compare [ Upstream commit e58edaa4863583b54409444f11b4f80dff0af1cd ] Helmut reported a bug about division by zero while running traffic and doing physical cable pull test. When the cable unplugged the ppms become zero, so when dividing the current ppms by the previous ppms in the next dim iteration there is division by zero. This patch prevent this division for both ppms and epms. Fixes: c3164d2fc48f ("net/mlx5e: Added BW check for DIM decision mechanism") Reported-by: Helmut Grauer Signed-off-by: Talat Batheesh Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index acf32fe952cdef..3d3b1f97dc2733 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -197,9 +197,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : MLX5E_AM_STATS_WORSE; + if (!prev->ppms) + return curr->ppms ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_SAME; + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : MLX5E_AM_STATS_WORSE; + if (!prev->epms) + return MLX5E_AM_STATS_SAME; if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER : From a8c21ba721d4bfc614ecb46f9507cc042b6a6d6b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 15 Jan 2018 11:47:53 -0800 Subject: [PATCH 1516/3326] nfp: use the correct index for link speed table [ Upstream commit 0d9c9f0f40ca262b67fc06a702b85f3976f5e1a1 ] sts variable is holding link speed as well as state. We should be using ls to index into ls_to_ethtool. Fixes: 265aeb511bd5 ("nfp: add support for .get_link_ksettings()") Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index dc016dfec64d65..8e623d8fa78ee9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -306,7 +306,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev, ls >= ARRAY_SIZE(ls_to_ethtool)) return 0; - cmd->base.speed = ls_to_ethtool[sts]; + cmd->base.speed = ls_to_ethtool[ls]; cmd->base.duplex = DUPLEX_FULL; return 0; From 48606bb1eef7c46f154cb4a6c5509729e60a4e91 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 18 Jan 2018 14:48:03 +0800 Subject: [PATCH 1517/3326] netlink: reset extack earlier in netlink_rcv_skb [ Upstream commit cd443f1e91ca600a092e780e8250cd6a2954b763 ] Move up the extack reset/initialization in netlink_rcv_skb, so that those 'goto ack' will not skip it. Otherwise, later on netlink_ack may use the uninitialized extack and cause kernel crash. Fixes: cbbdf8433a5f ("netlink: extack needs to be reset each time through loop") Reported-by: syzbot+03bee3680a37466775e7@syzkaller.appspotmail.com Signed-off-by: Xin Long Acked-by: David Ahern Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/netlink/af_netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index f4eb3fe59fe585..533fd0503ba039 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2400,6 +2400,7 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, while (skb->len >= nlmsg_total_size(0)) { int msglen; + memset(&extack, 0, sizeof(extack)); nlh = nlmsg_hdr(skb); err = 0; @@ -2414,7 +2415,6 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, if (nlh->nlmsg_type < NLMSG_MIN_TYPE) goto ack; - memset(&extack, 0, sizeof(extack)); err = cb(skb, nlh, &extack); if (err == -EINTR) goto skip; From a022bbe393fbe3a1f471ee94d846be03f7fe2136 Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Tue, 16 Jan 2018 15:31:52 +0200 Subject: [PATCH 1518/3326] net/tls: Only attach to sockets in ESTABLISHED state [ Upstream commit d91c3e17f75f218022140dee18cf515292184a8f ] Calling accept on a TCP socket with a TLS ulp attached results in two sockets that share the same ulp context. The ulp context is freed while a socket is destroyed, so after one of the sockets is released, the second second will trigger a use after free when it tries to access the ulp context attached to it. We restrict the TLS ulp to sockets in ESTABLISHED state to prevent the scenario above. Fixes: 3c4d7559159b ("tls: kernel TLS support") Reported-by: syzbot+904e7cd6c5c741609228@syzkaller.appspotmail.com Signed-off-by: Ilya Lesokhin Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/tls/tls_main.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 60aff60e30ad41..70757098f79bdb 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -444,6 +444,15 @@ static int tls_init(struct sock *sk) struct tls_context *ctx; int rc = 0; + /* The TLS ulp is currently supported only for TCP sockets + * in ESTABLISHED state. + * Supporting sockets in LISTEN state will require us + * to modify the accept implementation to clone rather then + * share the ulp context. + */ + if (sk->sk_state != TCP_ESTABLISHED) + return -ENOTSUPP; + /* allocate tls context */ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { From 3a28f04bc4c20e56d75327165f2922a525225b7c Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Tue, 16 Jan 2018 16:04:26 +0100 Subject: [PATCH 1519/3326] tls: fix sw_ctx leak [ Upstream commit cf6d43ef66f416282121f436ce1bee9a25199d52 ] During setsockopt(SOL_TCP, TLS_TX), if initialization of the software context fails in tls_set_sw_offload(), we leak sw_ctx. We also don't reassign ctx->priv_ctx to NULL, so we can't even do another attempt to set it up on the same socket, as it will fail with -EEXIST. Fixes: 3c4d7559159b ('tls: kernel TLS support') Signed-off-by: Sabrina Dubroca Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/tls/tls_sw.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 81cc4325682fd9..83f886d7c1f8b8 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -697,18 +697,17 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx) } default: rc = -EINVAL; - goto out; + goto free_priv; } ctx->prepend_size = TLS_HEADER_SIZE + nonce_size; ctx->tag_size = tag_size; ctx->overhead_size = ctx->prepend_size + ctx->tag_size; ctx->iv_size = iv_size; - ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, - GFP_KERNEL); + ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL); if (!ctx->iv) { rc = -ENOMEM; - goto out; + goto free_priv; } memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size); @@ -756,7 +755,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx) rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size); if (!rc) - goto out; + return 0; free_aead: crypto_free_aead(sw_ctx->aead_send); @@ -767,6 +766,9 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx) free_iv: kfree(ctx->iv); ctx->iv = NULL; +free_priv: + kfree(ctx->priv_ctx); + ctx->priv_ctx = NULL; out: return rc; } From 2f54941c886c9f4d788ab0d4a81830a177ab7fd1 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Tue, 16 Jan 2018 16:04:27 +0100 Subject: [PATCH 1520/3326] tls: return -EBUSY if crypto_info is already set [ Upstream commit 877d17c79b66466942a836403773276e34fe3614 ] do_tls_setsockopt_tx returns 0 without doing anything when crypto_info is already set. Silent failure is confusing for users. Fixes: 3c4d7559159b ("tls: kernel TLS support") Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/tls/tls_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 70757098f79bdb..06688edb68557e 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -364,8 +364,10 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval, crypto_info = &ctx->crypto_send; /* Currently we don't support set crypto info more than one time */ - if (TLS_CRYPTO_INFO_READY(crypto_info)) + if (TLS_CRYPTO_INFO_READY(crypto_info)) { + rc = -EBUSY; goto out; + } switch (tmp_crypto_info.cipher_type) { case TLS_CIPHER_AES_GCM_128: { From ed10b9affb3af663d2cdb8a1e9134fb0a6358ab0 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Tue, 16 Jan 2018 16:04:28 +0100 Subject: [PATCH 1521/3326] tls: reset crypto_info when do_tls_setsockopt_tx fails [ Upstream commit 6db959c82eb039a151d95a0f8b7dea643657327a ] The current code copies directly from userspace to ctx->crypto_send, but doesn't always reinitialize it to 0 on failure. This causes any subsequent attempt to use this setsockopt to fail because of the TLS_CRYPTO_INFO_READY check, eventhough crypto_info is not actually ready. 
This should result in a correctly set up socket after the 3rd call, but currently it does not: size_t s = sizeof(struct tls12_crypto_info_aes_gcm_128); struct tls12_crypto_info_aes_gcm_128 crypto_good = { .info.version = TLS_1_2_VERSION, .info.cipher_type = TLS_CIPHER_AES_GCM_128, }; struct tls12_crypto_info_aes_gcm_128 crypto_bad_type = crypto_good; crypto_bad_type.info.cipher_type = 42; setsockopt(sock, SOL_TLS, TLS_TX, &crypto_bad_type, s); setsockopt(sock, SOL_TLS, TLS_TX, &crypto_good, s - 1); setsockopt(sock, SOL_TLS, TLS_TX, &crypto_good, s); Fixes: 3c4d7559159b ("tls: kernel TLS support") Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/tls/tls_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 06688edb68557e..282361ac026324 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -373,7 +373,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval, case TLS_CIPHER_AES_GCM_128: { if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) { rc = -EINVAL; - goto out; + goto err_crypto_info; } rc = copy_from_user( crypto_info, @@ -388,7 +388,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval, } default: rc = -EINVAL; - goto out; + goto err_crypto_info; } ctx->sk_write_space = sk->sk_write_space; From c2fd0b217068d6ef0b71bc5fbfd3c8f9f48c42eb Mon Sep 17 00:00:00 2001 From: Lorenzo Colitti Date: Thu, 11 Jan 2018 18:36:26 +0900 Subject: [PATCH 1522/3326] net: ipv4: Make "ip route get" match iif lo rules again. [ Upstream commit 6503a30440962f1e1ccb8868816b4e18201218d4 ] Commit 3765d35ed8b9 ("net: ipv4: Convert inet_rtm_getroute to rcu versions of route lookup") broke "ip route get" in the presence of rules that specify iif lo. Host-originated traffic always has iif lo, because ip_route_output_key_hash and ip6_route_output_flags set the flow iif to LOOPBACK_IFINDEX. Thus, putting "iif lo" in an ip rule is a convenient way to select only originated traffic and not forwarded traffic. inet_rtm_getroute used to match these rules correctly because even though it sets the flow iif to 0, it called ip_route_output_key which overwrites iif with LOOPBACK_IFINDEX. But now that it calls ip_route_output_key_hash_rcu, the ifindex will remain 0 and not match the iif lo in the rule. As a result, "ip route get" will return ENETUNREACH. Fixes: 3765d35ed8b9 ("net: ipv4: Convert inet_rtm_getroute to rcu versions of route lookup") Tested: https://android.googlesource.com/kernel/tests/+/master/net/test/multinetwork_test.py passes again Signed-off-by: Lorenzo Colitti Acked-by: David Ahern Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/route.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 804bead564db8e..0ba88efca7ada9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2762,6 +2762,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, if (err == 0 && rt->dst.error) err = -rt->dst.error; } else { + fl4.flowi4_iif = LOOPBACK_IFINDEX; rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb); err = 0; if (IS_ERR(rt)) From 74026a188fe7f8739fbd9f6a960a1486ce6f9c96 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Mon, 22 Jan 2018 16:06:37 -0500 Subject: [PATCH 1523/3326] vmxnet3: repair memory leak [ Upstream commit 848b159835ddef99cc4193083f7e786c3992f580 ] with the introduction of commit b0eb57cb97e7837ebb746404c2c58c6f536f23fa, it appears that rq->buf_info is improperly handled. While it is heap allocated when an rx queue is setup, and freed when torn down, an old line of code in vmxnet3_rq_destroy was not properly removed, leading to rq->buf_info[0] being set to NULL prior to its being freed, causing a memory leak, which eventually exhausts the system on repeated create/destroy operations (for example, when the mtu of a vmxnet3 interface is changed frequently. Fix is pretty straight forward, just move the NULL set to after the free. Tested by myself with successful results Applies to net, and should likely be queued for stable, please Signed-off-by: Neil Horman Reported-By: boyang@redhat.com CC: boyang@redhat.com CC: Shrikrishna Khare CC: "VMware, Inc." CC: David S. Miller Acked-by: Shrikrishna Khare Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/vmxnet3/vmxnet3_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d1c7029ded7cef..cf95290b160c56 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, rq->rx_ring[i].basePA); rq->rx_ring[i].base = NULL; } - rq->buf_info[i] = NULL; } if (rq->data_ring.base) { @@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, (rq->rx_ring[0].size + rq->rx_ring[1].size); dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], rq->buf_info_pa); + rq->buf_info[0] = rq->buf_info[1] = NULL; } } From 34c1acc2f7f5c26e8d0cd2f0b5eba8bd06d8e0b2 Mon Sep 17 00:00:00 2001 From: Xiao Liang Date: Mon, 22 Jan 2018 14:12:52 +0800 Subject: [PATCH 1524/3326] perf/x86/amd/power: Do not load AMD power module on !AMD platforms commit 40d4071ce2d20840d224b4a77b5dc6f752c9ab15 upstream. The AMD power module can be loaded on non AMD platforms, but unload fails with the following Oops: BUG: unable to handle kernel NULL pointer dereference at (null) IP: __list_del_entry_valid+0x29/0x90 Call Trace: perf_pmu_unregister+0x25/0xf0 amd_power_pmu_exit+0x1c/0xd23 [power] SyS_delete_module+0x1a8/0x2b0 ? exit_to_usermode_loop+0x8f/0xb0 entry_SYSCALL_64_fastpath+0x20/0x83 Return -ENODEV instead of 0 from the module init function if the CPU does not match. 
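The underlying pattern is worth spelling out: if a PMU module's init routine reports success on hardware it never probed, the later module exit unwinds state that was never registered, which is exactly the NULL dereference in the report above. A stripped-down userspace sketch of the init/exit contract (all names are placeholders standing in for x86_match_cpu() and the perf registration calls, not the driver's API):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool pmu_registered;

	/* Hypothetical stand-in for the CPU match; pretend this is not an AMD box. */
	static bool cpu_matches(void)
	{
		return false;
	}

	static int power_init(void)
	{
		if (!cpu_matches())
			return -ENODEV;		/* returning 0 here is the bug */
		pmu_registered = true;		/* stands in for perf_pmu_register() */
		return 0;
	}

	static void power_exit(void)
	{
		if (pmu_registered)		/* must only undo state that was set up */
			pmu_registered = false;
	}

	int main(void)
	{
		int ret = power_init();

		printf("init returned %d\n", ret);
		if (!ret)
			power_exit();	/* unload only matters after a successful load */
		return 0;
	}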
Fixes: c7ab62bfbe0e ("perf/x86/amd/power: Add AMD accumulated power reporting mechanism") Signed-off-by: Xiao Liang Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20180122061252.6394-1-xiliang@redhat.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/amd/power.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c index a6eee5ac4f581d..2aefacf5c5b2ab 100644 --- a/arch/x86/events/amd/power.c +++ b/arch/x86/events/amd/power.c @@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void) int ret; if (!x86_match_cpu(cpu_match)) - return 0; + return -ENODEV; if (!boot_cpu_has(X86_FEATURE_ACC_POWER)) return -ENODEV; From ac2cc887653808e988f9d7ece50aacc755a4f879 Mon Sep 17 00:00:00 2001 From: Jia Zhang Date: Tue, 23 Jan 2018 11:41:32 +0100 Subject: [PATCH 1525/3326] x86/microcode/intel: Extend BDW late-loading further with LLC size check commit 7e702d17ed138cf4ae7c00e8c00681ed464587c7 upstream. Commit b94b73733171 ("x86/microcode/intel: Extend BDW late-loading with a revision check") reduced the impact of erratum BDF90 for Broadwell model 79. The impact can be reduced further by checking the size of the last level cache portion per core. Tony: "The erratum says the problem only occurs on the large-cache SKUs. So we only need to avoid the update if we are on a big cache SKU that is also running old microcode." For more details, see erratum BDF90 in document #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family Specification Update) from September 2017. Fixes: b94b73733171 ("x86/microcode/intel: Extend BDW late-loading with a revision check") Signed-off-by: Jia Zhang Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Acked-by: Tony Luck Link: https://lkml.kernel.org/r/1516321542-31161-1-git-send-email-zhang.jia@linux.alibaba.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/microcode/intel.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index d9e460fc7a3b30..f7c55b0e753ad0 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -45,6 +45,9 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; /* Current microcode patch used in early patching on the APs. */ static struct microcode_intel *intel_ucode_patch; +/* last level cache size per core */ +static int llc_size_per_core; + static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1, unsigned int s2, unsigned int p2) { @@ -912,12 +915,14 @@ static bool is_blacklisted(unsigned int cpu) /* * Late loading on model 79 with microcode revision less than 0x0b000021 - * may result in a system hang. This behavior is documented in item - * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family). + * and LLC size per core bigger than 2.5MB may result in a system hang. + * This behavior is documented in item BDF90, #334165 (Intel Xeon + * Processor E7-8800/4800 v4 Product Family). 
*/ if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X && c->x86_mask == 0x01 && + llc_size_per_core > 2621440 && c->microcode < 0x0b000021) { pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); @@ -975,6 +980,15 @@ static struct microcode_ops microcode_intel_ops = { .apply_microcode = apply_microcode_intel, }; +static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) +{ + u64 llc_size = c->x86_cache_size * 1024; + + do_div(llc_size, c->x86_max_cores); + + return (int)llc_size; +} + struct microcode_ops * __init init_intel_microcode(void) { struct cpuinfo_x86 *c = &boot_cpu_data; @@ -985,5 +999,7 @@ struct microcode_ops * __init init_intel_microcode(void) return NULL; } + llc_size_per_core = calc_llc_size_per_core(c); + return µcode_intel_ops; } From cbfb351be483a239a2afaa4702826b0ad359fd1e Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 23 Jan 2018 11:41:33 +0100 Subject: [PATCH 1526/3326] x86/microcode: Fix again accessing initrd after having been freed commit 1d080f096fe33f031d26e19b3ef0146f66b8b0f1 upstream. Commit 24c2503255d3 ("x86/microcode: Do not access the initrd after it has been freed") fixed attempts to access initrd from the microcode loader after it has been freed. However, a similar KASAN warning was reported (stack trace edited): smpboot: Booting Node 0 Processor 1 APIC 0x11 ================================================================== BUG: KASAN: use-after-free in find_cpio_data+0x9b5/0xa50 Read of size 1 at addr ffff880035ffd000 by task swapper/1/0 CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.14.8-slack #7 Hardware name: System manufacturer System Product Name/A88X-PLUS, BIOS 3003 03/10/2016 Call Trace: dump_stack print_address_description kasan_report ? find_cpio_data __asan_report_load1_noabort find_cpio_data find_microcode_in_initrd __load_ucode_amd load_ucode_amd_ap load_ucode_ap After some investigation, it turned out that a merge was done using the wrong side to resolve, leading to picking up the previous state, before the 24c2503255d3 fix. Therefore the Fixes tag below contains a merge commit. Revert the mismerge by catching the save_microcode_in_initrd_amd() retval and thus letting the function exit with the last return statement so that initrd_gone can be set to true. 
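Reduced to its control-flow essence, the mismerge is the difference between returning out of the vendor switch and falling through to the common tail of the function. A toy sketch of the two shapes (the names below are placeholders for save_microcode_in_initrd() and its AMD helper, not the real loader code):

	#include <stdbool.h>
	#include <stdio.h>

	static bool initrd_gone;

	static int save_amd(void)		/* stand-in for the AMD save helper */
	{
		return 0;
	}

	static int save_microcode(int vendor)
	{
		int ret = -1;

		switch (vendor) {
		case 1:				/* "AMD" */
			ret = save_amd();	/* an early 'return save_amd();' here
						 * would skip the tail below */
			break;
		default:
			break;
		}

		initrd_gone = true;		/* must run on every path */
		return ret;
	}

	int main(void)
	{
		save_microcode(1);
		printf("initrd_gone = %d\n", (int)initrd_gone);
		return 0;
	}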
Fixes: f26483eaedec ("Merge branch 'x86/urgent' into x86/microcode, to resolve conflicts") Reported-by: Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Link: https://bugzilla.kernel.org/show_bug.cgi?id=198295 Link: https://lkml.kernel.org/r/20180123104133.918-2-bp@alien8.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/microcode/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index c4fa4a85d4cb6f..e4fc595cd6eaa6 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -239,7 +239,7 @@ static int __init save_microcode_in_initrd(void) break; case X86_VENDOR_AMD: if (c->x86 >= 0x10) - return save_microcode_in_initrd_amd(cpuid_eax(1)); + ret = save_microcode_in_initrd_amd(cpuid_eax(1)); break; default: break; From ba07aba771974be3d3dfae5241c8d3db40363b26 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 25 Jan 2018 13:12:14 -0800 Subject: [PATCH 1527/3326] x86/mm/64: Fix vmapped stack syncing on very-large-memory 4-level systems commit 5beda7d54eafece4c974cfa9fbb9f60fb18fd20a upstream. Neil Berrington reported a double-fault on a VM with 768GB of RAM that uses large amounts of vmalloc space with PTI enabled. The cause is that load_new_mm_cr3() was never fixed to take the 5-level pgd folding code into account, so, on a 4-level kernel, the pgd synchronization logic compiles away to exactly nothing. Interestingly, the problem doesn't trigger with nopti. I assume this is because the kernel is mapped with global pages if we boot with nopti. The sequence of operations when we create a new task is that we first load its mm while still running on the old stack (which crashes if the old stack is unmapped in the new mm unless the TLB saves us), then we call prepare_switch_to(), and then we switch to the new stack. prepare_switch_to() pokes the new stack directly, which will populate the mapping through vmalloc_fault(). I assume that we're getting lucky on non-PTI systems -- the old stack's TLB entry stays alive long enough to make it all the way through prepare_switch_to() and switch_to() so that we make it to a valid stack. Fixes: b50858ce3e2a ("x86/mm/vmalloc: Add 5-level paging support") Reported-and-tested-by: Neil Berrington Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Cc: Konstantin Khlebnikov Cc: Dave Hansen Cc: Borislav Petkov Link: https://lkml.kernel.org/r/346541c56caed61abbe693d7d2742b4a380c5001.1516914529.git.luto@kernel.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/tlb.c | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index a1561957dccbb8..5bfe61a5e8e3c6 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -151,6 +151,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, local_irq_restore(flags); } +static void sync_current_stack_to_mm(struct mm_struct *mm) +{ + unsigned long sp = current_stack_pointer; + pgd_t *pgd = pgd_offset(mm, sp); + + if (CONFIG_PGTABLE_LEVELS > 4) { + if (unlikely(pgd_none(*pgd))) { + pgd_t *pgd_ref = pgd_offset_k(sp); + + set_pgd(pgd, *pgd_ref); + } + } else { + /* + * "pgd" is faked. The top level entries are "p4d"s, so sync + * the p4d. This compiles to approximately the same code as + * the 5-level case. 
+ */ + p4d_t *p4d = p4d_offset(pgd, sp); + + if (unlikely(p4d_none(*p4d))) { + pgd_t *pgd_ref = pgd_offset_k(sp); + p4d_t *p4d_ref = p4d_offset(pgd_ref, sp); + + set_p4d(p4d, *p4d_ref); + } + } +} + void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { @@ -226,11 +254,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * mapped in the new pgd, we'll double-fault. Forcibly * map it. */ - unsigned int index = pgd_index(current_stack_pointer); - pgd_t *pgd = next->pgd + index; - - if (unlikely(pgd_none(*pgd))) - set_pgd(pgd, init_mm.pgd[index]); + sync_current_stack_to_mm(next); } /* Stop remote flushes for the previous mm */ From fdd88d753d4b3142f7cd38b0278c29b03c1e0929 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Jan 2018 14:54:32 +0100 Subject: [PATCH 1528/3326] hrtimer: Reset hrtimer cpu base proper on CPU hotplug commit d5421ea43d30701e03cadc56a38854c36a8b4433 upstream. The hrtimer interrupt code contains a hang detection and mitigation mechanism, which prevents that a long delayed hrtimer interrupt causes a continous retriggering of interrupts which prevent the system from making progress. If a hang is detected then the timer hardware is programmed with a certain delay into the future and a flag is set in the hrtimer cpu base which prevents newly enqueued timers from reprogramming the timer hardware prior to the chosen delay. The subsequent hrtimer interrupt after the delay clears the flag and resumes normal operation. If such a hang happens in the last hrtimer interrupt before a CPU is unplugged then the hang_detected flag is set and stays that way when the CPU is plugged in again. At that point the timer hardware is not armed and it cannot be armed because the hang_detected flag is still active, so nothing clears that flag. As a consequence the CPU does not receive hrtimer interrupts and no timers expire on that CPU which results in RCU stalls and other malfunctions. Clear the flag along with some other less critical members of the hrtimer cpu base to ensure starting from a clean state when a CPU is plugged in. Thanks to Paul, Sebastian and Anna-Maria for their help to get down to the root cause of that hard to reproduce heisenbug. Once understood it's trivial and certainly justifies a brown paperbag. Fixes: 41d2e4949377 ("hrtimer: Tune hrtimer_interrupt hang logic") Reported-by: Paul E. 
McKenney Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Sebastian Sewior Cc: Anna-Maria Gleixner Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801261447590.2067@nanos Signed-off-by: Greg Kroah-Hartman --- kernel/time/hrtimer.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 88f75f92ef368c..052773df9f03f9 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -655,7 +655,9 @@ static void hrtimer_reprogram(struct hrtimer *timer, static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { base->expires_next = KTIME_MAX; + base->hang_detected = 0; base->hres_active = 0; + base->next_timer = NULL; } /* @@ -1591,6 +1593,7 @@ int hrtimers_prepare_cpu(unsigned int cpu) timerqueue_init_head(&cpu_base->clock_base[i].active); } + cpu_base->active_bases = 0; cpu_base->cpu = cpu; hrtimer_init_hres(cpu_base); return 0; From 6fde36d5ce7ba4303865d5e11601cd3094e5909b Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 29 Jan 2018 00:36:42 +0100 Subject: [PATCH 1529/3326] bpf: introduce BPF_JIT_ALWAYS_ON config [ upstream commit 290af86629b25ffd1ed6232c4e9107da031705cb ] The BPF interpreter has been used as part of the spectre 2 attack CVE-2017-5715. A quote from goolge project zero blog: "At this point, it would normally be necessary to locate gadgets in the host kernel code that can be used to actually leak data by reading from an attacker-controlled location, shifting and masking the result appropriately and then using the result of that as offset to an attacker-controlled address for a load. But piecing gadgets together and figuring out which ones work in a speculation context seems annoying. So instead, we decided to use the eBPF interpreter, which is built into the host kernel - while there is no legitimate way to invoke it from inside a VM, the presence of the code in the host kernel's text section is sufficient to make it usable for the attack, just like with ordinary ROP gadgets." To make attacker job harder introduce BPF_JIT_ALWAYS_ON config option that removes interpreter from the kernel in favor of JIT-only mode. So far eBPF JIT is supported by: x64, arm64, arm32, sparc64, s390, powerpc64, mips64 The start of JITed program is randomized and code page is marked as read-only. In addition "constant blinding" can be turned on with net.core.bpf_jit_harden v2->v3: - move __bpf_prog_ret0 under ifdef (Daniel) v1->v2: - fix init order, test_bpf and cBPF (Daniel's feedback) - fix offloaded bpf (Jakub's feedback) - add 'return 0' dummy in case something can invoke prog->bpf_func - retarget bpf tree. For bpf-next the patch would need one extra hunk. It will be sent when the trees are merged back to net-next Considered doing: int bpf_jit_enable __read_mostly = BPF_EBPF_JIT_DEFAULT; but it seems better to land the patch as-is and in bpf-next remove bpf_jit_enable global variable from all JITs, consolidate in one place and remove this jit_init() function. 
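The user-visible contract is easy to state: with the interpreter compiled out, a program that cannot be JITed has to be rejected at load time instead of being run by a do-nothing stub. A condensed userspace sketch of that selection step (struct layout and helper names are illustrative only; the kernel returns ENOTSUPP, approximated here with EOPNOTSUPP so the sketch builds in userspace):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct prog {
		bool jited;
		int (*run)(void);
	};

	static int ret0(void)			/* dummy body, never meant to run */
	{
		return 0;
	}

	static void try_jit(struct prog *p)	/* pretend the JIT backend failed */
	{
		p->jited = false;
	}

	static int select_runtime(struct prog *p, bool jit_always_on)
	{
		p->run = ret0;			/* no interpreter to fall back to */
		try_jit(p);
		if (jit_always_on && !p->jited)
			return -EOPNOTSUPP;	/* refuse the program outright */
		return 0;
	}

	int main(void)
	{
		struct prog p = { 0 };

		printf("load: %d\n", select_runtime(&p, true));
		return 0;
	}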
Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- init/Kconfig | 7 +++++++ kernel/bpf/core.c | 19 +++++++++++++++++++ lib/test_bpf.c | 11 +++++++---- net/core/filter.c | 6 ++---- net/core/sysctl_net_core.c | 6 ++++++ net/socket.c | 9 +++++++++ 6 files changed, 50 insertions(+), 8 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index 3c1faaa2af4aad..46075327c165dd 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1342,6 +1342,13 @@ config BPF_SYSCALL Enable the bpf() system call that allows to manipulate eBPF programs and maps via file descriptors. +config BPF_JIT_ALWAYS_ON + bool "Permanently enable BPF JIT and remove BPF interpreter" + depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT + help + Enables BPF JIT and removes BPF interpreter to avoid + speculative execution of BPF instructions by the interpreter + config SHMEM bool "Use full shmem filesystem" if EXPERT default y diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 7b62df86be1d7e..aaa44244d49824 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -760,6 +760,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) } EXPORT_SYMBOL_GPL(__bpf_call_base); +#ifndef CONFIG_BPF_JIT_ALWAYS_ON /** * __bpf_prog_run - run eBPF program on a given context * @ctx: is the data we are operating on @@ -1310,6 +1311,14 @@ EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) }; +#else +static unsigned int __bpf_prog_ret0(const void *ctx, + const struct bpf_insn *insn) +{ + return 0; +} +#endif + bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp) { @@ -1357,9 +1366,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) */ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) { +#ifndef CONFIG_BPF_JIT_ALWAYS_ON u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; +#else + fp->bpf_func = __bpf_prog_ret0; +#endif /* eBPF JITs can rewrite the program in case constant * blinding is active. However, in case of error during @@ -1368,6 +1381,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) * be JITed, but falls back to the interpreter. */ fp = bpf_int_jit_compile(fp); +#ifdef CONFIG_BPF_JIT_ALWAYS_ON + if (!fp->jited) { + *err = -ENOTSUPP; + return fp; + } +#endif bpf_prog_lock_ro(fp); /* The tail call compatibility check can only be done at diff --git a/lib/test_bpf.c b/lib/test_bpf.c index aa8812ae6776ee..6fbb73f3f531d6 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -6207,9 +6207,8 @@ static struct bpf_prog *generate_filter(int which, int *err) return NULL; } } - /* We don't expect to fail. */ if (*err) { - pr_cont("FAIL to attach err=%d len=%d\n", + pr_cont("FAIL to prog_create err=%d len=%d\n", *err, fprog.len); return NULL; } @@ -6233,6 +6232,10 @@ static struct bpf_prog *generate_filter(int which, int *err) * checks. 
*/ fp = bpf_prog_select_runtime(fp, err); + if (*err) { + pr_cont("FAIL to select_runtime err=%d\n", *err); + return NULL; + } break; } @@ -6418,8 +6421,8 @@ static __init int test_bpf(void) pass_cnt++; continue; } - - return err; + err_cnt++; + continue; } pr_cont("jited:%u ", fp->jited); diff --git a/net/core/filter.c b/net/core/filter.c index 6ae94f825f72eb..29043bddc0db54 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1053,11 +1053,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) */ goto out_err_free; - /* We are guaranteed to never error here with cBPF to eBPF - * transitions, since there's no issue with type compatibility - * checks on program arrays. - */ fp = bpf_prog_select_runtime(fp, &err); + if (err) + goto out_err_free; kfree(old_prog); return fp; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index cbc3dde4cfccca..a47ad6cd41c039 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -325,7 +325,13 @@ static struct ctl_table net_core_table[] = { .data = &bpf_jit_enable, .maxlen = sizeof(int), .mode = 0644, +#ifndef CONFIG_BPF_JIT_ALWAYS_ON .proc_handler = proc_dointvec +#else + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &one, +#endif }, # ifdef CONFIG_HAVE_EBPF_JIT { diff --git a/net/socket.c b/net/socket.c index c729625eb5d36b..d894c7c5fa5498 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2642,6 +2642,15 @@ static int __init sock_init(void) core_initcall(sock_init); /* early initcall */ +static int __init jit_init(void) +{ +#ifdef CONFIG_BPF_JIT_ALWAYS_ON + bpf_jit_enable = 1; +#endif + return 0; +} +pure_initcall(jit_init); + #ifdef CONFIG_PROC_FS void socket_seq_show(struct seq_file *seq) { From 3ea4247ec1b7efc423cf4f75450ebf5cffab9ed8 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 29 Jan 2018 00:36:43 +0100 Subject: [PATCH 1530/3326] bpf: avoid false sharing of map refcount with max_entries [ upstream commit be95a845cc4402272994ce290e3ad928aff06cb9 ] In addition to commit b2157399cc98 ("bpf: prevent out-of-bounds speculation") also change the layout of struct bpf_map such that false sharing of fast-path members like max_entries is avoided when the maps reference counter is altered. Therefore enforce them to be placed into separate cachelines. pahole dump after change: struct bpf_map { const struct bpf_map_ops * ops; /* 0 8 */ struct bpf_map * inner_map_meta; /* 8 8 */ void * security; /* 16 8 */ enum bpf_map_type map_type; /* 24 4 */ u32 key_size; /* 28 4 */ u32 value_size; /* 32 4 */ u32 max_entries; /* 36 4 */ u32 map_flags; /* 40 4 */ u32 pages; /* 44 4 */ u32 id; /* 48 4 */ int numa_node; /* 52 4 */ bool unpriv_array; /* 56 1 */ /* XXX 7 bytes hole, try to pack */ /* --- cacheline 1 boundary (64 bytes) --- */ struct user_struct * user; /* 64 8 */ atomic_t refcnt; /* 72 4 */ atomic_t usercnt; /* 76 4 */ struct work_struct work; /* 80 32 */ char name[16]; /* 112 16 */ /* --- cacheline 2 boundary (128 bytes) --- */ /* size: 128, cachelines: 2, members: 17 */ /* sum members: 121, holes: 1, sum holes: 7 */ }; Now all entries in the first cacheline are read only throughout the life time of the map, set up once during map creation. Overall struct size and number of cachelines doesn't change from the reordering. struct bpf_map is usually first member and embedded in map structs in specific map implementations, so also avoid those members to sit at the end where it could potentially share the cacheline with first map values e.g. 
in the array since remote CPUs could trigger map updates just as well for those (easily dirtying members like max_entries intentionally as well) while having subsequent values in cache. Quoting from Google's Project Zero blog [1]: Additionally, at least on the Intel machine on which this was tested, bouncing modified cache lines between cores is slow, apparently because the MESI protocol is used for cache coherence [8]. Changing the reference counter of an eBPF array on one physical CPU core causes the cache line containing the reference counter to be bounced over to that CPU core, making reads of the reference counter on all other CPU cores slow until the changed reference counter has been written back to memory. Because the length and the reference counter of an eBPF array are stored in the same cache line, this also means that changing the reference counter on one physical CPU core causes reads of the eBPF array's length to be slow on other physical CPU cores (intentional false sharing). While this doesn't 'control' the out-of-bounds speculation through masking the index as in commit b2157399cc98, triggering a manipulation of the map's reference counter is really trivial, so lets not allow to easily affect max_entries from it. Splitting to separate cachelines also generally makes sense from a performance perspective anyway in that fast-path won't have a cache miss if the map gets pinned, reused in other progs, etc out of control path, thus also avoids unintentional false sharing. [1] https://googleprojectzero.blogspot.ch/2018/01/reading-privileged-memory-with-side.html Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- include/linux/bpf.h | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 0bcf803f20de07..5c5be80ce802ce 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -42,7 +42,14 @@ struct bpf_map_ops { }; struct bpf_map { - atomic_t refcnt; + /* 1st cacheline with read-mostly members of which some + * are also accessed in fast-path (e.g. ops, max_entries). + */ + const struct bpf_map_ops *ops ____cacheline_aligned; + struct bpf_map *inner_map_meta; +#ifdef CONFIG_SECURITY + void *security; +#endif enum bpf_map_type map_type; u32 key_size; u32 value_size; @@ -52,11 +59,15 @@ struct bpf_map { u32 id; int numa_node; bool unpriv_array; - struct user_struct *user; - const struct bpf_map_ops *ops; - struct work_struct work; + /* 7 bytes hole */ + + /* 2nd cacheline with misc members to avoid false sharing + * particularly with refcounting. + */ + struct user_struct *user ____cacheline_aligned; + atomic_t refcnt; atomic_t usercnt; - struct bpf_map *inner_map_meta; + struct work_struct work; }; /* function argument constraints */ From 6eca013bef784aaf7be1a6262e3fe2de75476166 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 29 Jan 2018 00:36:44 +0100 Subject: [PATCH 1531/3326] bpf: fix divides by zero [ upstream commit c366287ebd698ef5e3de300d90cd62ee9ee7373e ] Divides by zero are not nice, lets avoid them if possible. Also do_div() seems not needed when dealing with 32bit operands, but this seems a minor detail. 
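The trap here, and in the JIT-side fix that follows, is that ALU32 divides use only the low 32 bits of the source register, so the zero check must be applied to the truncated value: a register can be non-zero as a 64-bit quantity while its low half is zero. A tiny standalone illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t src = 1ULL << 32;	/* non-zero as a 64-bit value... */
		uint64_t dst = 100;

		if (src == 0)			/* 64-bit check: does not trigger */
			return 1;

		if ((uint32_t)src == 0) {	/* ...but zero once truncated to 32 bits */
			printf("32-bit divisor is 0, refusing to divide\n");
			return 0;
		}

		/* Reached only when the low 32 bits are non-zero. */
		printf("%llu\n", (unsigned long long)(dst / (uint32_t)src));
		return 0;
	}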
Fixes: bd4cf0ed331a ("net: filter: rework/optimize internal BPF interpreter's instruction set") Signed-off-by: Eric Dumazet Reported-by: syzbot Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index aaa44244d49824..2246115365d99f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -949,7 +949,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, DST = tmp; CONT; ALU_MOD_X: - if (unlikely(SRC == 0)) + if (unlikely((u32)SRC == 0)) return 0; tmp = (u32) DST; DST = do_div(tmp, (u32) SRC); @@ -968,7 +968,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, DST = div64_u64(DST, SRC); CONT; ALU_DIV_X: - if (unlikely(SRC == 0)) + if (unlikely((u32)SRC == 0)) return 0; tmp = (u32) DST; do_div(tmp, (u32) SRC); From ca0a0967205b65a197dccb80688f3588cc14d745 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 29 Jan 2018 00:36:45 +0100 Subject: [PATCH 1532/3326] bpf: fix 32-bit divide by zero [ upstream commit 68fda450a7df51cff9e5a4d4a4d9d0d5f2589153 ] due to some JITs doing if (src_reg == 0) check in 64-bit mode for div/mod operations mask upper 32-bits of src register before doing the check Fixes: 622582786c9e ("net: filter: x86: internal BPF JIT") Fixes: 7a12b5031c6b ("sparc64: Add eBPF JIT.") Reported-by: syzbot+48340bb518e88849e2e3@syzkaller.appspotmail.com Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 18 ++++++++++++++++++ net/core/filter.c | 4 ++++ 2 files changed, 22 insertions(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 75a5c3312f4603..ab2be68e376a2f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4304,6 +4304,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) int i, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { + if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) || + insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { + /* due to JIT bugs clear upper 32-bits of src register + * before div/mod operation + */ + insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg); + insn_buf[1] = *insn; + cnt = 2; + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + continue; + } + if (insn->code != (BPF_JMP | BPF_CALL)) continue; diff --git a/net/core/filter.c b/net/core/filter.c index 29043bddc0db54..d5158a10ac8f60 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -457,6 +457,10 @@ static int bpf_convert_filter(struct sock_filter *prog, int len, convert_bpf_extensions(fp, &insn)) break; + if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) || + fp->code == (BPF_ALU | BPF_MOD | BPF_X)) + *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X); + *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); break; From a17536742bb9a5df561cce54c7cc3cd1e2cd480d Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 29 Jan 2018 00:36:46 +0100 Subject: [PATCH 1533/3326] bpf: reject stores into ctx via st and xadd [ upstream commit f37a8cb84cce18762e8f86a70bd6a49a66ab964c ] Alexei found that verifier does not reject stores into context via BPF_ST instead of BPF_STX. And while looking at it, we also should not allow XADD variant of BPF_STX. 
The context rewriter is only assuming either BPF_LDX_MEM- or BPF_STX_MEM-type operations, thus reject anything other than that so that assumptions in the rewriter properly hold. Add test cases as well for BPF selftests. Fixes: d691f9e8d440 ("bpf: allow programs to write to certain skb fields") Reported-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 19 ++++++++++++++ tools/testing/selftests/bpf/test_verifier.c | 29 +++++++++++++++++++-- 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ab2be68e376a2f..f9339c3219bc4d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -986,6 +986,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno) return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]); } +static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) +{ + const struct bpf_reg_state *reg = &env->cur_state.regs[regno]; + + return reg->type == PTR_TO_CTX; +} + static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, int off, int size, bool strict) { @@ -1258,6 +1265,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins return -EACCES; } + if (is_ctx_reg(env, insn->dst_reg)) { + verbose("BPF_XADD stores into R%d context is not allowed\n", + insn->dst_reg); + return -EACCES; + } + /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1); @@ -3859,6 +3872,12 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; + if (is_ctx_reg(env, insn->dst_reg)) { + verbose("BPF_ST stores into R%d context is not allowed\n", + insn->dst_reg); + return -EACCES; + } + /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 1241487de93f9e..16299939d3fff5 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -2595,6 +2595,29 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, + { + "context stores via ST", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_ST stores into R1 context is not allowed", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "context stores via XADD", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1, + BPF_REG_0, offsetof(struct __sk_buff, mark), 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_XADD stores into R1 context is not allowed", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, { "direct packet access: test1", .insns = { @@ -4317,7 +4340,8 @@ static struct bpf_test tests[] = { .fixup_map1 = { 2 }, .errstr_unpriv = "R2 leaks addr into mem", .result_unpriv = REJECT, - .result = ACCEPT, + .result = REJECT, + .errstr = "BPF_XADD stores into R1 context is not allowed", }, { "leak pointer into ctx 2", @@ -4331,7 +4355,8 @@ static struct bpf_test tests[] = { }, .errstr_unpriv = "R10 leaks addr into mem", .result_unpriv = REJECT, - .result = ACCEPT, + .result = REJECT, + .errstr = "BPF_XADD stores into R1 context is not allowed", }, 
{ "leak pointer into ctx 3", From c43db1a3c7caf82a6d59401e617fd5c6fc0bf40d Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 29 Jan 2018 00:36:47 +0100 Subject: [PATCH 1534/3326] bpf, arm64: fix stack_depth tracking in combination with tail calls [ upstream commit a2284d912bfc865cdca4c00488e08a3550f9a405 ] Using dynamic stack_depth tracking in arm64 JIT is currently broken in combination with tail calls. In prologue, we cache ctx->stack_size and adjust SP reg for setting up function call stack, and tearing it down again in epilogue. Problem is that when doing a tail call, the cached ctx->stack_size might not be the same. One way to fix the problem with minimal overhead is to re-adjust SP in emit_bpf_tail_call() and properly adjust it to the current program's ctx->stack_size. Tested on Cavium ThunderX ARMv8. Fixes: f1c9eed7f437 ("bpf, arm64: take advantage of stack_depth tracking") Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- arch/arm64/net/bpf_jit_comp.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index ba38d403abb2fc..bb32f7f6dd0f96 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) /* Stack must be multiples of 16B */ #define STACK_ALIGN(sz) (((sz) + 15) & ~15) -#define PROLOGUE_OFFSET 8 +/* Tail call offset to jump into */ +#define PROLOGUE_OFFSET 7 static int build_prologue(struct jit_ctx *ctx) { @@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx) /* Initialize tail_call_cnt */ emit(A64_MOVZ(1, tcc, 0, 0), ctx); - /* 4 byte extra for skb_copy_bits buffer */ - ctx->stack_size = prog->aux->stack_depth + 4; - ctx->stack_size = STACK_ALIGN(ctx->stack_size); - - /* Set up function call stack */ - emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); - cur_offset = ctx->idx - idx0; if (cur_offset != PROLOGUE_OFFSET) { pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", cur_offset, PROLOGUE_OFFSET); return -1; } + + /* 4 byte extra for skb_copy_bits buffer */ + ctx->stack_size = prog->aux->stack_depth + 4; + ctx->stack_size = STACK_ALIGN(ctx->stack_size); + + /* Set up function call stack */ + emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); return 0; } @@ -260,11 +261,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(A64_LDR64(prg, tmp, prg), ctx); emit(A64_CBZ(1, prg, jmp_offset), ctx); - /* goto *(prog->bpf_func + prologue_size); */ + /* goto *(prog->bpf_func + prologue_offset); */ off = offsetof(struct bpf_prog, bpf_func); emit_a64_mov_i64(tmp, off, ctx); emit(A64_LDR64(tmp, prg, tmp), ctx); emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); + emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); emit(A64_BR(tmp), ctx); /* out: */ From c83189edf416ac10b55ca48292158df460c13cd5 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 18 Dec 2017 02:15:32 +0100 Subject: [PATCH 1535/3326] cpufreq: governor: Ensure sufficiently large sampling intervals commit 56026645e2b6f11ede34a5e6ab69d3eb56f9c8fc upstream. After commit aa7519af450d (cpufreq: Use transition_delay_us for legacy governors as well) the sampling_rate field of struct dbs_data may be less than the tick period which causes dbs_update() to produce incorrect results, so make the code ensure that the value of that field will always be sufficiently large. 
Fixes: aa7519af450d (cpufreq: Use transition_delay_us for legacy governors as well) Reported-by: Andy Tang Reported-by: Doug Smythies Tested-by: Andy Tang Signed-off-by: Rafael J. Wysocki Acked-by: Viresh Kumar Signed-off-by: Greg Kroah-Hartman --- drivers/cpufreq/cpufreq_governor.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 58d4f4e1ad6a90..ca38229b045ab2 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -22,6 +22,8 @@ #include "cpufreq_governor.h" +#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC) + static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs); static DEFINE_MUTEX(gov_dbs_data_mutex); @@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, { struct dbs_data *dbs_data = to_dbs_data(attr_set); struct policy_dbs_info *policy_dbs; + unsigned int sampling_interval; int ret; - ret = sscanf(buf, "%u", &dbs_data->sampling_rate); - if (ret != 1) + + ret = sscanf(buf, "%u", &sampling_interval); + if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL) return -EINVAL; + dbs_data->sampling_rate = sampling_interval; + /* * We are operating under dbs_data->mutex and so the list and its * entries can't be freed concurrently. @@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) if (ret) goto free_policy_dbs_info; - dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy); + /* + * The sampling interval should not be less than the transition latency + * of the CPU and it also cannot be too small for dbs_update() to work + * correctly. + */ + dbs_data->sampling_rate = max_t(unsigned int, + CPUFREQ_DBS_MIN_SAMPLING_INTERVAL, + cpufreq_policy_transition_delay_us(policy)); if (!have_governor_per_policy()) gov->gdbs_data = dbs_data; From 54e67ba7d20a5921cfe712cfe4bd773e75df10e0 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 22 Jan 2018 20:11:06 +0000 Subject: [PATCH 1536/3326] nfsd: auth: Fix gid sorting when rootsquash enabled commit 1995266727fa8143897e89b55f5d3c79aa828420 upstream. Commit bdcf0a423ea1 ("kernel: make groups_sort calling a responsibility group_info allocators") appears to break nfsd rootsquash in a pretty major way. It adds a call to groups_sort() inside the loop that copies/squashes gids, which means the valid gids are sorted along with the following garbage. The net result is that the highest numbered valid gids are replaced with any lower-valued garbage gids, possibly including 0. We should sort only once, after filling in all the gids. Fixes: bdcf0a423ea1 ("kernel: make groups_sort calling a responsibility ...") Signed-off-by: Ben Hutchings Acked-by: J. 
Bruce Fields Signed-off-by: Linus Torvalds Cc: Wolfgang Walter Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/auth.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index f650e475d8f0d8..fdf2aad7347090 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c @@ -60,10 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) gi->gid[i] = exp->ex_anon_gid; else gi->gid[i] = rqgi->gid[i]; - - /* Each thread allocates its own gi, no race */ - groups_sort(gi); } + + /* Each thread allocates its own gi, no race */ + groups_sort(gi); } else { gi = get_group_info(rqgi); } From 6c70076667f246dc200c7a3e9aeabd2f8f388416 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 31 Jan 2018 14:03:50 +0100 Subject: [PATCH 1537/3326] Linux 4.14.16 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index bf1a277a67a431..90a4bffa844673 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 15 +SUBLEVEL = 16 EXTRAVERSION = NAME = Petit Gorille From d383a4277b75f3305d78006bb8de673cd17d04a5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 22 Jan 2018 11:39:47 +0100 Subject: [PATCH 1538/3326] futex: Fix OWNER_DEAD fixup commit a97cb0e7b3f4c6297fd857055ae8e895f402f501 upstream. Both Geert and DaveJ reported that the recent futex commit: c1e2f0eaf015 ("futex: Avoid violating the 10th rule of futex") introduced a problem with setting OWNER_DEAD. We set the bit on an uninitialized variable and then entirely optimize it away as a dead-store. Move the setting of the bit to where it is more useful. Reported-by: Geert Uytterhoeven Reported-by: Dave Jones Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: c1e2f0eaf015 ("futex: Avoid violating the 10th rule of futex") Link: http://lkml.kernel.org/r/20180122103947.GD2228@hirez.programming.kicks-ass.net Signed-off-by: Ingo Molnar Cc: Ozkan Sezer Signed-off-by: Greg Kroah-Hartman --- kernel/futex.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/futex.c b/kernel/futex.c index 52b3f47031580f..046cd780d057e9 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2311,9 +2311,6 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); oldowner = pi_state->owner; - /* Owner died? */ - if (!pi_state->owner) - newtid |= FUTEX_OWNER_DIED; /* * We are here because either: @@ -2374,6 +2371,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, } newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; + /* Owner died? */ + if (!pi_state->owner) + newtid |= FUTEX_OWNER_DIED; if (get_futex_value_locked(&uval, uaddr)) goto handle_fault; From d5e06a1867210049bbfe27864ee0a40cfd9b1e9b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 5 Jan 2018 16:26:00 -0800 Subject: [PATCH 1539/3326] loop: fix concurrent lo_open/lo_release MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit ae6650163c66a7eff1acd6eb8b0f752dcfa8eba5 upstream. 范龙飞 reports that KASAN can report a use-after-free in __lock_acquire. The reason is due to insufficient serialization in lo_release(), which will continue to use the loop device even after it has decremented the lo_refcnt to zero. In the meantime, another process can come in, open the loop device again as it is being shut down. 
Confusion ensues. Reported-by: 范龙飞 Signed-off-by: Linus Torvalds Signed-off-by: Jens Axboe Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/block/loop.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 85de6733469550..a2a0dce5114e71 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1576,9 +1576,8 @@ static int lo_open(struct block_device *bdev, fmode_t mode) return err; } -static void lo_release(struct gendisk *disk, fmode_t mode) +static void __lo_release(struct loop_device *lo) { - struct loop_device *lo = disk->private_data; int err; if (atomic_dec_return(&lo->lo_refcnt)) @@ -1605,6 +1604,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode) mutex_unlock(&lo->lo_ctl_mutex); } +static void lo_release(struct gendisk *disk, fmode_t mode) +{ + mutex_lock(&loop_index_mutex); + __lo_release(disk->private_data); + mutex_unlock(&loop_index_mutex); +} + static const struct block_device_operations lo_fops = { .owner = THIS_MODULE, .open = lo_open, From 00251aedef3195e912184a5798952a7793a77abe Mon Sep 17 00:00:00 2001 From: Janakarajan Natarajan Date: Mon, 6 Nov 2017 11:44:23 -0600 Subject: [PATCH 1540/3326] KVM: x86: Fix CPUID function for word 6 (80000001_ECX) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 50a671d4d15b859f447fa527191073019b6ce9cb upstream. The function for CPUID 80000001 ECX is set to 0xc0000001. Set it to 0x80000001. Signed-off-by: Janakarajan Natarajan Reviewed-by: Jim Mattson Reviewed-by: Krish Sadhukhan Reviewed-by: Borislav Petkov Fixes: d6321d493319 ("KVM: x86: generalize guest_cpuid_has_ helpers") Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/cpuid.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index cdc70a3a65838b..c2cea6651279f7 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -44,7 +44,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX}, [CPUID_1_ECX] = { 1, 0, CPUID_ECX}, [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX}, - [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX}, + [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX}, [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX}, [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX}, [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX}, From 636124c8699be4f5f987ef7f95cfda1acaeed88c Mon Sep 17 00:00:00 2001 From: Joel Stanley Date: Thu, 21 Dec 2017 11:11:31 +1030 Subject: [PATCH 1541/3326] tools/gpio: Fix build error with musl libc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 1696784eb7b52b13b62d160c028ef2c2c981d4f2 upstream. The GPIO tools build fails when using a buildroot toolchain that uses musl as it's C library: arm-broomstick-linux-musleabi-gcc -Wp,-MD,./.gpio-event-mon.o.d \ -Wp,-MT,gpio-event-mon.o -O2 -Wall -g -D_GNU_SOURCE \ -Iinclude -D"BUILD_STR(s)=#s" -c -o gpio-event-mon.o gpio-event-mon.c gpio-event-mon.c:30:6: error: unknown type name ‘u_int32_t’; did you mean ‘uint32_t’? u_int32_t handleflags, ^~~~~~~~~ uint32_t The glibc headers installed on my laptop include sys/types.h in unistd.h, but it appears that musl does not. 
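For illustration only (a minimal sketch, not part of the patch), the two spellings of the type and the headers that declare them:

	#include <sys/types.h>	/* BSD-style u_int32_t lives here */
	#include <stdint.h>	/* C99 uint32_t lives here */

	u_int32_t handleflags_bsd;	/* needs sys/types.h to be pulled in somehow */
	uint32_t  handleflags_c99;	/* always available from stdint.h */

Because glibc's unistd.h happens to drag in sys/types.h while musl's does not, code that uses u_int32_t without requesting sys/types.h itself builds on the former and fails on the latter; pulling in the missing header explicitly (or switching to uint32_t) makes the tool build on both.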
Fixes: 97f69747d8b1 ("tools/gpio: add the gpio-event-mon tool") Signed-off-by: Joel Stanley Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- tools/gpio/gpio-event-mon.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c index 1c14c259515836..4b36323ea64bb1 100644 --- a/tools/gpio/gpio-event-mon.c +++ b/tools/gpio/gpio-event-mon.c @@ -23,6 +23,7 @@ #include #include #include +#include #include int monitor_device(const char *device_name, From 460c5b9745b8b455be5591b1179e89a8bdcb1afd Mon Sep 17 00:00:00 2001 From: Patrice Chotard Date: Fri, 12 Jan 2018 13:16:08 +0100 Subject: [PATCH 1542/3326] gpio: stmpe: i2c transfer are forbiden in atomic context commit b888fb6f2a278442933e3bfab70262e9a5365fb3 upstream. Move the workaround from stmpe_gpio_irq_unmask() which is executed in atomic context to stmpe_gpio_irq_sync_unlock() which is not. It fixes the following issue: [ 1.500000] BUG: scheduling while atomic: swapper/1/0x00000002 [ 1.500000] CPU: 0 PID: 1 Comm: swapper Not tainted 4.15.0-rc2-00020-gbd4301f-dirty #28 [ 1.520000] Hardware name: STM32 (Device Tree Support) [ 1.520000] [<0000bfc9>] (unwind_backtrace) from [<0000b347>] (show_stack+0xb/0xc) [ 1.530000] [<0000b347>] (show_stack) from [<0001fc49>] (__schedule_bug+0x39/0x58) [ 1.530000] [<0001fc49>] (__schedule_bug) from [<00168211>] (__schedule+0x23/0x2b2) [ 1.550000] [<00168211>] (__schedule) from [<001684f7>] (schedule+0x57/0x64) [ 1.550000] [<001684f7>] (schedule) from [<0016a513>] (schedule_timeout+0x137/0x164) [ 1.550000] [<0016a513>] (schedule_timeout) from [<00168b91>] (wait_for_common+0x8d/0xfc) [ 1.570000] [<00168b91>] (wait_for_common) from [<00139753>] (stm32f4_i2c_xfer+0xe9/0xfe) [ 1.580000] [<00139753>] (stm32f4_i2c_xfer) from [<00138545>] (__i2c_transfer+0x111/0x148) [ 1.590000] [<00138545>] (__i2c_transfer) from [<001385cf>] (i2c_transfer+0x53/0x70) [ 1.590000] [<001385cf>] (i2c_transfer) from [<001388a5>] (i2c_smbus_xfer+0x12f/0x36e) [ 1.600000] [<001388a5>] (i2c_smbus_xfer) from [<00138b49>] (i2c_smbus_read_byte_data+0x1f/0x2a) [ 1.610000] [<00138b49>] (i2c_smbus_read_byte_data) from [<00124fdd>] (__stmpe_reg_read+0xd/0x24) [ 1.620000] [<00124fdd>] (__stmpe_reg_read) from [<001252b3>] (stmpe_reg_read+0x19/0x24) [ 1.630000] [<001252b3>] (stmpe_reg_read) from [<0002c4d1>] (unmask_irq+0x17/0x22) [ 1.640000] [<0002c4d1>] (unmask_irq) from [<0002c57f>] (irq_startup+0x6f/0x78) [ 1.650000] [<0002c57f>] (irq_startup) from [<0002b7a1>] (__setup_irq+0x319/0x47c) [ 1.650000] [<0002b7a1>] (__setup_irq) from [<0002bad3>] (request_threaded_irq+0x6b/0xe8) [ 1.660000] [<0002bad3>] (request_threaded_irq) from [<0002d0b9>] (devm_request_threaded_irq+0x3b/0x6a) [ 1.670000] [<0002d0b9>] (devm_request_threaded_irq) from [<001446e7>] (mmc_gpiod_request_cd_irq+0x49/0x8a) [ 1.680000] [<001446e7>] (mmc_gpiod_request_cd_irq) from [<0013d45d>] (mmc_start_host+0x49/0x60) [ 1.690000] [<0013d45d>] (mmc_start_host) from [<0013e40b>] (mmc_add_host+0x3b/0x54) [ 1.700000] [<0013e40b>] (mmc_add_host) from [<00148119>] (mmci_probe+0x4d1/0x60c) [ 1.710000] [<00148119>] (mmci_probe) from [<000f903b>] (amba_probe+0x7b/0xbe) [ 1.720000] [<000f903b>] (amba_probe) from [<001170e5>] (driver_probe_device+0x169/0x1f8) [ 1.730000] [<001170e5>] (driver_probe_device) from [<001171b7>] (__driver_attach+0x43/0x5c) [ 1.740000] [<001171b7>] (__driver_attach) from [<0011618d>] (bus_for_each_dev+0x3d/0x46) [ 1.740000] [<0011618d>] (bus_for_each_dev) from [<001165cd>] (bus_add_driver+0xcd/0x124) [ 
1.740000] [<001165cd>] (bus_add_driver) from [<00117713>] (driver_register+0x4d/0x7a) [ 1.760000] [<00117713>] (driver_register) from [<001fc765>] (do_one_initcall+0xbd/0xe8) [ 1.770000] [<001fc765>] (do_one_initcall) from [<001fc88b>] (kernel_init_freeable+0xfb/0x134) [ 1.780000] [<001fc88b>] (kernel_init_freeable) from [<00167ee3>] (kernel_init+0x7/0x9c) [ 1.790000] [<00167ee3>] (kernel_init) from [<00009b65>] (ret_from_fork+0x11/0x2c) Signed-off-by: Alexandre TORGUE Signed-off-by: Patrice Chotard Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/gpio/gpio-stmpe.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 16cbc5702865cd..491b0974c0feb1 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d) }; int i, j; + /* + * STMPE1600: to be able to get IRQ from pins, + * a read must be done on GPMR register, or a write in + * GPSR or GPCR registers + */ + if (stmpe->partnum == STMPE1600) { + stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]); + stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]); + } + for (i = 0; i < CACHE_NR_REGS; i++) { /* STMPE801 and STMPE1600 don't have RE and FE registers */ if ((stmpe->partnum == STMPE801 || @@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc); - struct stmpe *stmpe = stmpe_gpio->stmpe; int offset = d->hwirq; int regoffset = offset / 8; int mask = BIT(offset % 8); stmpe_gpio->regs[REG_IE][regoffset] |= mask; - - /* - * STMPE1600 workaround: to be able to get IRQ from pins, - * a read must be done on GPMR register, or a write in - * GPSR or GPCR registers - */ - if (stmpe->partnum == STMPE1600) - stmpe_reg_read(stmpe, - stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]); } static void stmpe_dbg_show_one(struct seq_file *s, From c5845e025423b02235489f154903b8befabebb02 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 22 Jan 2018 13:19:28 +0100 Subject: [PATCH 1543/3326] gpio: Fix kernel stack leak to userspace commit 24bd3efc9d1efb5f756a7c6f807a36ddb6adc671 upstream. The GPIO event descriptor was leaking kernel stack to userspace because we don't zero the variable before use. Ooops. Fix this. Reported-by: Arnd Bergmann Reviewed-by: Bartosz Golaszewski Reviewed-by: Arnd Bergmann Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/gpio/gpiolib.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index eb80dac4e26a52..bdd68ff197dc3f 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -723,6 +723,9 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) struct gpioevent_data ge; int ret, level; + /* Do not leak kernel stack to userspace */ + memset(&ge, 0, sizeof(ge)); + ge.timestamp = ktime_get_real_ns(); level = gpiod_get_value_cansleep(le->desc); From a5ba0b372da3cdc092ce0d98e5433e44aefab3f7 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 19 Jan 2018 14:18:34 +0100 Subject: [PATCH 1544/3326] ALSA: hda - Reduce the suspend time consumption for ALC256 commit 1c9609e3a8cf5997bd35205cfda1ff2218ee793b upstream. ALC256 has its own quirk to override the shutup call, and it contains the COEF update for pulling down the headset jack control. 
Currently, the COEF update is called after clearing the headphone pin, and this seems triggering a stall of the codec communication, and results in a long delay over a second at suspend. A quick resolution is to swap the calls: at first with the COEF update, then clear the headphone pin. Fixes: 4a219ef8f370 ("ALSA: hda/realtek - Add ALC256 HP depop function") Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=198503 Reported-by: Paul Menzel Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 145e92d6ca94a3..b2d039537d5ecf 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3131,11 +3131,13 @@ static void alc256_shutup(struct hda_codec *codec) if (hp_pin_sense) msleep(85); + /* 3k pull low control for Headset jack. */ + /* NOTE: call this before clearing the pin, otherwise codec stalls */ + alc_update_coef_idx(codec, 0x46, 0, 3 << 12); + snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); - alc_update_coef_idx(codec, 0x46, 0, 3 << 12); /* 3k pull low control for Headset jack. */ - if (hp_pin_sense) msleep(100); From 2992182765762c2d5bfa70ff2da5f1841e7aba89 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sun, 26 Nov 2017 00:16:46 +0100 Subject: [PATCH 1545/3326] crypto: ecdh - fix typo in KPP dependency of CRYPTO_ECDH commit b5b9007730ce1d90deaf25d7f678511550744bdc upstream. This fixes a typo in the CRYPTO_KPP dependency of CRYPTO_ECDH. Fixes: 3c4b23901a0c ("crypto: ecdh - Add ECDH software support") Signed-off-by: Hauke Mehrtens Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index ac5fb37e6f4b71..42212b60a0eee4 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -130,7 +130,7 @@ config CRYPTO_DH config CRYPTO_ECDH tristate "ECDH algorithm" - select CRYTPO_KPP + select CRYPTO_KPP select CRYPTO_RNG_DEFAULT help Generic implementation of the ECDH algorithm From 6b3dcff7ca7e05c784ece15cc8d9c0bd913fbda4 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Thu, 18 Jan 2018 20:41:09 +0100 Subject: [PATCH 1546/3326] crypto: aesni - handle zero length dst buffer commit 9c674e1e2f9e24fa4392167efe343749008338e0 upstream. GCM can be invoked with a zero destination buffer. This is possible if the AAD and the ciphertext have zero lengths and only the tag exists in the source buffer (i.e. a source buffer cannot be zero). In this case, the GCM cipher only performs the authentication and no decryption operation. When the destination buffer has zero length, it is possible that no page is mapped to the SG pointing to the destination. In this case, sg_page(req->dst) is an invalid access. Therefore, page accesses should only be allowed if the req->dst->length is non-zero which is the indicator that a page must exist. This fixes a crash that can be triggered by user space via AF_ALG. 
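As a concrete instance of the case described above (for illustration; the numbers assume the default 16-byte tag): an AEAD decryption request with assoclen = 0 and cryptlen = 16 carries nothing but the authentication tag in req->src, so the plaintext to be produced is 16 - 16 = 0 bytes and req->dst may legitimately describe a zero-length buffer with no page behind it. The added req->dst->length test short-circuits the fast-path condition before sg_page(req->dst) is evaluated in exactly that situation.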
Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/aesni-intel_glue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 5c15d6b573299e..fe2c7c16fd3765 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -823,7 +823,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen, if (sg_is_last(req->src) && (!PageHighMem(sg_page(req->src)) || req->src->offset + req->src->length <= PAGE_SIZE) && - sg_is_last(req->dst) && + sg_is_last(req->dst) && req->dst->length && (!PageHighMem(sg_page(req->dst)) || req->dst->offset + req->dst->length <= PAGE_SIZE)) { one_entry_in_sg = 1; From e50f1d7c0d0747378aa49cf5ef4863dcebd6a485 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Wed, 13 Dec 2017 14:53:43 +0100 Subject: [PATCH 1547/3326] crypto: aesni - fix typo in generic_gcmaes_decrypt commit 106840c41096a01079d3a2025225029c13713802 upstream. generic_gcmaes_decrypt needs to use generic_gcmaes_ctx, not aesni_rfc4106_gcm_ctx. This is actually harmless because the fields in struct generic_gcmaes_ctx share the layout of the same fields in aesni_rfc4106_gcm_ctx. Fixes: cce2ea8d90fe ("crypto: aesni - add generic gcm(aes)") Signed-off-by: Sabrina Dubroca Reviewed-by: Stefano Brivio Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/aesni-intel_glue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index fe2c7c16fd3765..f54f4730f6b6c9 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -1115,7 +1115,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req) { __be32 counter = cpu_to_be32(1); struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); + struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); From cffaf2b6b179227b31f8e21d7f65b68630fc6606 Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Tue, 22 Aug 2017 10:08:08 +0200 Subject: [PATCH 1548/3326] crypto: gcm - add GCM IV size constant commit ef780324592dd639e4bfbc5b9bf8934b234b7c99 upstream. Many GCM users use directly GCM IV size instead of using some constant. This patch add all IV size constant used by GCM. Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- include/crypto/gcm.h | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 include/crypto/gcm.h diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h new file mode 100644 index 00000000000000..c50e057ea17e30 --- /dev/null +++ b/include/crypto/gcm.h @@ -0,0 +1,8 @@ +#ifndef _CRYPTO_GCM_H +#define _CRYPTO_GCM_H + +#define GCM_AES_IV_SIZE 12 +#define GCM_RFC4106_IV_SIZE 8 +#define GCM_RFC4543_IV_SIZE 8 + +#endif From e704e550cb7bfcd86044f875abce2e7703139ba9 Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Tue, 22 Aug 2017 10:08:18 +0200 Subject: [PATCH 1549/3326] crypto: aesni - Use GCM IV size constant commit 46d93748e5a3628f9f553832cd64d8a59d8bafde upstream. This patch replace GCM IV size value by their constant name. 
Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/aesni-intel_glue.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index f54f4730f6b6c9..7ff4c3b3322bb3 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -1131,7 +1132,7 @@ static struct aead_alg aesni_aead_algs[] = { { .setauthsize = common_rfc4106_set_authsize, .encrypt = helper_rfc4106_encrypt, .decrypt = helper_rfc4106_decrypt, - .ivsize = 8, + .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = 16, .base = { .cra_name = "__gcm-aes-aesni", @@ -1149,7 +1150,7 @@ static struct aead_alg aesni_aead_algs[] = { { .setauthsize = rfc4106_set_authsize, .encrypt = rfc4106_encrypt, .decrypt = rfc4106_decrypt, - .ivsize = 8, + .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = 16, .base = { .cra_name = "rfc4106(gcm(aes))", @@ -1165,7 +1166,7 @@ static struct aead_alg aesni_aead_algs[] = { { .setauthsize = generic_gcmaes_set_authsize, .encrypt = generic_gcmaes_encrypt, .decrypt = generic_gcmaes_decrypt, - .ivsize = 12, + .ivsize = GCM_AES_IV_SIZE, .maxauthsize = 16, .base = { .cra_name = "gcm(aes)", From 265502fe9ab62a92d06cc7aa518d349fd60bdba1 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Wed, 13 Dec 2017 14:54:36 +0100 Subject: [PATCH 1550/3326] crypto: aesni - add wrapper for generic gcm(aes) commit fc8517bf627c9b834f80274a1bc9ecd39b27231b upstream. When I added generic-gcm-aes I didn't add a wrapper like the one provided for rfc4106(gcm(aes)). We need to add a cryptd wrapper to fall back on in case the FPU is not available, otherwise we might corrupt the FPU state. Fixes: cce2ea8d90fe ("crypto: aesni - add generic gcm(aes)") Reported-by: Ilya Lesokhin Signed-off-by: Sabrina Dubroca Reviewed-by: Stefano Brivio Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/aesni-intel_glue.c | 66 ++++++++++++++++++++++++------ 1 file changed, 54 insertions(+), 12 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 7ff4c3b3322bb3..c690ddc78c0363 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -690,8 +690,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); } -static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, - unsigned int key_len) +static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key, + unsigned int key_len) { struct cryptd_aead **ctx = crypto_aead_ctx(parent); struct cryptd_aead *cryptd_tfm = *ctx; @@ -716,8 +716,8 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead, /* This is the Integrity Check Value (aka the authentication tag length and can * be 8, 12 or 16 bytes long. 
*/ -static int rfc4106_set_authsize(struct crypto_aead *parent, - unsigned int authsize) +static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent, + unsigned int authsize) { struct cryptd_aead **ctx = crypto_aead_ctx(parent); struct cryptd_aead *cryptd_tfm = *ctx; @@ -929,7 +929,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req) aes_ctx); } -static int rfc4106_encrypt(struct aead_request *req) +static int gcmaes_wrapper_encrypt(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_aead **ctx = crypto_aead_ctx(tfm); @@ -945,7 +945,7 @@ static int rfc4106_encrypt(struct aead_request *req) return crypto_aead_encrypt(req); } -static int rfc4106_decrypt(struct aead_request *req) +static int gcmaes_wrapper_decrypt(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_aead **ctx = crypto_aead_ctx(tfm); @@ -1127,6 +1127,30 @@ static int generic_gcmaes_decrypt(struct aead_request *req) aes_ctx); } +static int generic_gcmaes_init(struct crypto_aead *aead) +{ + struct cryptd_aead *cryptd_tfm; + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni", + CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + *ctx = cryptd_tfm; + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); + + return 0; +} + +static void generic_gcmaes_exit(struct crypto_aead *aead) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_free_aead(*ctx); +} + static struct aead_alg aesni_aead_algs[] = { { .setkey = common_rfc4106_set_key, .setauthsize = common_rfc4106_set_authsize, @@ -1146,10 +1170,10 @@ static struct aead_alg aesni_aead_algs[] = { { }, { .init = rfc4106_init, .exit = rfc4106_exit, - .setkey = rfc4106_set_key, - .setauthsize = rfc4106_set_authsize, - .encrypt = rfc4106_encrypt, - .decrypt = rfc4106_decrypt, + .setkey = gcmaes_wrapper_set_key, + .setauthsize = gcmaes_wrapper_set_authsize, + .encrypt = gcmaes_wrapper_encrypt, + .decrypt = gcmaes_wrapper_decrypt, .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = 16, .base = { @@ -1168,14 +1192,32 @@ static struct aead_alg aesni_aead_algs[] = { { .decrypt = generic_gcmaes_decrypt, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = 16, + .base = { + .cra_name = "__generic-gcm-aes-aesni", + .cra_driver_name = "__driver-generic-gcm-aes-aesni", + .cra_priority = 0, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), + .cra_alignmask = AESNI_ALIGN - 1, + .cra_module = THIS_MODULE, + }, +}, { + .init = generic_gcmaes_init, + .exit = generic_gcmaes_exit, + .setkey = gcmaes_wrapper_set_key, + .setauthsize = gcmaes_wrapper_set_authsize, + .encrypt = gcmaes_wrapper_encrypt, + .decrypt = gcmaes_wrapper_decrypt, + .ivsize = GCM_AES_IV_SIZE, + .maxauthsize = 16, .base = { .cra_name = "gcm(aes)", .cra_driver_name = "generic-gcm-aesni", .cra_priority = 400, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), - .cra_alignmask = AESNI_ALIGN - 1, + .cra_ctxsize = sizeof(struct cryptd_aead *), .cra_module = THIS_MODULE, }, } }; From f898a39985a30050abdad7a40b5b0098801509d4 Mon Sep 17 00:00:00 2001 From: Junaid Shahid Date: Wed, 20 Dec 2017 17:08:37 -0800 Subject: [PATCH 1551/3326] crypto: aesni - Fix out-of-bounds access of the data buffer in generic-gcm-aesni commit b20209c91e23a9bbad9cac2f80bc16b3c259e10e upstream. 
The aesni_gcm_enc/dec functions can access memory before the start of the data buffer if the length of the data buffer is less than 16 bytes. This is because they perform the read via a single 16-byte load. This can potentially result in accessing a page that is not mapped and thus causing the machine to crash. This patch fixes that by reading the partial block byte-by-byte and optionally an via 8-byte load if the block was at least 8 bytes. Fixes: 0487ccac ("crypto: aesni - make non-AVX AES-GCM work with any aadlen") Signed-off-by: Junaid Shahid Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/aesni-intel_asm.S | 87 ++++++++++++++++--------------- 1 file changed, 45 insertions(+), 42 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 3d09e3aca18dad..63ba858b1a776b 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -257,6 +257,37 @@ aad_shift_arr: pxor \TMP1, \GH # result is in TMP1 .endm +# Reads DLEN bytes starting at DPTR and stores in XMMDst +# where 0 < DLEN < 16 +# Clobbers %rax, DLEN and XMM1 +.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst + cmp $8, \DLEN + jl _read_lt8_\@ + mov (\DPTR), %rax + MOVQ_R64_XMM %rax, \XMMDst + sub $8, \DLEN + jz _done_read_partial_block_\@ + xor %eax, %eax +_read_next_byte_\@: + shl $8, %rax + mov 7(\DPTR, \DLEN, 1), %al + dec \DLEN + jnz _read_next_byte_\@ + MOVQ_R64_XMM %rax, \XMM1 + pslldq $8, \XMM1 + por \XMM1, \XMMDst + jmp _done_read_partial_block_\@ +_read_lt8_\@: + xor %eax, %eax +_read_next_byte_lt8_\@: + shl $8, %rax + mov -1(\DPTR, \DLEN, 1), %al + dec \DLEN + jnz _read_next_byte_lt8_\@ + MOVQ_R64_XMM %rax, \XMMDst +_done_read_partial_block_\@: +.endm + /* * if a = number of total plaintext bytes * b = floor(a/16) @@ -1386,14 +1417,6 @@ _esb_loop_\@: * * AAD Format with 64-bit Extended Sequence Number * -* aadLen: -* from the definition of the spec, aadLen can only be 8 or 12 bytes. -* The code supports 16 too but for other sizes, the code will fail. -* -* TLen: -* from the definition of the spec, TLen can only be 8, 12 or 16 bytes. -* For other sizes, the code will fail. 
-* * poly = x^128 + x^127 + x^126 + x^121 + 1 * *****************************************************************************/ @@ -1487,19 +1510,16 @@ _zero_cipher_left_decrypt: PSHUFB_XMM %xmm10, %xmm0 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn) - sub $16, %r11 - add %r13, %r11 - movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block - lea SHIFT_MASK+16(%rip), %r12 - sub %r13, %r12 -# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes -# (%r13 is the number of bytes in plaintext mod 16) - movdqu (%r12), %xmm2 # get the appropriate shuffle mask - PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes + lea (%arg3,%r11,1), %r10 + mov %r13, %r12 + READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 + + lea ALL_F+16(%rip), %r12 + sub %r13, %r12 movdqa %xmm1, %xmm2 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn) - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 + movdqu (%r12), %xmm1 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0 pand %xmm1, %xmm2 @@ -1508,9 +1528,6 @@ _zero_cipher_left_decrypt: pxor %xmm2, %xmm8 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 - # GHASH computation for the last <16 byte block - sub %r13, %r11 - add $16, %r11 # output %r13 bytes MOVQ_R64_XMM %xmm0, %rax @@ -1664,14 +1681,6 @@ ENDPROC(aesni_gcm_dec) * * AAD Format with 64-bit Extended Sequence Number * -* aadLen: -* from the definition of the spec, aadLen can only be 8 or 12 bytes. -* The code supports 16 too but for other sizes, the code will fail. -* -* TLen: -* from the definition of the spec, TLen can only be 8, 12 or 16 bytes. -* For other sizes, the code will fail. -* * poly = x^128 + x^127 + x^126 + x^121 + 1 ***************************************************************************/ ENTRY(aesni_gcm_enc) @@ -1764,19 +1773,16 @@ _zero_cipher_left_encrypt: movdqa SHUF_MASK(%rip), %xmm10 PSHUFB_XMM %xmm10, %xmm0 - ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) - sub $16, %r11 - add %r13, %r11 - movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks - lea SHIFT_MASK+16(%rip), %r12 + + lea (%arg3,%r11,1), %r10 + mov %r13, %r12 + READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 + + lea ALL_F+16(%rip), %r12 sub %r13, %r12 - # adjust the shuffle mask pointer to be able to shift 16-r13 bytes - # (%r13 is the number of bytes in plaintext mod 16) - movdqu (%r12), %xmm2 # get the appropriate shuffle mask - PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn) - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 + movdqu (%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm0 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 movdqa SHUF_MASK(%rip), %xmm10 @@ -1785,9 +1791,6 @@ _zero_cipher_left_encrypt: pxor %xmm0, %xmm8 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 # GHASH computation for the last <16 byte block - sub %r13, %r11 - add $16, %r11 - movdqa SHUF_MASK(%rip), %xmm10 PSHUFB_XMM %xmm10, %xmm0 From 5ca02df0160233e463b043897cd17e32d38cb70e Mon Sep 17 00:00:00 2001 From: Junaid Shahid Date: Wed, 20 Dec 2017 17:08:38 -0800 Subject: [PATCH 1552/3326] crypto: aesni - Fix out-of-bounds access of the AAD buffer in generic-gcm-aesni commit 1ecdd37e308ca149dc378cce225068cbac54e3a6 upstream. The aesni_gcm_enc/dec functions can access memory after the end of the AAD buffer if the AAD length is not a multiple of 4 bytes. 
It didn't matter with rfc4106-gcm-aesni as in that case the AAD was always followed by the 8 byte IV, but that is no longer the case with generic-gcm-aesni. This can potentially result in accessing a page that is not mapped and thus causing the machine to crash. This patch fixes that by reading the last <16 byte block of the AAD byte-by-byte and optionally via an 8-byte load if the block was at least 8 bytes. Fixes: 0487ccac ("crypto: aesni - make non-AVX AES-GCM work with any aadlen") Signed-off-by: Junaid Shahid Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/aesni-intel_asm.S | 112 ++++-------------------------- 1 file changed, 12 insertions(+), 100 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 63ba858b1a776b..12e8484a8ee79b 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -90,30 +90,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 ALL_F: .octa 0xffffffffffffffffffffffffffffffff .octa 0x00000000000000000000000000000000 -.section .rodata -.align 16 -.type aad_shift_arr, @object -.size aad_shift_arr, 272 -aad_shift_arr: - .octa 0xffffffffffffffffffffffffffffffff - .octa 0xffffffffffffffffffffffffffffff0C - .octa 0xffffffffffffffffffffffffffff0D0C - .octa 0xffffffffffffffffffffffffff0E0D0C - .octa 0xffffffffffffffffffffffff0F0E0D0C - .octa 0xffffffffffffffffffffff0C0B0A0908 - .octa 0xffffffffffffffffffff0D0C0B0A0908 - .octa 0xffffffffffffffffff0E0D0C0B0A0908 - .octa 0xffffffffffffffff0F0E0D0C0B0A0908 - .octa 0xffffffffffffff0C0B0A090807060504 - .octa 0xffffffffffff0D0C0B0A090807060504 - .octa 0xffffffffff0E0D0C0B0A090807060504 - .octa 0xffffffff0F0E0D0C0B0A090807060504 - .octa 0xffffff0C0B0A09080706050403020100 - .octa 0xffff0D0C0B0A09080706050403020100 - .octa 0xff0E0D0C0B0A09080706050403020100 - .octa 0x0F0E0D0C0B0A09080706050403020100 - - .text @@ -304,62 +280,30 @@ _done_read_partial_block_\@: XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation MOVADQ SHUF_MASK(%rip), %xmm14 mov arg7, %r10 # %r10 = AAD - mov arg8, %r12 # %r12 = aadLen - mov %r12, %r11 + mov arg8, %r11 # %r11 = aadLen pxor %xmm\i, %xmm\i pxor \XMM2, \XMM2 cmp $16, %r11 - jl _get_AAD_rest8\num_initial_blocks\operation + jl _get_AAD_rest\num_initial_blocks\operation _get_AAD_blocks\num_initial_blocks\operation: movdqu (%r10), %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor %xmm\i, \XMM2 GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 add $16, %r10 - sub $16, %r12 sub $16, %r11 cmp $16, %r11 jge _get_AAD_blocks\num_initial_blocks\operation movdqu \XMM2, %xmm\i + + /* read the last <16B of AAD */ +_get_AAD_rest\num_initial_blocks\operation: cmp $0, %r11 je _get_AAD_done\num_initial_blocks\operation - pxor %xmm\i,%xmm\i - - /* read the last <16B of AAD. 
since we have at least 4B of - data right after the AAD (the ICV, and maybe some CT), we can - read 4B/8B blocks safely, and then get rid of the extra stuff */ -_get_AAD_rest8\num_initial_blocks\operation: - cmp $4, %r11 - jle _get_AAD_rest4\num_initial_blocks\operation - movq (%r10), \TMP1 - add $8, %r10 - sub $8, %r11 - pslldq $8, \TMP1 - psrldq $8, %xmm\i - pxor \TMP1, %xmm\i - jmp _get_AAD_rest8\num_initial_blocks\operation -_get_AAD_rest4\num_initial_blocks\operation: - cmp $0, %r11 - jle _get_AAD_rest0\num_initial_blocks\operation - mov (%r10), %eax - movq %rax, \TMP1 - add $4, %r10 - sub $4, %r10 - pslldq $12, \TMP1 - psrldq $4, %xmm\i - pxor \TMP1, %xmm\i -_get_AAD_rest0\num_initial_blocks\operation: - /* finalize: shift out the extra bytes we read, and align - left. since pslldq can only shift by an immediate, we use - vpshufb and an array of shuffle masks */ - movq %r12, %r11 - salq $4, %r11 - movdqu aad_shift_arr(%r11), \TMP1 - PSHUFB_XMM \TMP1, %xmm\i -_get_AAD_rest_final\num_initial_blocks\operation: + READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor \XMM2, %xmm\i GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 @@ -563,62 +507,30 @@ _initial_blocks_done\num_initial_blocks\operation: XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation MOVADQ SHUF_MASK(%rip), %xmm14 mov arg7, %r10 # %r10 = AAD - mov arg8, %r12 # %r12 = aadLen - mov %r12, %r11 + mov arg8, %r11 # %r11 = aadLen pxor %xmm\i, %xmm\i pxor \XMM2, \XMM2 cmp $16, %r11 - jl _get_AAD_rest8\num_initial_blocks\operation + jl _get_AAD_rest\num_initial_blocks\operation _get_AAD_blocks\num_initial_blocks\operation: movdqu (%r10), %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor %xmm\i, \XMM2 GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 add $16, %r10 - sub $16, %r12 sub $16, %r11 cmp $16, %r11 jge _get_AAD_blocks\num_initial_blocks\operation movdqu \XMM2, %xmm\i + + /* read the last <16B of AAD */ +_get_AAD_rest\num_initial_blocks\operation: cmp $0, %r11 je _get_AAD_done\num_initial_blocks\operation - pxor %xmm\i,%xmm\i - - /* read the last <16B of AAD. since we have at least 4B of - data right after the AAD (the ICV, and maybe some PT), we can - read 4B/8B blocks safely, and then get rid of the extra stuff */ -_get_AAD_rest8\num_initial_blocks\operation: - cmp $4, %r11 - jle _get_AAD_rest4\num_initial_blocks\operation - movq (%r10), \TMP1 - add $8, %r10 - sub $8, %r11 - pslldq $8, \TMP1 - psrldq $8, %xmm\i - pxor \TMP1, %xmm\i - jmp _get_AAD_rest8\num_initial_blocks\operation -_get_AAD_rest4\num_initial_blocks\operation: - cmp $0, %r11 - jle _get_AAD_rest0\num_initial_blocks\operation - mov (%r10), %eax - movq %rax, \TMP1 - add $4, %r10 - sub $4, %r10 - pslldq $12, \TMP1 - psrldq $4, %xmm\i - pxor \TMP1, %xmm\i -_get_AAD_rest0\num_initial_blocks\operation: - /* finalize: shift out the extra bytes we read, and align - left. 
since pslldq can only shift by an immediate, we use - vpshufb and an array of shuffle masks */ - movq %r12, %r11 - salq $4, %r11 - movdqu aad_shift_arr(%r11), \TMP1 - PSHUFB_XMM \TMP1, %xmm\i -_get_AAD_rest_final\num_initial_blocks\operation: + READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor \XMM2, %xmm\i GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 From bb9eec7b301eeb178fc1305708ae8902fa507953 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 26 Dec 2017 17:21:17 +0100 Subject: [PATCH 1553/3326] crypto: inside-secure - fix hash when length is a multiple of a block commit 809778e02cd45d0625439fee67688f655627bb3c upstream. This patch fixes the hash support in the SafeXcel driver when the update size is a multiple of a block size, and when a final call is made just after with a size of 0. In such cases the driver should cache the last block from the update to avoid handling 0 length data on the final call (that's a hardware limitation). Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver") Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/inside-secure/safexcel_hash.c | 36 ++++++++++++++------ 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 3980f946874fa0..ebf1388e1e100c 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -185,17 +185,31 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, else cache_len = queued - areq->nbytes; - /* - * If this is not the last request and the queued data does not fit - * into full blocks, cache it for the next send() call. - */ - extra = queued & (crypto_ahash_blocksize(ahash) - 1); - if (!req->last_req && extra) { - sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), - req->cache_next, extra, areq->nbytes - extra); - - queued -= extra; - len -= extra; + if (!req->last_req) { + /* If this is not the last request and the queued data does not + * fit into full blocks, cache it for the next send() call. + */ + extra = queued & (crypto_ahash_blocksize(ahash) - 1); + if (!extra) + /* If this is not the last request and the queued data + * is a multiple of a block, cache the last one for now. + */ + extra = queued - crypto_ahash_blocksize(ahash); + + if (extra) { + sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), + req->cache_next, extra, + areq->nbytes - extra); + + queued -= extra; + len -= extra; + + if (!queued) { + *commands = 0; + *results = 0; + return 0; + } + } } spin_lock_bh(&priv->ring[ring].egress_lock); From 199d97815d32acd0f1d20cbd04e39e3dead59a8b Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 26 Dec 2017 17:21:16 +0100 Subject: [PATCH 1554/3326] crypto: inside-secure - avoid unmapping DMA memory that was not mapped commit c957f8b3e2e54b29f53ef69decc87bbc858c9b58 upstream. This patch adds a parameter in the SafeXcel ahash request structure to keep track of the number of SG entries mapped. This allows not to call dma_unmap_sg() when dma_map_sg() wasn't called in the first place. This also removes a warning when the debugging of the DMA-API is enabled in the kernel configuration: "DMA-API: device driver tries to free DMA memory it has not allocated". 
Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver") Signed-off-by: Antoine Tenart Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/inside-secure/safexcel_hash.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index ebf1388e1e100c..0626b33d2886ea 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -33,6 +33,8 @@ struct safexcel_ahash_req { bool finish; bool hmac; + int nents; + u8 state_sz; /* expected sate size, only set once */ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; @@ -151,8 +153,10 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, result_sz = crypto_ahash_digestsize(ahash); memcpy(sreq->state, areq->result, result_sz); - dma_unmap_sg(priv->dev, areq->src, - sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE); + if (sreq->nents) { + dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE); + sreq->nents = 0; + } safexcel_free_context(priv, async, sreq->state_sz); @@ -177,7 +181,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, struct safexcel_command_desc *cdesc, *first_cdesc = NULL; struct safexcel_result_desc *rdesc; struct scatterlist *sg; - int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; + int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; queued = len = req->len - req->processed; if (queued < crypto_ahash_blocksize(ahash)) @@ -247,15 +251,15 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, } /* Now handle the current ahash request buffer(s) */ - nents = dma_map_sg(priv->dev, areq->src, - sg_nents_for_len(areq->src, areq->nbytes), - DMA_TO_DEVICE); - if (!nents) { + req->nents = dma_map_sg(priv->dev, areq->src, + sg_nents_for_len(areq->src, areq->nbytes), + DMA_TO_DEVICE); + if (!req->nents) { ret = -ENOMEM; goto cdesc_rollback; } - for_each_sg(areq->src, sg, nents, i) { + for_each_sg(areq->src, sg, req->nents, i) { int sglen = sg_dma_len(sg); /* Do not overflow the request */ From b9788e278cec29fef2e837633fa1b97030417e78 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 19 Jan 2018 12:04:33 +0000 Subject: [PATCH 1555/3326] crypto: sha3-generic - fixes for alignment and big endian operation commit c013cee99d5a18aec8c71fee8f5f41369cd12595 upstream. Ensure that the input is byte swabbed before injecting it into the SHA3 transform. Use the get_unaligned() accessor for this so that we don't perform unaligned access inadvertently on architectures that do not support that. 
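A minimal self-contained sketch of the access pattern being adopted (illustrative only; it restates what the hunk below does rather than adding new logic):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Absorb one 8-byte lane of message data into a Keccak state word.
	 * Casting src to u64* would perform a potentially unaligned native-
	 * endian load; SHA-3 lanes are little-endian byte strings, so the
	 * explicit accessor is correct on any alignment and any endianness. */
	static inline void sha3_absorb_lane(u64 *lane, const u8 *src)
	{
		*lane ^= get_unaligned_le64(src);
	}

The same substitution is applied in both sha3_update() and sha3_final().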
Fixes: 53964b9ee63b7075 ("crypto: sha3 - Add SHA-3 hash algorithm") Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/sha3_generic.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 7e8ed96236cefa..a68be626017c2b 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -18,6 +18,7 @@ #include #include #include +#include #define KECCAK_ROUNDS 24 @@ -149,7 +150,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data, unsigned int i; for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= ((u64 *) src)[i]; + sctx->st[i] ^= get_unaligned_le64(src + 8 * i); keccakf(sctx->st); done += sctx->rsiz; @@ -174,7 +175,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out) sctx->buf[sctx->rsiz - 1] |= 0x80; for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= ((u64 *) sctx->buf)[i]; + sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); keccakf(sctx->st); From f41c8a003196df125b32ed8d78a66f9ea22ccce4 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Tue, 2 Jan 2018 08:55:25 +0100 Subject: [PATCH 1556/3326] crypto: af_alg - whitelist mask and type commit bb30b8848c85e18ca7e371d0a869e94b3e383bdf upstream. The user space interface allows specifying the type and mask field used to allocate the cipher. Only a subset of the possible flags are intended for user space. Therefore, white-list the allowed flags. In case the user space caller uses at least one non-allowed flag, EINVAL is returned. Reported-by: syzbot Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/af_alg.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 6ec360213107fa..53b7fa4cf4ab8d 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent); static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { - const u32 forbidden = CRYPTO_ALG_INTERNAL; + const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct sockaddr_alg *sa = (void *)uaddr; @@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) void *private; int err; + /* If caller uses non-allowed flag, return error. */ + if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) + return -EINVAL; + if (sock->state == SS_CONNECTED) return -EINVAL; @@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (IS_ERR(type)) return PTR_ERR(type); - private = type->bind(sa->salg_name, - sa->salg_feat & ~forbidden, - sa->salg_mask & ~forbidden); + private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); if (IS_ERR(private)) { module_put(type->owner); return PTR_ERR(private); From e3f7e6f2e03396df38491a484c6381e9301ff0f6 Mon Sep 17 00:00:00 2001 From: Aaron Armstrong Skomra Date: Thu, 7 Dec 2017 12:31:56 -0800 Subject: [PATCH 1557/3326] HID: wacom: EKR: ensure devres groups at higher indexes are released commit 791ae273731fa85d3332e45064dab177ae663e80 upstream. Background: ExpressKey Remotes communicate their events via usb dongle. Each dongle can hold up to 5 pairings at one time and one EKR (identified by its serial number) can unfortunately be paired with its dongle more than once. The pairing takes place in a round-robin fashion. 
Input devices are only created once per EKR, when a new serial number is seen in the list of pairings. However, if a device is created for a "higher" paring index and subsequently a second pairing occurs at a lower pairing index, unpairing the remote with that serial number from any pairing index will currently cause a driver crash. This occurs infrequently, as two remotes are necessary to trigger this bug and most users have only one remote. As an illustration, to trigger the bug you need to have two remotes, and pair them in this order: 1. slot 0 -> remote 1 (input device created for remote 1) 2. slot 1 -> remote 1 (duplicate pairing - no device created) 3. slot 2 -> remote 1 (duplicate pairing - no device created) 4. slot 3 -> remote 1 (duplicate pairing - no device created) 5. slot 4 -> remote 2 (input device created for remote 2) 6. slot 0 -> remote 2 (1 destroyed and recreated at slot 1) 7. slot 1 -> remote 2 (1 destroyed and recreated at slot 2) 8. slot 2 -> remote 2 (1 destroyed and recreated at slot 3) 9. slot 3 -> remote 2 (1 destroyed and not recreated) 10. slot 4 -> remote 2 (2 was already in this slot so no changes) 11. slot 0 -> remote 1 (The current code sees remote 2 was paired over in one of the dongle slots it occupied and attempts to remove all information about remote 2 [1]. It calls wacom_remote_destroy_one for remote 2, but the destroy function assumes the lowest index is where the remote's input device was created. The code "cleans up" the other remote 2 pairings including the one which the input device was based on, assuming they were were just duplicate pairings. However, the cleanup doesn't call the devres release function for the input device that was created in slot 4). This issue is fixed by this commit. [1] Remote 2 should subsequently be re-created on the next packet from the EKR at the lowest numbered slot that it occupies (here slot 1). 
Fixes: f9036bd43602 ("HID: wacom: EKR: use devres groups to manage resources") Signed-off-by: Aaron Armstrong Skomra Signed-off-by: Jiri Kosina Signed-off-by: Greg Kroah-Hartman --- drivers/hid/wacom_sys.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 906e654fb0ba42..65f1cfbbe7fec5 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2340,23 +2340,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index) int i; unsigned long flags; - spin_lock_irqsave(&remote->remote_lock, flags); - remote->remotes[index].registered = false; - spin_unlock_irqrestore(&remote->remote_lock, flags); + for (i = 0; i < WACOM_MAX_REMOTES; i++) { + if (remote->remotes[i].serial == serial) { - if (remote->remotes[index].battery.battery) - devres_release_group(&wacom->hdev->dev, - &remote->remotes[index].battery.bat_desc); + spin_lock_irqsave(&remote->remote_lock, flags); + remote->remotes[i].registered = false; + spin_unlock_irqrestore(&remote->remote_lock, flags); - if (remote->remotes[index].group.name) - devres_release_group(&wacom->hdev->dev, - &remote->remotes[index]); + if (remote->remotes[i].battery.battery) + devres_release_group(&wacom->hdev->dev, + &remote->remotes[i].battery.bat_desc); + + if (remote->remotes[i].group.name) + devres_release_group(&wacom->hdev->dev, + &remote->remotes[i]); - for (i = 0; i < WACOM_MAX_REMOTES; i++) { - if (remote->remotes[i].serial == serial) { remote->remotes[i].serial = 0; remote->remotes[i].group.name = NULL; - remote->remotes[i].registered = false; remote->remotes[i].battery.battery = NULL; wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN; } From c76133afb9033e47a4b944f69a6d8f81450a9983 Mon Sep 17 00:00:00 2001 From: Jason Gerecke Date: Tue, 26 Dec 2017 14:53:55 -0800 Subject: [PATCH 1558/3326] HID: wacom: Fix reporting of touch toggle (WACOM_HID_WD_MUTE_DEVICE) events commit 403c0f681c1964ff1db8c2fb8de8c4067779d081 upstream. Touch toggle softkeys send a '1' while pressed and a '0' while released, requring the kernel to keep track of wether touch should be enabled or disabled. The code does not handle the state transitions properly, however. If the key is pressed repeatedly, the following four states of states are cycled through (assuming touch starts out enabled): Press: shared->is_touch_on => 0, SW_MUTE_DEVICE => 1 Release: shared->is_touch_on => 0, SW_MUTE_DEVICE => 1 Press: shared->is_touch_on => 1, SW_MUTE_DEVICE => 0 Release: shared->is_touch_on => 1, SW_MUTE_DEVICE => 1 The hardware always properly enables/disables touch when the key is pressed but applications that listen for SW_MUTE_DEVICE events to provide feedback about the state will only ever show touch as being enabled while the key is held, and only every-other time. This sequence occurs because the fallthrough WACOM_HID_WD_TOUCHONOFF case is always handled, and it uses the value of the *local* is_touch_on variable as the value to report to userspace. The local value is equal to the shared value when the button is pressed, but equal to zero when the button is released. Reporting the shared value to userspace fixes this problem, but the fallthrough case needs to update the shared value in an incompatible way (which is why the local variable was introduced in the first place). To work around this, we just handle both cases in a single block of code and update the shared variable as appropriate. 
Fixes: d793ff8187 ("HID: wacom: generic: support touch on/off softkey") Signed-off-by: Jason Gerecke Reviewed-by: Aaron Skomra Tested-by: Aaron Skomra Signed-off-by: Jiri Kosina Signed-off-by: Greg Kroah-Hartman --- drivers/hid/wacom_wac.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index aa692e28b2cd60..70cbe1e5a3d206 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1924,7 +1924,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field struct wacom_features *features = &wacom_wac->features; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); int i; - bool is_touch_on = value; bool do_report = false; /* @@ -1969,16 +1968,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field break; case WACOM_HID_WD_MUTE_DEVICE: - if (wacom_wac->shared->touch_input && value) { - wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on; - is_touch_on = wacom_wac->shared->is_touch_on; - } - - /* fall through*/ case WACOM_HID_WD_TOUCHONOFF: if (wacom_wac->shared->touch_input) { + bool *is_touch_on = &wacom_wac->shared->is_touch_on; + + if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value) + *is_touch_on = !(*is_touch_on); + else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF) + *is_touch_on = value; + input_report_switch(wacom_wac->shared->touch_input, - SW_MUTE_DEVICE, !is_touch_on); + SW_MUTE_DEVICE, !(*is_touch_on)); input_sync(wacom_wac->shared->touch_input); } break; From aecad437fd709b3eeb9ed7eb336646e413999139 Mon Sep 17 00:00:00 2001 From: Jesse Chan Date: Mon, 20 Nov 2017 12:58:27 -0800 Subject: [PATCH 1559/3326] power: reset: zx-reboot: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE commit 348c7cf5fcbcb68838255759d4cb45d039af36d2 upstream. This change resolves a new compile-time warning when built as a loadable module: WARNING: modpost: missing MODULE_LICENSE() in drivers/power/reset/zx-reboot.o see include/linux/module.h for more information This adds the license as "GPL v2", which matches the header of the file. MODULE_DESCRIPTION and MODULE_AUTHOR are also added. Signed-off-by: Jesse Chan Signed-off-by: Sebastian Reichel Signed-off-by: Greg Kroah-Hartman --- drivers/power/reset/zx-reboot.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c index 7549c7f74a3caa..c03e96e6a041fb 100644 --- a/drivers/power/reset/zx-reboot.c +++ b/drivers/power/reset/zx-reboot.c @@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = { }, }; module_platform_driver(zx_reboot_driver); + +MODULE_DESCRIPTION("ZTE SoCs reset driver"); +MODULE_AUTHOR("Jun Nie "); +MODULE_LICENSE("GPL v2"); From 925e26b922c525e21a905bf99b4b20247d521c4f Mon Sep 17 00:00:00 2001 From: Jesse Chan Date: Mon, 20 Nov 2017 12:54:52 -0800 Subject: [PATCH 1560/3326] gpio: iop: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE commit 97b03136e1b637d7a9d2274c099e44ecf23f1103 upstream. This change resolves a new compile-time warning when built as a loadable module: WARNING: modpost: missing MODULE_LICENSE() in drivers/gpio/gpio-iop.o see include/linux/module.h for more information This adds the license as "GPL", which matches the header of the file. MODULE_DESCRIPTION and MODULE_AUTHOR are also added. 
Signed-off-by: Jesse Chan Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/gpio/gpio-iop.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c index 98c7ff2a76e76a..8d62db447ec1ba 100644 --- a/drivers/gpio/gpio-iop.c +++ b/drivers/gpio/gpio-iop.c @@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void) return platform_driver_register(&iop3xx_gpio_driver); } arch_initcall(iop3xx_gpio_init); + +MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors"); +MODULE_AUTHOR("Lennert Buytenhek "); +MODULE_LICENSE("GPL"); From bf8c4b3dd7bfd47920e61ad5f67c827a2e66c01b Mon Sep 17 00:00:00 2001 From: Jesse Chan Date: Mon, 20 Nov 2017 12:54:26 -0800 Subject: [PATCH 1561/3326] gpio: ath79: add missing MODULE_DESCRIPTION/LICENSE commit 539340f37e6d6ed4cd93e8e18c9b2e4eafd4b842 upstream. This change resolves a new compile-time warning when built as a loadable module: WARNING: modpost: missing MODULE_LICENSE() in drivers/gpio/gpio-ath79.o see include/linux/module.h for more information This adds the license as "GPL v2", which matches the header of the file. MODULE_DESCRIPTION is also added. Signed-off-by: Jesse Chan Acked-by: Alban Bedel Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/gpio/gpio-ath79.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index f33d4a5fe6718e..af0baf8da29550 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c @@ -323,3 +323,6 @@ static struct platform_driver ath79_gpio_driver = { }; module_platform_driver(ath79_gpio_driver); + +MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support"); +MODULE_LICENSE("GPL v2"); From e6a5fe3180369479988ef4c44f41d9c00e102dbd Mon Sep 17 00:00:00 2001 From: Jesse Chan Date: Mon, 20 Nov 2017 12:57:13 -0800 Subject: [PATCH 1562/3326] mtd: nand: denali_pci: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE commit d822401d1c6898a4a4ee03977b78b8cec402e88a upstream. This change resolves a new compile-time warning when built as a loadable module: WARNING: modpost: missing MODULE_LICENSE() in drivers/mtd/nand/denali_pci.o see include/linux/module.h for more information This adds the license as "GPL v2", which matches the header of the file. MODULE_DESCRIPTION and MODULE_AUTHOR are also added. Signed-off-by: Jesse Chan Acked-by: Masahiro Yamada Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/denali_pci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c index 81370c79aa48aa..7ad0db65a6face 100644 --- a/drivers/mtd/nand/denali_pci.c +++ b/drivers/mtd/nand/denali_pci.c @@ -124,3 +124,7 @@ static struct pci_driver denali_pci_driver = { }; module_pci_driver(denali_pci_driver); + +MODULE_DESCRIPTION("PCI driver for Denali NAND controller"); +MODULE_AUTHOR("Intel Corporation and its suppliers"); +MODULE_LICENSE("GPL v2"); From 80baea0e6cf36f64905fd7d50b4fe2fe846b6a4a Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Tue, 12 Dec 2017 14:31:30 -0500 Subject: [PATCH 1563/3326] igb: Free IRQs when device is hotplugged commit 888f22931478a05bc81ceb7295c626e1292bf0ed upstream. Recently I got a Caldigit TS3 Thunderbolt 3 dock, and noticed that upon hotplugging my kernel would immediately crash due to igb: [ 680.825801] kernel BUG at drivers/pci/msi.c:352! 
[ 680.828388] invalid opcode: 0000 [#1] SMP [ 680.829194] Modules linked in: igb(O) thunderbolt i2c_algo_bit joydev vfat fat btusb btrtl btbcm btintel bluetooth ecdh_generic hp_wmi sparse_keymap rfkill wmi_bmof iTCO_wdt intel_rapl x86_pkg_temp_thermal coretemp crc32_pclmul snd_pcm rtsx_pci_ms mei_me snd_timer memstick snd pcspkr mei soundcore i2c_i801 tpm_tis psmouse shpchp wmi tpm_tis_core tpm video hp_wireless acpi_pad rtsx_pci_sdmmc mmc_core crc32c_intel serio_raw rtsx_pci mfd_core xhci_pci xhci_hcd i2c_hid i2c_core [last unloaded: igb] [ 680.831085] CPU: 1 PID: 78 Comm: kworker/u16:1 Tainted: G O 4.15.0-rc3Lyude-Test+ #6 [ 680.831596] Hardware name: HP HP ZBook Studio G4/826B, BIOS P71 Ver. 01.03 06/09/2017 [ 680.832168] Workqueue: kacpi_hotplug acpi_hotplug_work_fn [ 680.832687] RIP: 0010:free_msi_irqs+0x180/0x1b0 [ 680.833271] RSP: 0018:ffffc9000030fbf0 EFLAGS: 00010286 [ 680.833761] RAX: ffff8803405f9c00 RBX: ffff88033e3d2e40 RCX: 000000000000002c [ 680.834278] RDX: 0000000000000000 RSI: 00000000000000ac RDI: ffff880340be2178 [ 680.834832] RBP: 0000000000000000 R08: ffff880340be1ff0 R09: ffff8803405f9c00 [ 680.835342] R10: 0000000000000000 R11: 0000000000000040 R12: ffff88033d63a298 [ 680.835822] R13: ffff88033d63a000 R14: 0000000000000060 R15: ffff880341959000 [ 680.836332] FS: 0000000000000000(0000) GS:ffff88034f440000(0000) knlGS:0000000000000000 [ 680.836817] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 680.837360] CR2: 000055e64044afdf CR3: 0000000001c09002 CR4: 00000000003606e0 [ 680.837954] Call Trace: [ 680.838853] pci_disable_msix+0xce/0xf0 [ 680.839616] igb_reset_interrupt_capability+0x5d/0x60 [igb] [ 680.840278] igb_remove+0x9d/0x110 [igb] [ 680.840764] pci_device_remove+0x36/0xb0 [ 680.841279] device_release_driver_internal+0x157/0x220 [ 680.841739] pci_stop_bus_device+0x7d/0xa0 [ 680.842255] pci_stop_bus_device+0x2b/0xa0 [ 680.842722] pci_stop_bus_device+0x3d/0xa0 [ 680.843189] pci_stop_and_remove_bus_device+0xe/0x20 [ 680.843627] trim_stale_devices+0xf3/0x140 [ 680.844086] trim_stale_devices+0x94/0x140 [ 680.844532] trim_stale_devices+0xa6/0x140 [ 680.845031] ? get_slot_status+0x90/0xc0 [ 680.845536] acpiphp_check_bridge.part.5+0xfe/0x140 [ 680.846021] acpiphp_hotplug_notify+0x175/0x200 [ 680.846581] ? free_bridge+0x100/0x100 [ 680.847113] acpi_device_hotplug+0x8a/0x490 [ 680.847535] acpi_hotplug_work_fn+0x1a/0x30 [ 680.848076] process_one_work+0x182/0x3a0 [ 680.848543] worker_thread+0x2e/0x380 [ 680.848963] ? process_one_work+0x3a0/0x3a0 [ 680.849373] kthread+0x111/0x130 [ 680.849776] ? kthread_create_worker_on_cpu+0x50/0x50 [ 680.850188] ret_from_fork+0x1f/0x30 [ 680.850601] Code: 43 14 85 c0 0f 84 d5 fe ff ff 31 ed eb 0f 83 c5 01 39 6b 14 0f 86 c5 fe ff ff 8b 7b 10 01 ef e8 b7 e4 d2 ff 48 83 78 70 00 74 e3 <0f> 0b 49 8d b5 a0 00 00 00 e8 62 6f d3 ff e9 c7 fe ff ff 48 8b [ 680.851497] RIP: free_msi_irqs+0x180/0x1b0 RSP: ffffc9000030fbf0 As it turns out, normally the freeing of IRQs that would fix this is called inside of the scope of __igb_close(). However, since the device is already gone by the point we try to unregister the netdevice from the driver due to a hotplug we end up seeing that the netif isn't present and thus, forget to free any of the device IRQs. So: make sure that if we're in the process of dismantling the netdev, we always allow __igb_close() to be called so that IRQs may be freed normally. Additionally, only allow igb_close() to be called from __igb_close() if it hasn't already been called for the given adapter. 
Signed-off-by: Lyude Paul Fixes: 9474933caf21 ("igb: close/suspend race in netif_device_detach") Cc: Todd Fujinaka Cc: Stephen Hemminger Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 667dbc7d4a4ec6..d1a44a84c97e68 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3331,7 +3331,7 @@ static int __igb_close(struct net_device *netdev, bool suspending) int igb_close(struct net_device *netdev) { - if (netif_device_present(netdev)) + if (netif_device_present(netdev) || netdev->dismantle) return __igb_close(netdev, false); return 0; } From 516868c59d82705b7c636ec2bdd3d6cb0e989b06 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Wed, 17 Jan 2018 20:27:11 +0200 Subject: [PATCH 1564/3326] ima/policy: fix parsing of fsuuid commit 36447456e1cca853188505f2a964dbbeacfc7a7a upstream. The switch to uuid_t invereted the logic of verfication that &entry->fsuuid is zero during parsing of "fsuuid=" rule. Instead of making sure the &entry->fsuuid field is not attempted to be overwritten, we bail out for perfectly correct rule. Fixes: 787d8c530af7 ("ima/policy: switch to use uuid_t") Signed-off-by: Mike Rapoport Signed-off-by: Mimi Zohar Signed-off-by: Greg Kroah-Hartman --- security/integrity/ima/ima_policy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 95209a5f8595af..8daf16e1d421d4 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -743,7 +743,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) case Opt_fsuuid: ima_log_string(ab, "fsuuid", args[0].from); - if (uuid_is_null(&entry->fsuuid)) { + if (!uuid_is_null(&entry->fsuuid)) { result = -EINVAL; break; } From 791274e773952f80990bc10757d0281bffbe6955 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Tue, 26 Dec 2017 20:34:22 -0800 Subject: [PATCH 1565/3326] scsi: aacraid: Fix udev inquiry race condition commit f4e8708d3104437fd7716e957f38c265b0c509ef upstream. When udev requests for a devices inquiry string, it might create multiple threads causing a race condition on the shared inquiry resource string. Created a buffer with the string for each thread. Fixes: 3bc8070fb75b3315 ([SCSI] aacraid: SMC vendor identification) Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/aacraid/aachba.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index af3e4d3f9735fd..548a3e73f10a81 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -913,8 +913,16 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) memset(str, ' ', sizeof(*str)); if (sup_adap_info->adapter_type_text[0]) { - char *cp = sup_adap_info->adapter_type_text; int c; + char *cp; + char *cname = kmemdup(sup_adap_info->adapter_type_text, + sizeof(sup_adap_info->adapter_type_text), + GFP_ATOMIC); + + if (!cname) + return; + + cp = cname; if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C')) inqstrcpy("SMC", str->vid); else { @@ -923,7 +931,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) ++cp; c = *cp; *cp = '\0'; - inqstrcpy(sup_adap_info->adapter_type_text, str->vid); + inqstrcpy(cname, str->vid); *cp = c; while (*cp && *cp != ' ') ++cp; @@ -937,8 +945,8 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) cp[sizeof(str->pid)] = '\0'; } inqstrcpy (cp, str->pid); - if (c) - cp[sizeof(str->pid)] = c; + + kfree(cname); } else { struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype); From d7fec01990e65967855790877ba829d67ebde255 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Tue, 26 Dec 2017 20:34:24 -0800 Subject: [PATCH 1566/3326] scsi: aacraid: Fix hang in kdump commit c5313ae8e4e037bfaf5e56cb8d6efdb8e92ce437 upstream. Driver attempts to perform a device scan and device add after coming out of reset. At times when the kdump kernel loads and it tries to perform eh recovery, the device scan hangs since its commands are blocked because of the eh recovery. This should have shown up in normal eh recovery path (Should have been obvious) Remove the code that performs scanning.I can live without the rescanning support in the stable kernels but a hanging kdump/eh recovery needs to be fixed. Fixes: a2d0321dd532901e (scsi: aacraid: Reload offlined drives after controller reset) Reported-by: Douglas Miller Tested-by: Guilherme G. Piccoli Fixes: a2d0321dd532901e (scsi: aacraid: Reload offlined drives after controller reset) Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/aacraid/aachba.c | 1 - drivers/scsi/aacraid/commsup.c | 9 +-------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 548a3e73f10a81..7173ae53c5260c 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -918,7 +918,6 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) char *cname = kmemdup(sup_adap_info->adapter_type_text, sizeof(sup_adap_info->adapter_type_text), GFP_ATOMIC); - if (!cname) return; diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 525a652dab48e9..ffbfd042be08c9 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1672,14 +1672,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) out: aac->in_reset = 0; scsi_unblock_requests(host); - /* - * Issue bus rescan to catch any configuration that might have - * occurred - */ - if (!retval) { - dev_info(&aac->pdev->dev, "Issuing bus rescan\n"); - scsi_scan_host(host); - } + if (jafo) { spin_lock_irq(host->host_lock); } From bdf19237e16f5495dab1edb0ec5eb60049d02a60 Mon Sep 17 00:00:00 2001 From: Markus Trippelsdorf Date: Wed, 11 Oct 2017 07:01:31 +0200 Subject: [PATCH 1567/3326] VFS: Handle lazytime in do_mount() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d7ee946942bdd12394809305e3df05aa4c8b7b8f upstream. Since commit e462ec50cb5fa ("VFS: Differentiate mount flags (MS_*) from internal superblock flags") the lazytime mount option doesn't get passed on anymore. Fix the issue by handling the option in do_mount(). Reviewed-by: Lukas Czerner Signed-off-by: Markus Trippelsdorf Signed-off-by: Al Viro Cc: Holger Hoffstätte Signed-off-by: Greg Kroah-Hartman --- fs/namespace.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/namespace.c b/fs/namespace.c index d18deb4c410b24..adae9ffce91d89 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name, SB_DIRSYNC | SB_SILENT | SB_POSIXACL | + SB_LAZYTIME | SB_I_VERSION); if (flags & MS_REMOUNT) From 08bb42086b3e9c5619e6e82aed576eae8a6583ef Mon Sep 17 00:00:00 2001 From: Stefan Schake Date: Fri, 10 Nov 2017 02:05:06 +0100 Subject: [PATCH 1568/3326] drm/vc4: Account for interrupts in flight [ Upstream commit 253696ccd613fbdaa5aba1de44c461a058e0a114 ] Synchronously disable the IRQ to make the following cancel_work_sync invocation effective. An interrupt in flight could enqueue further overflow mem work. As we free the binner BO immediately following vc4_irq_uninstall this caused a NULL pointer dereference in the work callback vc4_overflow_mem_work. 
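The teardown ordering this establishes follows a common pattern (the names below are illustrative, not the exact driver code): quiesce the interrupt source so no handler can queue new work, flush the work, and only then free the objects the work callback touches.

    disable_irq(dev->irq);                  /* waits for handlers in flight */
    cancel_work_sync(&vc4->overflow_mem_work);
    /* only now is it safe to free the binner BO: nothing can requeue work */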
Link: https://github.com/anholt/linux/issues/114 Signed-off-by: Stefan Schake Fixes: d5b1a78a772f ("drm/vc4: Add support for drawing 3D frames.") Signed-off-by: Eric Anholt Reviewed-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/1510275907-993-2-git-send-email-stschake@gmail.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vc4/vc4_irq.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index 7d7af3a93d941b..61b2e5377993dc 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c @@ -208,6 +208,9 @@ vc4_irq_postinstall(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); + /* Undo the effects of a previous vc4_irq_uninstall. */ + enable_irq(dev->irq); + /* Enable both the render done and out of memory interrupts. */ V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS); @@ -225,6 +228,9 @@ vc4_irq_uninstall(struct drm_device *dev) /* Clear any pending interrupts we might have left. */ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); + /* Finish any interrupt handler still in flight. */ + disable_irq(dev->irq); + cancel_work_sync(&vc4->overflow_mem_work); } From 9a8215c0a91e9182761d52e933fd635c7a7a47cd Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 23 Oct 2017 09:58:46 +0300 Subject: [PATCH 1569/3326] btrfs: Fix transaction abort during failure in btrfs_rm_dev_item [ Upstream commit 5e9f2ad5b2904a7e81df6d9a3dbef29478952eac ] btrfs_rm_dev_item calls several function under an active transaction, however it fails to abort it if an error happens. Fix this by adding explicit btrfs_abort_transaction/btrfs_end_transaction calls. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/volumes.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0c11121a8ace70..4006b2a1233dcf 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1765,20 +1765,24 @@ static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info, key.offset = device->devid; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret < 0) - goto out; - - if (ret > 0) { - ret = -ENOENT; + if (ret) { + if (ret > 0) + ret = -ENOENT; + btrfs_abort_transaction(trans, ret); + btrfs_end_transaction(trans); goto out; } ret = btrfs_del_item(trans, root, path); - if (ret) - goto out; + if (ret) { + btrfs_abort_transaction(trans, ret); + btrfs_end_transaction(trans); + } + out: btrfs_free_path(path); - btrfs_commit_transaction(trans); + if (!ret) + ret = btrfs_commit_transaction(trans); return ret; } From 71341a8a703752278bbaf6e4502f78ff50e645f7 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Mon, 30 Oct 2017 11:14:38 -0600 Subject: [PATCH 1570/3326] Btrfs: bail out gracefully rather than BUG_ON [ Upstream commit 56a0e706fcf870270878d6d72b71092ae42d229c ] If a file's DIR_ITEM key is invalid (due to memory errors) and gets written to disk, a future lookup_path can end up with kernel panic due to BUG_ON(). This gets rid of the BUG_ON(), meanwhile output the corrupted key and return ENOENT if it's invalid. 
Signed-off-by: Liu Bo Reported-by: Guillaume Bouchard Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/inode.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d94e3f68b9b134..c71afd424900ea 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5500,6 +5500,14 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, goto out_err; btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); + if (location->type != BTRFS_INODE_ITEM_KEY && + location->type != BTRFS_ROOT_ITEM_KEY) { + btrfs_warn(root->fs_info, +"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", + __func__, name, btrfs_ino(BTRFS_I(dir)), + location->objectid, location->type, location->offset); + goto out_err; + } out: btrfs_free_path(path); return ret; @@ -5816,8 +5824,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) return inode; } - BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); - index = srcu_read_lock(&fs_info->subvol_srcu); ret = fixup_tree_root_location(fs_info, dir, dentry, &location, &sub_root); From 1d285c0440fe784e39e9110cc75ca21719b92ead Mon Sep 17 00:00:00 2001 From: Abhishek Goel Date: Tue, 7 Nov 2017 15:17:55 +0530 Subject: [PATCH 1571/3326] cpupowerutils: bench - Fix cpu online check [ Upstream commit 53d1cd6b125fb9d69303516a1179ebc3b72f797a ] cpupower_is_cpu_online was incorrectly checking for 0. This patch fixes this by checking for 1 when the cpu is online. Signed-off-by: Abhishek Goel Signed-off-by: Shuah Khan Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/power/cpupower/bench/system.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c index c25a74ae51baef..2bb3eef7d5c1fb 100644 --- a/tools/power/cpupower/bench/system.c +++ b/tools/power/cpupower/bench/system.c @@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu) dprintf("set %s as cpufreq governor\n", governor); - if (cpupower_is_cpu_online(cpu) != 0) { + if (cpupower_is_cpu_online(cpu) != 1) { perror("cpufreq_cpu_exists"); fprintf(stderr, "error: cpu %u does not exist\n", cpu); return -1; From f1881befacbe7f91b49544b0860b65eb6af93429 Mon Sep 17 00:00:00 2001 From: Abhishek Goel Date: Wed, 15 Nov 2017 14:10:02 +0530 Subject: [PATCH 1572/3326] cpupower : Fix cpupower working when cpu0 is offline [ Upstream commit dbdc468f35ee827cab2753caa1c660bdb832243a ] cpuidle_monitor used to assume that cpu0 is always online which is not a valid assumption on POWER machines. This patch fixes this by getting the cpu on which the current thread is running, instead of always using cpu0 for monitoring which may not be online. 
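The same idea in a minimal userspace form (illustrative only, not the cpupower code): ask the scheduler which CPU the current thread is running on and use that for the sysfs queries instead of hard-coding CPU 0.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            int cpu = sched_getcpu();  /* CPU this thread currently runs on */

            if (cpu < 0) {
                    perror("sched_getcpu");
                    return 1;
            }
            printf("querying cpuidle states of cpu%d\n", cpu);
            return 0;
    }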
Signed-off-by: Abhishek Goel Signed-off-by: Shuah Khan Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c index 1b5da0066ebf90..5b3205f1621749 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c +++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c @@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void) { int num; char *tmp; + int this_cpu; + + this_cpu = sched_getcpu(); /* Assume idle state count is the same for all CPUs */ - cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0); + cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu); if (cpuidle_sysfs_monitor.hw_states_num <= 0) return NULL; for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) { - tmp = cpuidle_state_name(0, num); + tmp = cpuidle_state_name(this_cpu, num); if (tmp == NULL) continue; @@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void) strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1); free(tmp); - tmp = cpuidle_state_desc(0, num); + tmp = cpuidle_state_desc(this_cpu, num); if (tmp == NULL) continue; strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1); From d003b4bfda3b3b8f42fba2236c774084e437cad0 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 6 Nov 2017 16:15:10 +0200 Subject: [PATCH 1573/3326] KVM: nVMX/nSVM: Don't intercept #UD when running L2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit ac9b305caa0df6f5b75d294e4b86c1027648991e ] When running L2, #UD should be intercepted by L1 or just forwarded directly to L2. It should not reach L0 x86 emulator. Therefore, set intercept for #UD only based on L1 exception-bitmap. Also add WARN_ON_ONCE() on L0 #UD intercept handlers to make sure it is never reached while running L2. This improves commit ae1f57670703 ("KVM: nVMX: Do not emulate #UD while in guest mode") by removing an unnecessary exit from L2 to L0 on #UD when L1 doesn't intercept it. In addition, SVM L0 #UD intercept handler doesn't handle correctly the case it is raised from L2. In this case, it should forward the #UD to guest instead of x86 emulator. As done in VMX #UD intercept handler. This commit fixes this issue as-well. 
Signed-off-by: Liran Alon Reviewed-by: Nikita Leshenko Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Paolo Bonzini Reviewed-by: Wanpeng Li Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/svm.c | 9 ++++++++- arch/x86/kvm/vmx.c | 9 ++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 6a8284f7232830..c8be4e6d365b8a 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -362,6 +362,7 @@ static void recalc_intercepts(struct vcpu_svm *svm) { struct vmcb_control_area *c, *h; struct nested_state *g; + u32 h_intercept_exceptions; mark_dirty(svm->vmcb, VMCB_INTERCEPTS); @@ -372,9 +373,14 @@ static void recalc_intercepts(struct vcpu_svm *svm) h = &svm->nested.hsave->control; g = &svm->nested; + /* No need to intercept #UD if L1 doesn't intercept it */ + h_intercept_exceptions = + h->intercept_exceptions & ~(1U << UD_VECTOR); + c->intercept_cr = h->intercept_cr | g->intercept_cr; c->intercept_dr = h->intercept_dr | g->intercept_dr; - c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; + c->intercept_exceptions = + h_intercept_exceptions | g->intercept_exceptions; c->intercept = h->intercept | g->intercept; } @@ -2189,6 +2195,7 @@ static int ud_interception(struct vcpu_svm *svm) { int er; + WARN_ON_ONCE(is_guest_mode(&svm->vcpu)); er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); if (er == EMULATE_USER_EXIT) return 0; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ef16cf0f7cfd0c..36628ed362d8f9 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1891,7 +1891,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; - eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | + eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR); if ((vcpu->guest_debug & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == @@ -1909,6 +1909,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) */ if (is_guest_mode(vcpu)) eb |= get_vmcs12(vcpu)->exception_bitmap; + else + eb |= 1u << UD_VECTOR; vmcs_write32(EXCEPTION_BITMAP, eb); } @@ -5919,10 +5921,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) return 1; /* already handled by vmx_vcpu_run() */ if (is_invalid_opcode(intr_info)) { - if (is_guest_mode(vcpu)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } + WARN_ON_ONCE(is_guest_mode(vcpu)); er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); if (er == EMULATE_USER_EXIT) return 0; From fbd81f09793b8ab0696782da0c081599b6620efc Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Sun, 5 Nov 2017 16:56:33 +0200 Subject: [PATCH 1574/3326] KVM: x86: emulator: Return to user-mode on L1 CPL=0 emulation failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 1f4dcb3b213235e642088709a1c54964d23365e9 ] On this case, handle_emulation_failure() fills kvm_run with internal-error information which it expects to be delivered to user-mode for further processing. However, the code reports a wrong return-value which makes KVM to never return to user-mode on this scenario. 
Fixes: 6d77dbfc88e3 ("KVM: inject #UD if instruction emulation fails and exit to userspace") Signed-off-by: Liran Alon Reviewed-by: Nikita Leshenko Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Wanpeng Li Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 575c8953cc9a35..b25326ad82a13a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5416,7 +5416,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; - r = EMULATE_FAIL; + r = EMULATE_USER_EXIT; } kvm_queue_exception(vcpu, UD_VECTOR); From df54fc5c28d2921ae4eb0d3401db929cb0c62e56 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Sun, 5 Nov 2017 16:56:34 +0200 Subject: [PATCH 1575/3326] KVM: x86: Don't re-execute instruction when not passing CR2 value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 9b8ae63798cb97e785a667ff27e43fa6220cb734 ] In case of instruction-decode failure or emulation failure, x86_emulate_instruction() will call reexecute_instruction() which will attempt to use the cr2 value passed to x86_emulate_instruction(). However, when x86_emulate_instruction() is called from emulate_instruction(), cr2 is not passed (passed as 0) and therefore it doesn't make sense to execute reexecute_instruction() logic at all. Fixes: 51d8b66199e9 ("KVM: cleanup emulate_instruction") Signed-off-by: Liran Alon Reviewed-by: Nikita Leshenko Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Wanpeng Li Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/vmx.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index eb38ac9d9a31c2..4f8b801996722d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1156,7 +1156,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, static inline int emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) { - return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); + return x86_emulate_instruction(vcpu, 0, + emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0); } void kvm_enable_efer_bits(u64); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 36628ed362d8f9..079cedb910bc8c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6607,7 +6607,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) if (kvm_test_request(KVM_REQ_EVENT, vcpu)) return 1; - err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); + err = emulate_instruction(vcpu, 0); if (err == EMULATE_USER_EXIT) { ++vcpu->stat.mmio_exits; From 4f5500a63455815c45b28aa740fce91b49630188 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Sun, 5 Nov 2017 16:54:47 -0800 Subject: [PATCH 1576/3326] KVM: X86: Fix operand/address-size during instruction decoding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 3853be2603191829b442b64dac6ae8ba0c027bf9 ] Pedro reported: During tests that we conducted on KVM, we noticed that executing a "PUSH %ES" instruction under KVM produces different 
results on both memory and the SP register depending on whether EPT support is enabled. With EPT the SP is reduced by 4 bytes (and the written value is 0-padded) but without EPT support it is only reduced by 2 bytes. The difference can be observed when the CS.DB field is 1 (32-bit) but not when it's 0 (16-bit). The internal segment descriptor cache exist even in real/vm8096 mode. The CS.D also should be respected instead of just default operand/address-size/66H prefix/67H prefix during instruction decoding. This patch fixes it by also adjusting operand/address-size according to CS.D. Reported-by: Pedro Fonseca Tested-by: Pedro Fonseca Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Nadav Amit Cc: Pedro Fonseca Signed-off-by: Wanpeng Li Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/emulate.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 7bbb5da2b49da0..1a2543545e5da8 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -5009,6 +5009,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; + u16 dummy; + struct desc_struct desc; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; @@ -5027,6 +5029,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: + def_op_bytes = def_ad_bytes = 2; + ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS); + if (desc.d) + def_op_bytes = def_ad_bytes = 4; + break; case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; From 5e7c270a1e60fcc16e8bcda6c6d659b43eba1ddb Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Sun, 5 Nov 2017 16:54:49 -0800 Subject: [PATCH 1577/3326] KVM: nVMX: Fix mmu context after VMLAUNCH/VMRESUME failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 5af4157388adad82c339e3742fb6b67840721347 ] Commit 4f350c6dbcb (kvm: nVMX: Handle deferred early VMLAUNCH/VMRESUME failure properly) can result in L1(run kvm-unit-tests/run_tests.sh vmx_controls in L1) null pointer deference and also L0 calltrace when EPT=0 on both L0 and L1. In L1: BUG: unable to handle kernel paging request at ffffffffc015bf8f IP: vmx_vcpu_run+0x202/0x510 [kvm_intel] PGD 146e13067 P4D 146e13067 PUD 146e15067 PMD 3d2686067 PTE 3d4af9161 Oops: 0003 [#1] PREEMPT SMP CPU: 2 PID: 1798 Comm: qemu-system-x86 Not tainted 4.14.0-rc4+ #6 RIP: 0010:vmx_vcpu_run+0x202/0x510 [kvm_intel] Call Trace: WARNING: kernel stack frame pointer at ffffb86f4988bc18 in qemu-system-x86:1798 has bad value 0000000000000002 In L0: -----------[ cut here ]------------ WARNING: CPU: 6 PID: 4460 at /home/kernel/linux/arch/x86/kvm//vmx.c:9845 vmx_inject_page_fault_nested+0x130/0x140 [kvm_intel] CPU: 6 PID: 4460 Comm: qemu-system-x86 Tainted: G OE 4.14.0-rc7+ #25 RIP: 0010:vmx_inject_page_fault_nested+0x130/0x140 [kvm_intel] Call Trace: paging64_page_fault+0x500/0xde0 [kvm] ? paging32_gva_to_gpa_nested+0x120/0x120 [kvm] ? nonpaging_page_fault+0x3b0/0x3b0 [kvm] ? __asan_storeN+0x12/0x20 ? paging64_gva_to_gpa+0xb0/0x120 [kvm] ? paging64_walk_addr_generic+0x11a0/0x11a0 [kvm] ? lock_acquire+0x2c0/0x2c0 ? vmx_read_guest_seg_ar+0x97/0x100 [kvm_intel] ? vmx_get_segment+0x2a6/0x310 [kvm_intel] ? sched_clock+0x1f/0x30 ? check_chain_key+0x137/0x1e0 ? __lock_acquire+0x83c/0x2420 ? 
kvm_multiple_exception+0xf2/0x220 [kvm] ? debug_check_no_locks_freed+0x240/0x240 ? debug_smp_processor_id+0x17/0x20 ? __lock_is_held+0x9e/0x100 kvm_mmu_page_fault+0x90/0x180 [kvm] kvm_handle_page_fault+0x15c/0x310 [kvm] ? __lock_is_held+0x9e/0x100 handle_exception+0x3c7/0x4d0 [kvm_intel] vmx_handle_exit+0x103/0x1010 [kvm_intel] ? kvm_arch_vcpu_ioctl_run+0x1628/0x2e20 [kvm] The commit avoids loading the host state of vmcs12 as vmcs01's guest state, since vmcs12 is not modified (except for the VM-instruction error field) if the check of the vmcs control area fails. However, the mmu context is switched to the nested mmu in prepare_vmcs02() and it will not be reloaded, since load_vmcs12_host_state() is skipped when nested VMLAUNCH/VMRESUME fails. This patch fixes it by reloading the mmu context when nested VMLAUNCH/VMRESUME fails.
*/ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + + load_vmcs12_mmu_host_state(vcpu, vmcs12); + /* * The emulated instruction was already skipped in * nested_vmx_run, but the updated RIP was never From f0a3691bf2e5692ec3acf60aea8dc098a6ce2eae Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 7 Nov 2017 18:04:05 +0100 Subject: [PATCH 1578/3326] KVM: x86: fix em_fxstor() sleeping while in atomic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 4d772cb85f64c16eca00177089ecb3cd5d292120 ] Commit 9d643f63128b ("KVM: x86: avoid large stack allocations in em_fxrstor") optimize the stack size, but introduced a guest memory access which might sleep while in atomic. Fix it by introducing, again, a second fxregs_state. Try to avoid large stacks by using noinline. Add some helpful comments. Reported by syzbot: in_atomic(): 1, irqs_disabled(): 0, pid: 2909, name: syzkaller879109 2 locks held by syzkaller879109/2909: #0: (&vcpu->mutex){+.+.}, at: [] vcpu_load+0x1c/0x70 arch/x86/kvm/../../../virt/kvm/kvm_main.c:154 #1: (&kvm->srcu){....}, at: [] vcpu_enter_guest arch/x86/kvm/x86.c:6983 [inline] #1: (&kvm->srcu){....}, at: [] vcpu_run arch/x86/kvm/x86.c:7061 [inline] #1: (&kvm->srcu){....}, at: [] kvm_arch_vcpu_ioctl_run+0x1bc2/0x58b0 arch/x86/kvm/x86.c:7222 CPU: 1 PID: 2909 Comm: syzkaller879109 Not tainted 4.13.0-rc4-next-20170811 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:16 [inline] dump_stack+0x194/0x257 lib/dump_stack.c:52 ___might_sleep+0x2b2/0x470 kernel/sched/core.c:6014 __might_sleep+0x95/0x190 kernel/sched/core.c:5967 __might_fault+0xab/0x1d0 mm/memory.c:4383 __copy_from_user include/linux/uaccess.h:71 [inline] __kvm_read_guest_page+0x58/0xa0 arch/x86/kvm/../../../virt/kvm/kvm_main.c:1771 kvm_vcpu_read_guest_page+0x44/0x60 arch/x86/kvm/../../../virt/kvm/kvm_main.c:1791 kvm_read_guest_virt_helper+0x76/0x140 arch/x86/kvm/x86.c:4407 kvm_read_guest_virt_system+0x3c/0x50 arch/x86/kvm/x86.c:4466 segmented_read_std+0x10c/0x180 arch/x86/kvm/emulate.c:819 em_fxrstor+0x27b/0x410 arch/x86/kvm/emulate.c:4022 x86_emulate_insn+0x55d/0x3c50 arch/x86/kvm/emulate.c:5471 x86_emulate_instruction+0x411/0x1ca0 arch/x86/kvm/x86.c:5698 kvm_mmu_page_fault+0x18b/0x2c0 arch/x86/kvm/mmu.c:4854 handle_ept_violation+0x1fc/0x5e0 arch/x86/kvm/vmx.c:6400 vmx_handle_exit+0x281/0x1ab0 arch/x86/kvm/vmx.c:8718 vcpu_enter_guest arch/x86/kvm/x86.c:6999 [inline] vcpu_run arch/x86/kvm/x86.c:7061 [inline] kvm_arch_vcpu_ioctl_run+0x1cee/0x58b0 arch/x86/kvm/x86.c:7222 kvm_vcpu_ioctl+0x64c/0x1010 arch/x86/kvm/../../../virt/kvm/kvm_main.c:2591 vfs_ioctl fs/ioctl.c:45 [inline] do_vfs_ioctl+0x1b1/0x1520 fs/ioctl.c:685 SYSC_ioctl fs/ioctl.c:700 [inline] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:691 entry_SYSCALL_64_fastpath+0x1f/0xbe RIP: 0033:0x437fc9 RSP: 002b:00007ffc7b4d5ab8 EFLAGS: 00000206 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 00000000004002b0 RCX: 0000000000437fc9 RDX: 0000000000000000 RSI: 000000000000ae80 RDI: 0000000000000005 RBP: 0000000000000086 R08: 0000000000000000 R09: 0000000020ae8000 R10: 0000000000009120 R11: 0000000000000206 R12: 0000000000000000 R13: 0000000000000004 R14: 0000000000000004 R15: 0000000020077000 Fixes: 9d643f63128b ("KVM: x86: avoid large stack allocations in em_fxrstor") Signed-off-by: David Hildenbrand Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/emulate.c | 32 
++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 1a2543545e5da8..eca6a89f232608 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -4023,6 +4023,26 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt) fxstate_size(ctxt)); } +/* + * FXRSTOR might restore XMM registers not provided by the guest. Fill + * in the host registers (via FXSAVE) instead, so they won't be modified. + * (preemption has to stay disabled until FXRSTOR). + * + * Use noinline to keep the stack for other functions called by callers small. + */ +static noinline int fxregs_fixup(struct fxregs_state *fx_state, + const size_t used_size) +{ + struct fxregs_state fx_tmp; + int rc; + + rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp)); + memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size, + __fxstate_size(16) - used_size); + + return rc; +} + static int em_fxrstor(struct x86_emulate_ctxt *ctxt) { struct fxregs_state fx_state; @@ -4033,19 +4053,19 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; + size = fxstate_size(ctxt); + rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); + if (rc != X86EMUL_CONTINUE) + return rc; + ctxt->ops->get_fpu(ctxt); - size = fxstate_size(ctxt); if (size < __fxstate_size(16)) { - rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); + rc = fxregs_fixup(&fx_state, size); if (rc != X86EMUL_CONTINUE) goto out; } - rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); - if (rc != X86EMUL_CONTINUE) - goto out; - if (fx_state.mxcsr >> 16) { rc = emulate_gp(ctxt, 0); goto out; From b501603be9dfc38725feb8dddf1bdf2bbf3bd6ea Mon Sep 17 00:00:00 2001 From: Nikita Leshenko Date: Sun, 5 Nov 2017 15:52:29 +0200 Subject: [PATCH 1579/3326] KVM: x86: ioapic: Fix level-triggered EOI and IOAPIC reconfigure race MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 0fc5a36dd6b345eb0d251a65c236e53bead3eef7 ] KVM uses ioapic_handled_vectors to track vectors that need to notify the IOAPIC on EOI. The problem is that IOAPIC can be reconfigured while an interrupt with old configuration is pending or running and ioapic_handled_vectors only remembers the newest configuration; thus EOI from the old interrupt is not delievered to the IOAPIC. A previous commit db2bdcbbbd32 ("KVM: x86: fix edge EOI and IOAPIC reconfig race") addressed this issue by adding pending edge-triggered interrupts to ioapic_handled_vectors, fixing this race for edge-triggered interrupts. The commit explicitly ignored level-triggered interrupts, but this race applies to them as well: 1) IOAPIC sends a level triggered interrupt vector to VCPU0 2) VCPU0's handler deasserts the irq line and reconfigures the IOAPIC to route the vector to VCPU1. The reconfiguration rewrites only the upper 32 bits of the IOREDTBLn register. (Causes KVM to update ioapic_handled_vectors for VCPU0 and it no longer includes the vector.) 3) VCPU0 sends EOI for the vector, but it's not delievered to the IOAPIC because the ioapic_handled_vectors doesn't include the vector. 4) New interrupts are not delievered to VCPU1 because remote_irr bit is set forever. Therefore, the correct behavior is to add all pending and running interrupts to ioapic_handled_vectors. 
This commit introduces a slight performance hit similar to commit db2bdcbbbd32 ("KVM: x86: fix edge EOI and IOAPIC reconfig race") for the rare case that the vector is reused by a non-IOAPIC source on VCPU0. We prefer to keep solution simple and not handle this case just as the original commit does. Fixes: db2bdcbbbd32 ("KVM: x86: fix edge EOI and IOAPIC reconfig race") Signed-off-by: Nikita Leshenko Reviewed-by: Liran Alon Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/ioapic.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index bdff437acbcb7e..ae0a7dc318b266 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors) index == RTC_GSI) { if (kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id, e->fields.dest_mode) || - (e->fields.trig_mode == IOAPIC_EDGE_TRIG && - kvm_apic_pending_eoi(vcpu, e->fields.vector))) + kvm_apic_pending_eoi(vcpu, e->fields.vector)) __set_bit(e->fields.vector, ioapic_handled_vectors); } From 408a265107397dc55b415628880a8f6b14dc7044 Mon Sep 17 00:00:00 2001 From: Nikita Leshenko Date: Sun, 5 Nov 2017 15:52:32 +0200 Subject: [PATCH 1580/3326] KVM: x86: ioapic: Clear Remote IRR when entry is switched to edge-triggered MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a8bfec2930525808c01f038825d1df3904638631 ] Some OSes (Linux, Xen) use this behavior to clear the Remote IRR bit for IOAPICs without an EOI register. They simulate the EOI message manually by changing the trigger mode to edge and then back to level, with the entry being masked during this. QEMU implements this feature in commit ed1263c363c9 ("ioapic: clear remote irr bit for edge-triggered interrupts") As a side effect, this commit removes an incorrect behavior where Remote IRR was cleared when the redirection table entry was rewritten. This is not consistent with the manual and also opens an opportunity for a strange behavior when a redirection table entry is modified from an interrupt handler that handles the same entry: The modification will clear the Remote IRR bit even though the interrupt handler is still running. Signed-off-by: Nikita Leshenko Reviewed-by: Liran Alon Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Wanpeng Li Reviewed-by: Steve Rutherford Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/ioapic.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index ae0a7dc318b266..5c9ad29e11fdc9 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -304,8 +304,17 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) } else { e->bits &= ~0xffffffffULL; e->bits |= (u32) val; - e->fields.remote_irr = 0; } + + /* + * Some OSes (Linux, Xen) assume that Remote IRR bit will + * be cleared by IOAPIC hardware when the entry is configured + * as edge-triggered. This behavior is used to simulate an + * explicit EOI on IOAPICs that don't have the EOI register. 
+ */ + if (e->fields.trig_mode == IOAPIC_EDGE_TRIG) + e->fields.remote_irr = 0; + mask_after = e->fields.mask; if (mask_before != mask_after) kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); From b689fc5b79d547f88d464e021d6de815d8612ac3 Mon Sep 17 00:00:00 2001 From: Nikita Leshenko Date: Sun, 5 Nov 2017 15:52:33 +0200 Subject: [PATCH 1581/3326] KVM: x86: ioapic: Preserve read-only values in the redirection table MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit b200dded0a6974a3b69599832b2203483920ab25 ] According to 82093AA (IOAPIC) manual, Remote IRR and Delivery Status are read-only. QEMU implements the bits as RO in commit 479c2a1cb7fb ("ioapic: keep RO bits for IOAPIC entry"). Signed-off-by: Nikita Leshenko Reviewed-by: Liran Alon Signed-off-by: Konrad Rzeszutek Wilk Reviewed-by: Wanpeng Li Reviewed-by: Steve Rutherford Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/ioapic.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 5c9ad29e11fdc9..9d270ba9643ca8 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -276,6 +276,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) { unsigned index; bool mask_before, mask_after; + int old_remote_irr, old_delivery_status; union kvm_ioapic_redirect_entry *e; switch (ioapic->ioregsel) { @@ -298,6 +299,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) return; e = &ioapic->redirtbl[index]; mask_before = e->fields.mask; + /* Preserve read-only fields */ + old_remote_irr = e->fields.remote_irr; + old_delivery_status = e->fields.delivery_status; if (ioapic->ioregsel & 1) { e->bits &= 0xffffffff; e->bits |= (u64) val << 32; @@ -305,6 +309,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) e->bits &= ~0xffffffffULL; e->bits |= (u32) val; } + e->fields.remote_irr = old_remote_irr; + e->fields.delivery_status = old_delivery_status; /* * Some OSes (Linux, Xen) assume that Remote IRR bit will From d46e961f056aaec155bc3e21cf8e31449b5d2117 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Sun, 5 Nov 2017 16:07:43 +0200 Subject: [PATCH 1582/3326] KVM: nVMX: Fix vmx_check_nested_events() return value in case an event was reinjected to L2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 917dc6068bc12a2dafffcf0e9d405ddb1b8780cb ] vmx_check_nested_events() should return -EBUSY only in case there is a pending L1 event which requires a VMExit from L2 to L1 but such a VMExit is currently blocked. Such VMExits are blocked either because nested_run_pending=1 or an event was reinjected to L2. vmx_check_nested_events() should return 0 in case there are no pending L1 events which requires a VMExit from L2 to L1 or if a VMExit from L2 to L1 was done internally. However, upstream commit which introduced blocking in case an event was reinjected to L2 (commit acc9ab601327 ("KVM: nVMX: Fix pending events injection")) contains a bug: It returns -EBUSY even if there are no pending L1 events which requires VMExit from L2 to L1. This commit fix this issue. 
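In shorter form, the corrected gating reads roughly as follows (one branch shown, simplified; the flattened diff below has the full context): -EBUSY is returned only when an L2->L1 exit is actually required but must be deferred, otherwise the function reports that nothing is pending.

    bool block_nested_events = vmx->nested.nested_run_pending ||
                               kvm_event_needs_reinjection(vcpu);

    if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
            if (block_nested_events)
                    return -EBUSY;          /* exit needed, but deferred */
            nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
                              NMI_VECTOR | INTR_TYPE_NMI_INTR |
                              INTR_INFO_VALID_MASK, 0);
            return 0;
    }

    return 0;                               /* no L1 event to deliver */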
Fixes: acc9ab601327 ("KVM: nVMX: Fix pending events injection") Signed-off-by: Liran Alon Reviewed-by: Nikita Leshenko Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3a16e45f0c2e21..d073f91bc08911 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11114,13 +11114,12 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qual; - - if (kvm_event_needs_reinjection(vcpu)) - return -EBUSY; + bool block_nested_events = + vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); if (vcpu->arch.exception.pending && nested_vmx_check_exception(vcpu, &exit_qual)) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_inject_exception_vmexit(vcpu, exit_qual); vcpu->arch.exception.pending = false; @@ -11129,14 +11128,14 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && vmx->nested.preemption_timer_expired) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); return 0; } if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, NMI_VECTOR | INTR_TYPE_NMI_INTR | @@ -11152,7 +11151,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && nested_exit_on_intr(vcpu)) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); return 0; From 26bd01c1affe663196e05302290c08f781d2e597 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 24 Oct 2017 15:25:20 +0300 Subject: [PATCH 1583/3326] nvme-fabrics: introduce init command check for a queue that is not alive [ Upstream commit 48832f8d58cfedb2f9bee11bbfbb657efb42e7e7 ] When the fabrics queue is not alive and fully functional, no commands should be allowed to pass but connect (which moves the queue to a fully functional state). Any other command should be failed, with either temporary status BLK_STS_RESOUCE or permanent status BLK_STS_IOERR. This is shared across all fabrics, hence move the check to fabrics library. 
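Each transport then only needs a thin wrapper on its queue_rq path, roughly of this shape (placeholder names, mirroring the RDMA and FC callers updated in this series):

    static inline blk_status_t nvme_xxx_is_ready(struct nvme_xxx_queue *queue,
                                                 struct request *rq)
    {
            if (unlikely(!test_bit(NVME_XXX_Q_LIVE, &queue->flags)))
                    return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
            return BLK_STS_OK;
    }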
Signed-off-by: Sagi Grimberg Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/fabrics.h | 30 ++++++++++++++++++++++++++++++ drivers/nvme/host/rdma.c | 32 ++++++-------------------------- 2 files changed, 36 insertions(+), 26 deletions(-) diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index bf33663218cd24..9ff8529a64a96d 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -142,4 +142,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts); int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size); bool nvmf_should_reconnect(struct nvme_ctrl *ctrl); +static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl, + struct request *rq) +{ + struct nvme_command *cmd = nvme_req(rq)->cmd; + + /* + * We cannot accept any other command until the connect command has + * completed, so only allow connect to pass. + */ + if (!blk_rq_is_passthrough(rq) || + cmd->common.opcode != nvme_fabrics_command || + cmd->fabrics.fctype != nvme_fabrics_type_connect) { + /* + * Reconnecting state means transport disruption, which can take + * a long time and even might fail permanently, fail fast to + * give upper layers a chance to failover. + * Deleting state means that the ctrl will never accept commands + * again, fail it permanently. + */ + if (ctrl->state == NVME_CTRL_RECONNECTING || + ctrl->state == NVME_CTRL_DELETING) { + nvme_req(rq)->status = NVME_SC_ABORT_REQ; + return BLK_STS_IOERR; + } + return BLK_STS_RESOURCE; /* try again later */ + } + + return BLK_STS_OK; +} + #endif /* _NVME_FABRICS_H */ diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 0ebb539f3bd3a7..6de163e6c9ebab 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1603,31 +1603,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved) * We cannot accept any other command until the Connect command has completed. */ static inline blk_status_t -nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq) -{ - if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { - struct nvme_command *cmd = nvme_req(rq)->cmd; - - if (!blk_rq_is_passthrough(rq) || - cmd->common.opcode != nvme_fabrics_command || - cmd->fabrics.fctype != nvme_fabrics_type_connect) { - /* - * reconnecting state means transport disruption, which - * can take a long time and even might fail permanently, - * fail fast to give upper layers a chance to failover. - * deleting state means that the ctrl will never accept - * commands again, fail it permanently. 
- */ - if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING || - queue->ctrl->ctrl.state == NVME_CTRL_DELETING) { - nvme_req(rq)->status = NVME_SC_ABORT_REQ; - return BLK_STS_IOERR; - } - return BLK_STS_RESOURCE; /* try again later */ - } - } - - return 0; +nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq) +{ + if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) + return nvmf_check_init_req(&queue->ctrl->ctrl, rq); + return BLK_STS_OK; } static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, @@ -1646,7 +1626,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, WARN_ON_ONCE(rq->tag < 0); - ret = nvme_rdma_queue_is_ready(queue, rq); + ret = nvme_rdma_is_ready(queue, rq); if (unlikely(ret)) return ret; From db2044fc42305d2f8f92f24e16f223b66dff6cfe Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 24 Oct 2017 15:25:21 +0300 Subject: [PATCH 1584/3326] nvme-fc: check if queue is ready in queue_rq [ Upstream commit 9e0ed16ab9a9aaf670b81c9cd05b5e50defed654 ] In case the queue is not LIVE (fully functional and connected at the nvmf level), we cannot allow any commands other than connect to pass through. Add a new queue state flag NVME_FC_Q_LIVE which is set after nvmf connect and cleared in queue teardown. Signed-off-by: Sagi Grimberg Reviewed-by: James Smart Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/fc.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index be49d0f793816c..3148d760d8251d 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -41,6 +41,7 @@ enum nvme_fc_queue_flags { NVME_FC_Q_CONNECTED = (1 << 0), + NVME_FC_Q_LIVE = (1 << 1), }; #define NVMEFC_QUEUE_DELAY 3 /* ms units */ @@ -1654,6 +1655,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) return; + clear_bit(NVME_FC_Q_LIVE, &queue->flags); /* * Current implementation never disconnects a single queue. * It always terminates a whole association. 
So there is never @@ -1661,7 +1663,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) */ queue->connection_id = 0; - clear_bit(NVME_FC_Q_CONNECTED, &queue->flags); } static void @@ -1740,6 +1741,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) break; + + set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); } return ret; @@ -2048,6 +2051,14 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, return BLK_STS_RESOURCE; } +static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue, + struct request *rq) +{ + if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags))) + return nvmf_check_init_req(&queue->ctrl->ctrl, rq); + return BLK_STS_OK; +} + static blk_status_t nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) @@ -2063,6 +2074,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, u32 data_len; blk_status_t ret; + ret = nvme_fc_is_ready(queue, rq); + if (unlikely(ret)) + return ret; + ret = nvme_setup_cmd(ns, rq, sqe); if (ret) return ret; @@ -2398,6 +2413,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (ret) goto out_disconnect_admin_queue; + set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); + /* * Check controller capabilities * From 7af5f9137c93bd69384a3d12d9f741b63f4d98c4 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 24 Oct 2017 15:25:22 +0300 Subject: [PATCH 1585/3326] nvme-loop: check if queue is ready in queue_rq [ Upstream commit 9d7fab04b95e8c26014a9bfc1c943b8360b44c17 ] In case the queue is not LIVE (fully functional and connected at the nvmf level), we cannot allow any commands other than connect to pass through. Add a new queue state flag NVME_LOOP_Q_LIVE which is set after nvmf connect and cleared in queue teardown. 
Signed-off-by: Sagi Grimberg Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/target/loop.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 92628c4329262c..02aff5cc48bf2c 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -61,10 +61,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) return container_of(ctrl, struct nvme_loop_ctrl, ctrl); } +enum nvme_loop_queue_flags { + NVME_LOOP_Q_LIVE = 0, +}; + struct nvme_loop_queue { struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; struct nvme_loop_ctrl *ctrl; + unsigned long flags; }; static struct nvmet_port *nvmet_loop_port; @@ -153,6 +158,14 @@ nvme_loop_timeout(struct request *rq, bool reserved) return BLK_EH_HANDLED; } +static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue, + struct request *rq) +{ + if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags))) + return nvmf_check_init_req(&queue->ctrl->ctrl, rq); + return BLK_STS_OK; +} + static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -162,6 +175,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; + ret = nvme_loop_is_ready(queue, req); + if (unlikely(ret)) + return ret; + ret = nvme_setup_cmd(ns, req, &iod->cmd); if (ret) return ret; @@ -275,6 +292,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = { static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); blk_cleanup_queue(ctrl->ctrl.admin_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); @@ -305,8 +323,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) { int i; - for (i = 1; i < ctrl->ctrl.queue_count; i++) + for (i = 1; i < ctrl->ctrl.queue_count; i++) { + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); + } } static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) @@ -346,6 +366,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl) ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) return ret; + set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); } return 0; @@ -387,6 +408,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) if (error) goto out_cleanup_queue; + set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); + error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap); if (error) { dev_err(ctrl->ctrl.device, From 128dc55f891c79535dbf278e332f0f8fedfa7469 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Thu, 9 Nov 2017 01:12:03 -0500 Subject: [PATCH 1586/3326] nvme-pci: disable APST on Samsung SSD 960 EVO + ASUS PRIME B350M-A [ Upstream commit 8427bbc224863e14d905c87920d4005cb3e88ac3 ] The NVMe device in question drops off the PCIe bus after system suspend. I've tried several approaches to workaround this issue, but none of them works: - NVME_QUIRK_DELAY_BEFORE_CHK_RDY - NVME_QUIRK_NO_DEEPEST_PS - Disable APST before controller shutdown - Delay between controller shutdown and system suspend - Explicitly set power state to 0 before controller shutdown Fortunately it's a desktop, so disable APST won't hurt the battery. Also, change the quirk function name to reflect it's for vendor combination quirks. 
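For reference, the shape of such a vendor-combination quirk is sketched below. Kernel context is assumed, the PCI and DMI identifiers are the ones named above, and the returned flag is left as a placeholder; the real check (and the NVME_QUIRK_NO_APST value it returns) is in the hunk that follows.

/*
 * Sketch of a vendor-combination quirk: key on the PCI device *and* the
 * DMI board strings, so only the problematic platform is affected.
 */
#include <linux/dmi.h>
#include <linux/pci.h>

#define EXAMPLE_QUIRK_NO_APST   (1UL << 0)      /* placeholder flag value */

static unsigned long example_vendor_combination_quirks(struct pci_dev *pdev)
{
        if (pdev->vendor == 0x144d && pdev->device == 0xa804 &&
            dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
            dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
                return EXAMPLE_QUIRK_NO_APST;

        return 0;
}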
BugLink: https://bugs.launchpad.net/bugs/1705748 Signed-off-by: Kai-Heng Feng Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/pci.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 75539f7c58b9a4..66d872b050241f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2282,7 +2282,7 @@ static int nvme_dev_map(struct nvme_dev *dev) return -ENODEV; } -static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) +static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) { if (pdev->vendor == 0x144d && pdev->device == 0xa802) { /* @@ -2297,6 +2297,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) return NVME_QUIRK_NO_DEEPEST_PS; + } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { + /* + * Samsung SSD 960 EVO drops off the PCIe bus after system + * suspend on a Ryzen board, ASUS PRIME B350M-A. + */ + if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && + dmi_match(DMI_BOARD_NAME, "PRIME B350M-A")) + return NVME_QUIRK_NO_APST; } return 0; @@ -2336,7 +2344,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto unmap; - quirks |= check_dell_samsung_bug(pdev); + quirks |= check_vendor_combination_bug(pdev); result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, quirks); From 93a4bcf2c4252a518a128df1d017a7d5b1a814c2 Mon Sep 17 00:00:00 2001 From: Minwoo Im Date: Fri, 17 Nov 2017 01:34:24 +0900 Subject: [PATCH 1587/3326] nvme-pci: avoid hmb desc array idx out-of-bound when hmmaxd set. [ Upstream commit 244a8fe40a09c218622eb9927b9090b0a9b73a1a ] hmb descriptor idx out-of-bound occurs in case of below conditions. preferred = 128MiB chunk_size = 4MiB hmmaxd = 1 Current code will not allow rmmod which will free hmb descriptors to be done successfully in above case. "descs[i]" will be set in for-loop without seeing any conditions related to "max_entries" after a single "descs" was allocated by (max_entries = 1) in this case. Added a condition into for-loop to check index of descriptors. Fixes: 044a9df1("nvme-pci: implement the HMB entry number and size limitations") Signed-off-by: Minwoo Im Reviewed-by: Keith Busch Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 66d872b050241f..5dae650438c17d 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1645,7 +1645,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, if (!bufs) goto out_free_descs; - for (size = 0; size < preferred; size += len) { + for (size = 0; size < preferred && i < max_entries; size += len) { dma_addr_t dma_addr; len = min_t(u64, chunk_size, preferred - size); From 71686d2a17592a79c7174efad05ca79d3270b30d Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 10 Nov 2017 15:38:45 -0800 Subject: [PATCH 1588/3326] nvmet-fc: correct ref counting error when deferred rcv used [ Upstream commit 619c62dcc62b957d17cccde2081cad527b020883 ] Whenever a cmd is received a reference is taken while looking up the queue. The reference is removed after the cmd is done as the iod is returned for reuse. 
The fod may be reused for a deferred (received but no job context) cmd. Existing code removes the reference only if the fod is not reused for another command. Given that the fod may be used for one or more I/Os, although a reference was taken per I/O, the references won't be matched on the frees. Remove the reference on every fod free. This pairs the references to each I/O. Signed-off-by: James Smart Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/target/fc.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 58e010bdda3ea5..8e21211b904b32 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -532,15 +532,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); + /* release the queue lookup reference on the completed IO */ + nvmet_fc_tgt_q_put(queue); + spin_lock_irqsave(&queue->qlock, flags); deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, struct nvmet_fc_defer_fcp_req, req_list); if (!deferfcp) { list_add_tail(&fod->fcp_list, &fod->queue->fod_list); spin_unlock_irqrestore(&queue->qlock, flags); - - /* Release reference taken at queue lookup and fod allocation */ - nvmet_fc_tgt_q_put(queue); return; } @@ -759,6 +759,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) tgtport->ops->fcp_req_release(&tgtport->fc_target_port, deferfcp->fcp_req); + /* release the queue lookup reference */ + nvmet_fc_tgt_q_put(queue); + kfree(deferfcp); spin_lock_irqsave(&queue->qlock, flags); From 18c128456eacb6fc022264f178c3a821180854c2 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 16 Nov 2017 14:26:36 +0100 Subject: [PATCH 1589/3326] s390/topology: fix compile error in file arch/s390/kernel/smp.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 38389ec84e835fa31a59b7dabb18343106a6d0d5 ] Commit 1887aa07b676 ("s390/topology: add detection of dedicated vs shared CPUs") introduced the following compiler error when CONFIG_SCHED_TOPOLOGY is not set. CC arch/s390/kernel/smp.o ... arch/s390/kernel/smp.c: In function ‘smp_start_secondary’: arch/s390/kernel/smp.c:812:6: error: implicit declaration of function ‘topology_cpu_dedicated’; did you mean ‘topology_cpu_init’? This patch fixes the compiler error by adding a function topology_cpu_dedicated() that returns false when this config option is not defined.
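The stub follows the usual kernel pattern of compiling a disabled feature down to a no-op static inline so that callers build unconditionally. A generic, self-contained sketch of that pattern (CONFIG_FOO and foo_cpu_dedicated() are invented names; the actual stub added is in the diff below):

#include <stdio.h>

/* Pretend the option is not set, as in the failing build above. */
#undef CONFIG_FOO

#ifdef CONFIG_FOO
int foo_cpu_dedicated(int cpu);                 /* real implementation elsewhere */
#else
static inline int foo_cpu_dedicated(int cpu)    /* no-op stub when the option is off */
{
        (void)cpu;
        return 0;
}
#endif

int main(void)
{
        /* The caller compiles either way; with the stub it just sees "not dedicated". */
        printf("cpu 0 dedicated: %d\n", foo_cpu_dedicated(0));
        return 0;
}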
Signed-off-by: Thomas Richter Reviewed-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/s390/include/asm/topology.h | 1 + arch/s390/kernel/smp.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 55de4eb73604d9..de0a8b17bcaa23 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -51,6 +51,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu); static inline void topology_init_early(void) { } static inline void topology_schedule_update(void) { } static inline int topology_cpu_init(struct cpu *cpu) { return 0; } +static inline int topology_cpu_dedicated(int cpu_nr) { return 0; } static inline void topology_expect_change(void) { } #endif /* CONFIG_SCHED_TOPOLOGY */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 092c4154abd74c..7ffaf9fd6d1979 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -54,6 +54,7 @@ #include #include #include +#include #include "entry.h" enum { From 82b90de5b111deaa09ec61594a6df84a426dceba Mon Sep 17 00:00:00 2001 From: Harald Freudenberger Date: Fri, 17 Nov 2017 16:32:22 +0100 Subject: [PATCH 1590/3326] s390/zcrypt: Fix wrong comparison leading to strange load balancing [ Upstream commit 0b0882672640ced4deeebf84da0b88b6389619c4 ] The function to decide if one zcrypt queue is better than another one compared two pointers instead of comparing the values where the pointers refer to. So within the same zcrypt card when load of each queue was equal just one queue was used. This effect only appears on relatively lite load, typically with one thread applications. This patch fixes the wrong comparison and now the counters show that requests are balanced equally over all available queues within the cards. There is no performance improvement coming with this fix. As long as the queue depth for an APQN queue is not touched, processing is not faster when requests are spread over queues within the same card hardware. So this fix only beautifies the lszcrypt counter printouts. Signed-off-by: Harald Freudenberger Signed-off-by: Martin Schwidefsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/s390/crypto/zcrypt_api.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index b5f4006198b9e0..a9a56aa9c26b7e 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -218,8 +218,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, weight += atomic_read(&zq->load); pref_weight += atomic_read(&pref_zq->load); if (weight == pref_weight) - return &zq->queue->total_request_count > - &pref_zq->queue->total_request_count; + return zq->queue->total_request_count > + pref_zq->queue->total_request_count; return weight > pref_weight; } From cb78d818c30d4fa700d24baf1d91d6337da272db Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 15 Oct 2017 21:24:49 +0200 Subject: [PATCH 1591/3326] ACPI / bus: Leave modalias empty for devices which are not present [ Upstream commit 10809bb976648ac58194a629e3d7af99e7400297 ] Most Bay and Cherry Trail devices use a generic DSDT with all possible peripheral devices present in the DSDT, with their _STA returning 0x00 or 0x0f based on AML variables which describe what is actually present on the board. 
Since ACPI device objects with a 0x00 status (not present) still get an entry under /sys/bus/acpi/devices, and those entry had an acpi:PNPID modalias, userspace would end up loading modules for non present hardware. This commit fixes this by leaving the modalias empty for non present devices. This results in 10 modules less being loaded with a generic distro kernel config on my Cherry Trail test-device (a GPD pocket). Signed-off-by: Hans de Goede Signed-off-by: Rafael J. Wysocki Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/device_sysfs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index 24418932612eea..a041689e5701d7 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, int count; struct acpi_hardware_id *id; + /* Avoid unnecessarily loading modules for non present devices. */ + if (!acpi_device_is_present(acpi_dev)) + return 0; + /* * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the From d290178f00d483df27dc17bf41797355bd952d9d Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 15 Nov 2017 21:17:55 +0000 Subject: [PATCH 1592/3326] cpufreq: Add Loongson machine dependencies [ Upstream commit 0d307935fefa6389eb726c6362351c162c949101 ] The MIPS loongson cpufreq drivers don't build unless configured for the correct machine type, due to dependency on machine specific architecture headers and symbols in machine specific platform code. More specifically loongson1-cpufreq.c uses RST_CPU_EN and RST_CPU, neither of which is defined in asm/mach-loongson32/regs-clk.h unless CONFIG_LOONGSON1_LS1B=y, and loongson2_cpufreq.c references loongson2_clockmod_table[], which is only defined in arch/mips/loongson64/lemote-2f/clock.c, i.e. when CONFIG_LEMOTE_MACH2F=y. Add these dependencies to Kconfig to avoid randconfig / allyesconfig build failures (e.g. when based on BMIPS which also has a cpufreq driver). Signed-off-by: James Hogan Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/cpufreq/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 4ebae43118effe..d8addbce40bcc4 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -275,6 +275,7 @@ config BMIPS_CPUFREQ config LOONGSON2_CPUFREQ tristate "Loongson2 CPUFreq Driver" + depends on LEMOTE_MACH2F help This option adds a CPUFreq driver for loongson processors which support software configurable cpu frequency. @@ -287,6 +288,7 @@ config LOONGSON2_CPUFREQ config LOONGSON1_CPUFREQ tristate "Loongson1 CPUFreq Driver" + depends on LOONGSON1_LS1B help This option adds a CPUFreq driver for loongson1 processors which support software configurable cpu frequency. From 2e194c9c55964455366e9e1b93fe02f932ecf346 Mon Sep 17 00:00:00 2001 From: David Disseldorp Date: Wed, 8 Nov 2017 17:29:44 +0100 Subject: [PATCH 1593/3326] null_blk: fix dev->badblocks leak [ Upstream commit 1addb798e93893d33c8dfab743cd44f09fd7719a ] null_alloc_dev() allocates memory for dev->badblocks, but cleanup currently only occurs in the configfs release codepath, missing a number of other places. This bug was found running the blktests block/010 test, alongside kmemleak: rapido1:/blktests# ./check block/010 ... 
rapido1:/blktests# echo scan > /sys/kernel/debug/kmemleak [ 306.966708] kmemleak: 32 new suspected memory leaks (see /sys/kernel/debug/kmemleak) rapido1:/blktests# cat /sys/kernel/debug/kmemleak unreferenced object 0xffff88001f86d000 (size 4096): comm "modprobe", pid 231, jiffies 4294892415 (age 318.252s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [] kmemleak_alloc+0x49/0xa0 [] kmem_cache_alloc+0x9f/0xe0 [] badblocks_init+0x2f/0x60 [] 0xffffffffa0019fae [] nullb_device_badblocks_store+0x63/0x130 [null_blk] [] do_one_initcall+0x3d/0x170 [] do_init_module+0x56/0x1e9 [] load_module+0x1c47/0x26a0 [] SyS_finit_module+0xa9/0xd0 [] entry_SYSCALL_64_fastpath+0x13/0x94 Fixes: 2f54a613c942 ("nullb: badbblocks support") Reviewed-by: Shaohua Li Signed-off-by: David Disseldorp Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/block/null_blk.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 4d55af5c6e5b56..69dfa1d3f45353 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -467,7 +467,6 @@ static void nullb_device_release(struct config_item *item) { struct nullb_device *dev = to_nullb_device(item); - badblocks_exit(&dev->badblocks); null_free_device_storage(dev, false); null_free_dev(dev); } @@ -578,6 +577,10 @@ static struct nullb_device *null_alloc_dev(void) static void null_free_dev(struct nullb_device *dev) { + if (!dev) + return; + + badblocks_exit(&dev->badblocks); kfree(dev); } From e2443fb287cc52a98a3b6f417a5e46bf2383e5ba Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 22 Nov 2017 17:19:32 +0100 Subject: [PATCH 1594/3326] s390: fix alloc_pgste check in init_new_context again [ Upstream commit 53c4ab70c11c3ba1b9e3caa8e8c17e9c16d9cbc0 ] git commit badb8bb983e9 "fix alloc_pgste check in init_new_context" fixed the problem of 'current->mm == NULL' in init_new_context back in 2011. git commit 3eabaee998c7 "KVM: s390: allow sie enablement for multi- threaded programs" completely removed the check against alloc_pgste. git commit 23fefe119ceb "s390/kvm: avoid global config of vm.alloc_pgste=1" re-added a check against the alloc_pgste flag but without the required check for current->mm != NULL. For execve() called by a kernel thread init_new_context() reads from ((struct mm_struct *) NULL)->context.alloc_pgste to decide between 2K vs 4K page tables. If the bit happens to be set for the init process it will be created with large page tables. This decision is inherited by all the children of init, this waste quite some memory. Re-add the check for 'current->mm != NULL'. 
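The key detail is the short-circuit NULL guard: the inherited flag may only be read when a userspace mm actually exists. A condensed userspace model of the restored check (toy_mm and want_pgste are made-up names; the real one-liner is in the diff below):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_mm { bool alloc_pgste; };    /* stand-in for the mm context flag */

static bool want_pgste(const struct toy_mm *current_mm, bool sysctl_pgste,
                       bool thread_flag_pgste)
{
        /*
         * A kernel thread calling execve() has no mm (current_mm == NULL),
         * so the inherited flag must only be consulted behind a NULL check.
         */
        return sysctl_pgste || thread_flag_pgste ||
               (current_mm && current_mm->alloc_pgste);
}

int main(void)
{
        /* Kernel thread with no mm: falls back to the explicit knobs only. */
        printf("kernel thread, no mm: %d\n", want_pgste(NULL, false, false));
        return 0;
}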
Fixes: 23fefe119ceb ("s390/kvm: avoid global config of vm.alloc_pgste=1") Signed-off-by: Martin Schwidefsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/s390/include/asm/mmu_context.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 43607bb12cc2b3..a6cc744ff5fb12 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_PGSTE mm->context.alloc_pgste = page_table_allocate_pgste || test_thread_flag(TIF_PGSTE) || - current->mm->context.alloc_pgste; + (current->mm && current->mm->context.alloc_pgste); mm->context.has_pgste = 0; mm->context.use_skey = 0; mm->context.use_cmma = 0; From 92c131bebf4737866ffb31aeed45085ce827e0af Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 24 Nov 2017 10:18:40 +0000 Subject: [PATCH 1595/3326] rxrpc: The mutex lock returned by rxrpc_accept_call() needs releasing [ Upstream commit 03a6c82218b9a87014b2c6c4e178294fdc8ebd8a ] The caller of rxrpc_accept_call() must release the lock on call->user_mutex returned by that function. Signed-off-by: David Howells Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/rxrpc/sendmsg.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 9ea6f972767e7c..d2f51d6a253c2e 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -563,8 +563,8 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) /* The socket is now unlocked. */ if (IS_ERR(call)) return PTR_ERR(call); - rxrpc_put_call(call, rxrpc_call_put); - return 0; + ret = 0; + goto out_put_unlock; } call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID); @@ -633,6 +633,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ret = rxrpc_send_data(rx, call, msg, len, NULL); } +out_put_unlock: mutex_unlock(&call->user_mutex); error_put: rxrpc_put_call(call, rxrpc_call_put); From b89372f234a83d64a8f7e80fdcfadb64d9a9aeca Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 24 Nov 2017 10:18:40 +0000 Subject: [PATCH 1596/3326] rxrpc: Provide a different lockdep key for call->user_mutex for kernel calls [ Upstream commit 9faaff593404a9c4e5abc6839a641635d7b9d0cd ] Provide a different lockdep key for rxrpc_call::user_mutex when the call is made on a kernel socket, such as by the AFS filesystem. The problem is that lockdep registers a false positive between userspace calling the sendmsg syscall on a user socket where call->user_mutex is held whilst userspace memory is accessed whereas the AFS filesystem may perform operations with mmap_sem held by the caller. In such a case, the following warning is produced. ====================================================== WARNING: possible circular locking dependency detected 4.14.0-fscache+ #243 Tainted: G E ------------------------------------------------------ modpost/16701 is trying to acquire lock: (&vnode->io_lock){+.+.}, at: [] afs_begin_vnode_operation+0x33/0x77 [kafs] but task is already holding lock: (&mm->mmap_sem){++++}, at: [] __do_page_fault+0x1ef/0x486 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #3 (&mm->mmap_sem){++++}: __might_fault+0x61/0x89 _copy_from_iter_full+0x40/0x1fa rxrpc_send_data+0x8dc/0xff3 rxrpc_do_sendmsg+0x62f/0x6a1 rxrpc_sendmsg+0x166/0x1b7 sock_sendmsg+0x2d/0x39 ___sys_sendmsg+0x1ad/0x22b __sys_sendmsg+0x41/0x62 do_syscall_64+0x89/0x1be return_from_SYSCALL_64+0x0/0x75 -> #2 (&call->user_mutex){+.+.}: __mutex_lock+0x86/0x7d2 rxrpc_new_client_call+0x378/0x80e rxrpc_kernel_begin_call+0xf3/0x154 afs_make_call+0x195/0x454 [kafs] afs_vl_get_capabilities+0x193/0x198 [kafs] afs_vl_lookup_vldb+0x5f/0x151 [kafs] afs_create_volume+0x2e/0x2f4 [kafs] afs_mount+0x56a/0x8d7 [kafs] mount_fs+0x6a/0x109 vfs_kern_mount+0x67/0x135 do_mount+0x90b/0xb57 SyS_mount+0x72/0x98 do_syscall_64+0x89/0x1be return_from_SYSCALL_64+0x0/0x75 -> #1 (k-sk_lock-AF_RXRPC){+.+.}: lock_sock_nested+0x74/0x8a rxrpc_kernel_begin_call+0x8a/0x154 afs_make_call+0x195/0x454 [kafs] afs_fs_get_capabilities+0x17a/0x17f [kafs] afs_probe_fileserver+0xf7/0x2f0 [kafs] afs_select_fileserver+0x83f/0x903 [kafs] afs_fetch_status+0x89/0x11d [kafs] afs_iget+0x16f/0x4f8 [kafs] afs_mount+0x6c6/0x8d7 [kafs] mount_fs+0x6a/0x109 vfs_kern_mount+0x67/0x135 do_mount+0x90b/0xb57 SyS_mount+0x72/0x98 do_syscall_64+0x89/0x1be return_from_SYSCALL_64+0x0/0x75 -> #0 (&vnode->io_lock){+.+.}: lock_acquire+0x174/0x19f __mutex_lock+0x86/0x7d2 afs_begin_vnode_operation+0x33/0x77 [kafs] afs_fetch_data+0x80/0x12a [kafs] afs_readpages+0x314/0x405 [kafs] __do_page_cache_readahead+0x203/0x2ba filemap_fault+0x179/0x54d __do_fault+0x17/0x60 __handle_mm_fault+0x6d7/0x95c handle_mm_fault+0x24e/0x2a3 __do_page_fault+0x301/0x486 do_page_fault+0x236/0x259 page_fault+0x22/0x30 __clear_user+0x3d/0x60 padzero+0x1c/0x2b load_elf_binary+0x785/0xdc7 search_binary_handler+0x81/0x1ff do_execveat_common.isra.14+0x600/0x888 do_execve+0x1f/0x21 SyS_execve+0x28/0x2f do_syscall_64+0x89/0x1be return_from_SYSCALL_64+0x0/0x75 other info that might help us debug this: Chain exists of: &vnode->io_lock --> &call->user_mutex --> &mm->mmap_sem Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&mm->mmap_sem); lock(&call->user_mutex); lock(&mm->mmap_sem); lock(&vnode->io_lock); *** DEADLOCK *** 1 lock held by modpost/16701: #0: (&mm->mmap_sem){++++}, at: [] __do_page_fault+0x1ef/0x486 stack backtrace: CPU: 0 PID: 16701 Comm: modpost Tainted: G E 4.14.0-fscache+ #243 Hardware name: ASUS All Series/H97-PLUS, BIOS 2306 10/09/2014 Call Trace: dump_stack+0x67/0x8e print_circular_bug+0x341/0x34f check_prev_add+0x11f/0x5d4 ? add_lock_to_list.isra.12+0x8b/0x8b ? add_lock_to_list.isra.12+0x8b/0x8b ? __lock_acquire+0xf77/0x10b4 __lock_acquire+0xf77/0x10b4 lock_acquire+0x174/0x19f ? afs_begin_vnode_operation+0x33/0x77 [kafs] __mutex_lock+0x86/0x7d2 ? afs_begin_vnode_operation+0x33/0x77 [kafs] ? afs_begin_vnode_operation+0x33/0x77 [kafs] ? afs_begin_vnode_operation+0x33/0x77 [kafs] afs_begin_vnode_operation+0x33/0x77 [kafs] afs_fetch_data+0x80/0x12a [kafs] afs_readpages+0x314/0x405 [kafs] __do_page_cache_readahead+0x203/0x2ba ? 
filemap_fault+0x179/0x54d filemap_fault+0x179/0x54d __do_fault+0x17/0x60 __handle_mm_fault+0x6d7/0x95c handle_mm_fault+0x24e/0x2a3 __do_page_fault+0x301/0x486 do_page_fault+0x236/0x259 page_fault+0x22/0x30 RIP: 0010:__clear_user+0x3d/0x60 RSP: 0018:ffff880071e93da0 EFLAGS: 00010202 RAX: 0000000000000000 RBX: 000000000000011c RCX: 000000000000011c RDX: 0000000000000000 RSI: 0000000000000008 RDI: 000000000060f720 RBP: 000000000060f720 R08: 0000000000000001 R09: 0000000000000000 R10: 0000000000000001 R11: ffff8800b5459b68 R12: ffff8800ce150e00 R13: 000000000060f720 R14: 00000000006127a8 R15: 0000000000000000 padzero+0x1c/0x2b load_elf_binary+0x785/0xdc7 search_binary_handler+0x81/0x1ff do_execveat_common.isra.14+0x600/0x888 do_execve+0x1f/0x21 SyS_execve+0x28/0x2f do_syscall_64+0x89/0x1be entry_SYSCALL64_slow_path+0x25/0x25 RIP: 0033:0x7fdb6009ee07 RSP: 002b:00007fff566d9728 EFLAGS: 00000246 ORIG_RAX: 000000000000003b RAX: ffffffffffffffda RBX: 000055ba57280900 RCX: 00007fdb6009ee07 RDX: 000055ba5727f270 RSI: 000055ba5727cac0 RDI: 000055ba57280900 RBP: 000055ba57280900 R08: 00007fff566d9700 R09: 0000000000000000 R10: 000055ba5727cac0 R11: 0000000000000246 R12: 0000000000000000 R13: 000055ba5727cac0 R14: 000055ba5727f270 R15: 0000000000000000 Signed-off-by: David Howells Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/rxrpc/ar-internal.h | 2 +- net/rxrpc/call_accept.c | 2 +- net/rxrpc/call_object.c | 19 +++++++++++++++---- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index ea5600b747ccd2..2b3992c5060e12 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -671,7 +671,7 @@ extern unsigned int rxrpc_max_call_lifetime; extern struct kmem_cache *rxrpc_call_jar; struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long); -struct rxrpc_call *rxrpc_alloc_call(gfp_t); +struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t); struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *, diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index cbd1701e813a76..3028298ca56134 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, /* Now it gets complicated, because calls get registered with the * socket here, particularly if a user ID is preassigned by the user. 
*/ - call = rxrpc_alloc_call(gfp); + call = rxrpc_alloc_call(rx, gfp); if (!call) return -ENOMEM; call->flags |= (1 << RXRPC_CALL_IS_SERVICE); diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index fcdd6555a820ed..8a5a42e8ec230a 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -55,6 +55,8 @@ static void rxrpc_call_timer_expired(unsigned long _call) rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real()); } +static struct lock_class_key rxrpc_call_user_mutex_lock_class_key; + /* * find an extant server call * - called in process context with IRQs enabled @@ -95,7 +97,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx, /* * allocate a new call */ -struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) +struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp) { struct rxrpc_call *call; @@ -114,6 +116,14 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) goto nomem_2; mutex_init(&call->user_mutex); + + /* Prevent lockdep reporting a deadlock false positive between the afs + * filesystem and sys_sendmsg() via the mmap sem. + */ + if (rx->sk.sk_kern_sock) + lockdep_set_class(&call->user_mutex, + &rxrpc_call_user_mutex_lock_class_key); + setup_timer(&call->timer, rxrpc_call_timer_expired, (unsigned long)call); INIT_WORK(&call->processor, &rxrpc_process_call); @@ -150,7 +160,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) /* * Allocate a new client call. */ -static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx, +static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx, + struct sockaddr_rxrpc *srx, gfp_t gfp) { struct rxrpc_call *call; @@ -158,7 +169,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx, _enter(""); - call = rxrpc_alloc_call(gfp); + call = rxrpc_alloc_call(rx, gfp); if (!call) return ERR_PTR(-ENOMEM); call->state = RXRPC_CALL_CLIENT_AWAIT_CONN; @@ -209,7 +220,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, _enter("%p,%lx", rx, user_call_ID); - call = rxrpc_alloc_client_call(srx, gfp); + call = rxrpc_alloc_client_call(rx, srx, gfp); if (IS_ERR(call)) { release_sock(&rx->sk); _leave(" = %ld", PTR_ERR(call)); From 1392633bafde919b3d78449b8a1a1c9f50f2b72e Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 24 Nov 2017 10:18:42 +0000 Subject: [PATCH 1597/3326] rxrpc: Fix service endpoint expiry [ Upstream commit f859ab61875978eeaa539740ff7f7d91f5d60006 ] RxRPC service endpoints expire like they're supposed to by the following means: (1) Mark dead rxrpc_net structs (with ->live) rather than twiddling the global service conn timeout, otherwise the first rxrpc_net struct to die will cause connections on all others to expire immediately from then on. (2) Mark local service endpoints for which the socket has been closed (->service_closed) so that the expiration timeout can be much shortened for service and client connections going through that endpoint. (3) rxrpc_put_service_conn() needs to schedule the reaper when the usage count reaches 1, not 0, as idle conns have a 1 count. (4) The accumulator for the earliest time we might want to schedule for should be initialised to jiffies + MAX_JIFFY_OFFSET, not ULONG_MAX as the comparison functions use signed arithmetic. (5) Simplify the expiration handling, adding the expiration value to the idle timestamp each time rather than keeping track of the time in the past before which the idle timestamp must go to be expired. This is much easier to read. 
(6) Ignore the timeouts if the net namespace is dead. (7) Restart the service reaper work item rather the client reaper. Signed-off-by: David Howells Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/trace/events/rxrpc.h | 2 ++ net/rxrpc/af_rxrpc.c | 13 +++++++++++ net/rxrpc/ar-internal.h | 3 +++ net/rxrpc/conn_client.c | 2 ++ net/rxrpc/conn_object.c | 42 ++++++++++++++++++++---------------- net/rxrpc/net_ns.c | 3 +++ 6 files changed, 47 insertions(+), 18 deletions(-) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index ebe96796027aef..a58caf5807ffc5 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -49,6 +49,7 @@ enum rxrpc_conn_trace { rxrpc_conn_put_client, rxrpc_conn_put_service, rxrpc_conn_queued, + rxrpc_conn_reap_service, rxrpc_conn_seen, }; @@ -206,6 +207,7 @@ enum rxrpc_congest_change { EM(rxrpc_conn_put_client, "PTc") \ EM(rxrpc_conn_put_service, "PTs") \ EM(rxrpc_conn_queued, "QUE") \ + EM(rxrpc_conn_reap_service, "RPs") \ E_(rxrpc_conn_seen, "SEE") #define rxrpc_client_traces \ diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 4b0a8288c98a65..7c1cb08874d515 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -823,6 +823,19 @@ static int rxrpc_release_sock(struct sock *sk) sock_orphan(sk); sk->sk_shutdown = SHUTDOWN_MASK; + /* We want to kill off all connections from a service socket + * as fast as possible because we can't share these; client + * sockets, on the other hand, can share an endpoint. + */ + switch (sk->sk_state) { + case RXRPC_SERVER_BOUND: + case RXRPC_SERVER_BOUND2: + case RXRPC_SERVER_LISTENING: + case RXRPC_SERVER_LISTEN_DISABLED: + rx->local->service_closed = true; + break; + } + spin_lock_bh(&sk->sk_receive_queue.lock); sk->sk_state = RXRPC_CLOSE; spin_unlock_bh(&sk->sk_receive_queue.lock); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 2b3992c5060e12..e6c2c4f56fb121 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -84,6 +84,7 @@ struct rxrpc_net { unsigned int nr_client_conns; unsigned int nr_active_client_conns; bool kill_all_client_conns; + bool live; spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */ spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */ struct list_head waiting_client_conns; @@ -265,6 +266,7 @@ struct rxrpc_local { rwlock_t services_lock; /* lock for services list */ int debug_id; /* debug ID for printks */ bool dead; + bool service_closed; /* Service socket closed */ struct sockaddr_rxrpc srx; /* local address */ }; @@ -824,6 +826,7 @@ void rxrpc_process_connection(struct work_struct *); * conn_object.c */ extern unsigned int rxrpc_connection_expiry; +extern unsigned int rxrpc_closed_conn_expiry; struct rxrpc_connection *rxrpc_alloc_connection(gfp_t); struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *, diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 5f9624bd311c68..78a154173d9049 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -1061,6 +1061,8 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work) expiry = rxrpc_conn_idle_client_expiry; if (nr_conns > rxrpc_reap_client_connections) expiry = rxrpc_conn_idle_client_fast_expiry; + if (conn->params.local->service_closed) + expiry = rxrpc_closed_conn_expiry * HZ; conn_expires_at = conn->idle_timestamp + expiry; diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index fe575798592fec..a48c817b792b75 100644 --- 
a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -20,7 +20,8 @@ /* * Time till a connection expires after last use (in seconds). */ -unsigned int rxrpc_connection_expiry = 10 * 60; +unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60; +unsigned int __read_mostly rxrpc_closed_conn_expiry = 10; static void rxrpc_destroy_connection(struct rcu_head *); @@ -312,7 +313,7 @@ void rxrpc_put_service_conn(struct rxrpc_connection *conn) n = atomic_dec_return(&conn->usage); trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here); ASSERTCMP(n, >=, 0); - if (n == 0) { + if (n == 1) { rxnet = conn->params.local->rxnet; rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0); } @@ -353,15 +354,14 @@ void rxrpc_service_connection_reaper(struct work_struct *work) struct rxrpc_net *rxnet = container_of(to_delayed_work(work), struct rxrpc_net, service_conn_reaper); - unsigned long reap_older_than, earliest, idle_timestamp, now; + unsigned long expire_at, earliest, idle_timestamp, now; LIST_HEAD(graveyard); _enter(""); now = jiffies; - reap_older_than = now - rxrpc_connection_expiry * HZ; - earliest = ULONG_MAX; + earliest = now + MAX_JIFFY_OFFSET; write_lock(&rxnet->conn_lock); list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) { @@ -371,15 +371,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work) if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) continue; - idle_timestamp = READ_ONCE(conn->idle_timestamp); - _debug("reap CONN %d { u=%d,t=%ld }", - conn->debug_id, atomic_read(&conn->usage), - (long)reap_older_than - (long)idle_timestamp); - - if (time_after(idle_timestamp, reap_older_than)) { - if (time_before(idle_timestamp, earliest)) - earliest = idle_timestamp; - continue; + if (rxnet->live) { + idle_timestamp = READ_ONCE(conn->idle_timestamp); + expire_at = idle_timestamp + rxrpc_connection_expiry * HZ; + if (conn->params.local->service_closed) + expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ; + + _debug("reap CONN %d { u=%d,t=%ld }", + conn->debug_id, atomic_read(&conn->usage), + (long)expire_at - (long)now); + + if (time_before(now, expire_at)) { + if (time_before(expire_at, earliest)) + earliest = expire_at; + continue; + } } /* The usage count sits at 1 whilst the object is unused on the @@ -387,6 +393,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work) */ if (atomic_cmpxchg(&conn->usage, 1, 0) != 1) continue; + trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0); if (rxrpc_conn_is_client(conn)) BUG(); @@ -397,10 +404,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work) } write_unlock(&rxnet->conn_lock); - if (earliest != ULONG_MAX) { - _debug("reschedule reaper %ld", (long) earliest - now); + if (earliest != now + MAX_JIFFY_OFFSET) { + _debug("reschedule reaper %ld", (long)earliest - (long)now); ASSERT(time_after(earliest, now)); - rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, + rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, earliest - now); } @@ -429,7 +436,6 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet) rxrpc_destroy_all_client_connections(rxnet); - rxrpc_connection_expiry = 0; cancel_delayed_work(&rxnet->client_conn_reaper); rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0); flush_workqueue(rxrpc_workqueue); diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c index 7edceb8522f5b3..684c51d600c7f5 100644 --- a/net/rxrpc/net_ns.c +++ b/net/rxrpc/net_ns.c @@ -22,6 +22,7 @@ static __net_init int rxrpc_init_net(struct net *net) struct rxrpc_net *rxnet = rxrpc_net(net); int 
ret; + rxnet->live = true; get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch)); rxnet->epoch |= RXRPC_RANDOM_EPOCH; @@ -60,6 +61,7 @@ static __net_init int rxrpc_init_net(struct net *net) return 0; err_proc: + rxnet->live = false; return ret; } @@ -70,6 +72,7 @@ static __net_exit void rxrpc_exit_net(struct net *net) { struct rxrpc_net *rxnet = rxrpc_net(net); + rxnet->live = false; rxrpc_destroy_all_calls(rxnet); rxrpc_destroy_all_connections(rxnet); rxrpc_destroy_all_locals(rxnet); From f9f175778bb9e4a5fe6d46efde1b353f15fde1fa Mon Sep 17 00:00:00 2001 From: Michael Lyle Date: Fri, 24 Nov 2017 15:14:27 -0800 Subject: [PATCH 1598/3326] bcache: check return value of register_shrinker [ Upstream commit 6c4ca1e36cdc1a0a7a84797804b87920ccbebf51 ] register_shrinker is now __must_check, so check it to kill a warning. Caller of bch_btree_cache_alloc in super.c appropriately checks return value so this is fully plumbed through. This V2 fixes checkpatch warnings and improves the commit description, as I was too hasty getting the previous version out. Signed-off-by: Michael Lyle Reviewed-by: Vojtech Pavlik Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/btree.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 658c54b3b07a96..1598d1e04989ac 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c) c->shrink.scan_objects = bch_mca_scan; c->shrink.seeks = 4; c->shrink.batch = c->btree_pages * 2; - register_shrinker(&c->shrink); + + if (register_shrinker(&c->shrink)) + pr_warn("bcache: %s: could not register shrinker", + __func__); return 0; } From a595f190fc6be88c3942b468819378a977379fe1 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 1 Nov 2017 19:21:55 -0400 Subject: [PATCH 1599/3326] drm/amdgpu: Fix SDMA load/unload sequence on HWS disabled mode [ Upstream commit cf21654b40968609779751b34e7923180968fe5b ] Fix the SDMA load and unload sequence as suggested by HW document. 
Signed-off-by: shaoyun liu Signed-off-by: Felix Kuehling Acked-by: Oded Gabbay Signed-off-by: Oded Gabbay Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 47 ++++++++++++++----- 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index b9dbbf9cb8b07e..bdabaa3399db2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -369,29 +369,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; + unsigned long end_jiffies; uint32_t sdma_base_addr; + uint32_t data; m = get_sdma_mqd(mqd); sdma_base_addr = get_sdma_base_addr(m); - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, - m->sdma_rlc_virtual_addr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, - m->sdma_rlc_rb_base); + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) + return -ETIME; + usleep_range(500, 1000); + } + if (m->sdma_engine_id) { + data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); + data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, + RESUME_CTX, 0); + WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); + } else { + data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); + data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, + RESUME_CTX, 0); + WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); + } + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, + m->sdma_rlc_doorbell); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, + m->sdma_rlc_virtual_addr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, m->sdma_rlc_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdma_rlc_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdma_rlc_rb_rptr_addr_hi); - - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, - m->sdma_rlc_doorbell); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, m->sdma_rlc_rb_cntl); @@ -564,9 +585,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, } WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); return 0; } From 64aca9911a1097b224739e2cf060c3114e6bd59f Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Wed, 1 Nov 2017 19:21:56 -0400 Subject: [PATCH 1600/3326] drm/amdkfd: Fix SDMA ring buffer size calculation [ Upstream commit d12fb13f23199faa7e536acec1db49068e5a067d ] ffs function return the position of the first bit set on 1 based. (bit zero returns 1). 
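A quick userspace check of that arithmetic, assuming (as the fix below implies) that the RB_SIZE field wants log2 of the ring size in dwords; glibc's ffs(3) follows the same 1-based convention as the kernel helper, and the 1024-byte queue size is just an example:

#include <stdio.h>
#include <strings.h>    /* ffs(3): 1-based index of the first set bit */

int main(void)
{
        unsigned int queue_size = 1024;                          /* bytes, power of two */
        unsigned int dwords = queue_size / sizeof(unsigned int); /* 256 */

        /* ffs(256) = 9 (what the old code programmed), log2(256) = 8. */
        printf("ffs(%u) = %d, ffs(%u) - 1 = %d\n",
               dwords, ffs(dwords), dwords, ffs(dwords) - 1);
        return 0;
}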
Signed-off-by: shaoyun liu Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 44ffd23348fc44..164fa4b1f9a9d7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -205,8 +205,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, struct cik_sdma_rlc_registers *m; m = get_sdma_mqd(mqd); - m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) << - SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | + m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1) + << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; From b15f53b46d880fae35c976d8285c09debbb760bb Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 1 Nov 2017 19:21:57 -0400 Subject: [PATCH 1601/3326] drm/amdkfd: Fix SDMA oversubsription handling [ Upstream commit 8c946b8988acec785bcf67088b6bd0747f36d2d3 ] SDMA only supports a fixed number of queues. HWS cannot handle oversubscription. Signed-off-by: shaoyun liu Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- .../drm/amd/amdkfd/kfd_process_queue_manager.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 03bec765b03d94..f9a1a4db9be745 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -184,6 +184,24 @@ int pqm_create_queue(struct process_queue_manager *pqm, switch (type) { case KFD_QUEUE_TYPE_SDMA: + if (dev->dqm->queue_count >= + CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) { + pr_err("Over-subscription is not allowed for SDMA.\n"); + retval = -EPERM; + goto err_create_queue; + } + + retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); + if (retval != 0) + goto err_create_queue; + pqn->q = q; + pqn->kq = NULL; + retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, + &q->properties.vmid); + pr_debug("DQM returned %d for create_queue\n", retval); + print_queue(q); + break; + case KFD_QUEUE_TYPE_COMPUTE: /* check if there is over subscription */ if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && From 120c41af36df87401576f09498abf5696fb3d474 Mon Sep 17 00:00:00 2001 From: "Dmitry V. 
Levin" Date: Mon, 13 Nov 2017 03:35:27 +0300 Subject: [PATCH 1602/3326] uapi: fix linux/kfd_ioctl.h userspace compilation errors [ Upstream commit b4d085201d86af69cbda2214c6dafc0be240ef9f ] Consistently use types provided by via to fix the following linux/kfd_ioctl.h userspace compilation errors: /usr/include/linux/kfd_ioctl.h:236:2: error: unknown type name 'uint64_t' uint64_t va_addr; /* to KFD */ /usr/include/linux/kfd_ioctl.h:237:2: error: unknown type name 'uint32_t' uint32_t gpu_id; /* to KFD */ /usr/include/linux/kfd_ioctl.h:238:2: error: unknown type name 'uint32_t' uint32_t pad; /usr/include/linux/kfd_ioctl.h:243:2: error: unknown type name 'uint64_t' uint64_t tile_config_ptr; /usr/include/linux/kfd_ioctl.h:245:2: error: unknown type name 'uint64_t' uint64_t macro_tile_config_ptr; /usr/include/linux/kfd_ioctl.h:249:2: error: unknown type name 'uint32_t' uint32_t num_tile_configs; /usr/include/linux/kfd_ioctl.h:253:2: error: unknown type name 'uint32_t' uint32_t num_macro_tile_configs; /usr/include/linux/kfd_ioctl.h:255:2: error: unknown type name 'uint32_t' uint32_t gpu_id; /* to KFD */ /usr/include/linux/kfd_ioctl.h:256:2: error: unknown type name 'uint32_t' uint32_t gb_addr_config; /* from KFD */ /usr/include/linux/kfd_ioctl.h:257:2: error: unknown type name 'uint32_t' uint32_t num_banks; /* from KFD */ /usr/include/linux/kfd_ioctl.h:258:2: error: unknown type name 'uint32_t' uint32_t num_ranks; /* from KFD */ Fixes: 6a1c9510694fe ("drm/amdkfd: Adding new IOCTL for scratch memory v2") Fixes: 5d71dbc3a5886 ("drm/amdkfd: Implement image tiling mode support v2") Signed-off-by: Dmitry V. Levin Signed-off-by: Oded Gabbay Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/kfd_ioctl.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 26283fefdf5fd5..f7015aa1234747 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -233,29 +233,29 @@ struct kfd_ioctl_wait_events_args { }; struct kfd_ioctl_set_scratch_backing_va_args { - uint64_t va_addr; /* to KFD */ - uint32_t gpu_id; /* to KFD */ - uint32_t pad; + __u64 va_addr; /* to KFD */ + __u32 gpu_id; /* to KFD */ + __u32 pad; }; struct kfd_ioctl_get_tile_config_args { /* to KFD: pointer to tile array */ - uint64_t tile_config_ptr; + __u64 tile_config_ptr; /* to KFD: pointer to macro tile array */ - uint64_t macro_tile_config_ptr; + __u64 macro_tile_config_ptr; /* to KFD: array size allocated by user mode * from KFD: array size filled by kernel */ - uint32_t num_tile_configs; + __u32 num_tile_configs; /* to KFD: array size allocated by user mode * from KFD: array size filled by kernel */ - uint32_t num_macro_tile_configs; + __u32 num_macro_tile_configs; - uint32_t gpu_id; /* to KFD */ - uint32_t gb_addr_config; /* from KFD */ - uint32_t num_banks; /* from KFD */ - uint32_t num_ranks; /* from KFD */ + __u32 gpu_id; /* to KFD */ + __u32 gb_addr_config; /* from KFD */ + __u32 num_banks; /* from KFD */ + __u32 num_ranks; /* from KFD */ /* struct size can be extended later if needed * without breaking ABI compatibility */ From dd45c5e5be071fb9fee6e5f82fed6e0bf67737d5 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 23 Nov 2017 17:35:22 +0200 Subject: [PATCH 1603/3326] nvme-rdma: don't complete requests before a send work request has completed [ Upstream commit 4af7f7ff92a42b6c713293c99e7982bcfcf51a70 ] In order to guarantee that the HCA will never get an access 
violation (either from invalidated rkey or from iommu) when retrying a send operation we must complete a request only when both send completion and the nvme cqe has arrived. We need to set the send/recv completions flags atomically because we might have more than a single context accessing the request concurrently (one is cq irq-poll context and the other is user-polling used in IOCB_HIPRI). Only then we are safe to invalidate the rkey (if needed), unmap the host buffers, and complete the IO. Signed-off-by: Sagi Grimberg Reviewed-by: Max Gurtovoy Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/rdma.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 6de163e6c9ebab..33d4431c2b4b18 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -67,6 +67,9 @@ struct nvme_rdma_request { struct nvme_request req; struct ib_mr *mr; struct nvme_rdma_qe sqe; + union nvme_result result; + __le16 status; + refcount_t ref; struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS]; u32 num_sge; int nents; @@ -1177,6 +1180,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, req->num_sge = 1; req->inline_data = false; req->mr->need_inval = false; + refcount_set(&req->ref, 2); /* send and recv completions */ c->common.flags |= NVME_CMD_SGL_METABUF; @@ -1213,8 +1217,19 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) { - if (unlikely(wc->status != IB_WC_SUCCESS)) + struct nvme_rdma_qe *qe = + container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); + struct nvme_rdma_request *req = + container_of(qe, struct nvme_rdma_request, sqe); + struct request *rq = blk_mq_rq_from_pdu(req); + + if (unlikely(wc->status != IB_WC_SUCCESS)) { nvme_rdma_wr_error(cq, wc, "SEND"); + return; + } + + if (refcount_dec_and_test(&req->ref)) + nvme_end_request(rq, req->status, req->result); } /* @@ -1359,14 +1374,19 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, } req = blk_mq_rq_to_pdu(rq); - if (rq->tag == tag) - ret = 1; + req->status = cqe->status; + req->result = cqe->result; if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) && wc->ex.invalidate_rkey == req->mr->rkey) req->mr->need_inval = false; - nvme_end_request(rq, cqe->status, cqe->result); + if (refcount_dec_and_test(&req->ref)) { + if (rq->tag == tag) + ret = 1; + nvme_end_request(rq, req->status, req->result); + } + return ret; } From 9be97a9abe9e0a3166320f21a42dc5e0966cb5c4 Mon Sep 17 00:00:00 2001 From: zhangliping Date: Sat, 25 Nov 2017 22:02:12 +0800 Subject: [PATCH 1604/3326] openvswitch: fix the incorrect flow action alloc size [ Upstream commit 67c8d22a73128ff910e2287567132530abcf5b71 ] If we want to add a datapath flow, which has more than 500 vxlan outputs' action, we will get the following error reports: openvswitch: netlink: Flow action size 32832 bytes exceeds max openvswitch: netlink: Flow action size 32832 bytes exceeds max openvswitch: netlink: Actions may not be safe on all matching packets ... ... It seems that we can simply enlarge the MAX_ACTIONS_BUFSIZE to fix it, but this is not the root cause. For example, for a vxlan output action, we need about 60 bytes for the nlattr, but after it is converted to the flow action, it only occupies 24 bytes. 
This means that we can still support more than 1000 vxlan output actions for a single datapath flow under the the current 32k max limitation. So even if the nla_len(attr) is larger than MAX_ACTIONS_BUFSIZE, we shouldn't report EINVAL and keep it move on, as the judgement can be done by the reserve_sfa_size. Signed-off-by: zhangliping Acked-by: Pravin B Shelar Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/openvswitch/flow_netlink.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index e8eb427ce6d1f9..0d9f6afa266c5b 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -1903,14 +1903,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb) #define MAX_ACTIONS_BUFSIZE (32 * 1024) -static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log) +static struct sw_flow_actions *nla_alloc_flow_actions(int size) { struct sw_flow_actions *sfa; - if (size > MAX_ACTIONS_BUFSIZE) { - OVS_NLERR(log, "Flow action size %u bytes exceeds max", size); - return ERR_PTR(-EINVAL); - } + WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE); sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); if (!sfa) @@ -1983,12 +1980,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, new_acts_size = ksize(*sfa) * 2; if (new_acts_size > MAX_ACTIONS_BUFSIZE) { - if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) + if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { + OVS_NLERR(log, "Flow action size exceeds max %u", + MAX_ACTIONS_BUFSIZE); return ERR_PTR(-EMSGSIZE); + } new_acts_size = MAX_ACTIONS_BUFSIZE; } - acts = nla_alloc_flow_actions(new_acts_size, log); + acts = nla_alloc_flow_actions(new_acts_size); if (IS_ERR(acts)) return (void *)acts; @@ -2660,7 +2660,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, { int err; - *sfa = nla_alloc_flow_actions(nla_len(attr), log); + *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE)); if (IS_ERR(*sfa)) return PTR_ERR(*sfa); From 093a5cb46d05895d02c36d5be784c7a71cd0c585 Mon Sep 17 00:00:00 2001 From: Mirza Krak Date: Wed, 15 Nov 2017 08:24:46 +0000 Subject: [PATCH 1605/3326] drm/rockchip: dw-mipi-dsi: fix possible un-balanced runtime PM enable [ Upstream commit 517f56839f581618d24f2e67a35738a5c6cbaecb ] In the case where the bind gets deferred we would end up with a un-balanced runtime PM enable call. Fix this by simply moving the pm_runtime_enable call to the end of the bind function when all paths have succeeded. 
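As a rough sketch of that ordering (not the rockchip driver itself), the pattern is to defer pm_runtime_enable() until every step that can fail or defer has succeeded; the example_register_*() helpers below are invented stand-ins for those steps.

	/*
	 * Hedged sketch of the ordering, not the rockchip driver itself.  The
	 * example_register_*() helpers are invented stand-ins for bind steps
	 * that may fail (e.g. with -EPROBE_DEFER).
	 */
	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	static int example_register_host(struct device *dev) { return 0; }
	static void example_unregister_host(struct device *dev) { }
	static int example_register_encoder(struct device *dev) { return 0; }

	static int example_bind(struct device *dev)
	{
		int ret;

		ret = example_register_host(dev);
		if (ret)
			return ret;	/* nothing enabled yet, nothing to undo */

		ret = example_register_encoder(dev);
		if (ret)
			goto err_host;

		/* Enable runtime PM only after every failure path has been passed. */
		pm_runtime_enable(dev);
		return 0;

	err_host:
		example_unregister_host(dev);
		return ret;
	}

With this ordering, a deferred bind never needs a compensating pm_runtime_disable().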
Signed-off-by: Mirza Krak Signed-off-by: Sandy Huang Link: https://patchwork.freedesktop.org/patch/msgid/1510734286-37434-1-git-send-email-mirza.krak@endian.se Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/rockchip/dw-mipi-dsi.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c index 9a20b9dc27c834..f7fc652b002703 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c @@ -1275,8 +1275,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master, goto err_pllref; } - pm_runtime_enable(dev); - dsi->dsi_host.ops = &dw_mipi_dsi_host_ops; dsi->dsi_host.dev = dev; ret = mipi_dsi_host_register(&dsi->dsi_host); @@ -1291,6 +1289,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master, } dev_set_drvdata(dev, dsi); + pm_runtime_enable(dev); return 0; err_mipi_dsi_host: From e23090a7d8f05f03cf564148472130286f5ca9bf Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 21 Nov 2017 14:46:08 +0100 Subject: [PATCH 1606/3326] mac80211: use QoS NDP for AP probing [ Upstream commit 7b6ddeaf27eca72795ceeae2f0f347db1b5f9a30 ] When connected to a QoS/WMM AP, mac80211 should use a QoS NDP for probing it, instead of a regular non-QoS one, fix this. Change all the drivers to *not* allow QoS NDP for now, even though it looks like most of them should be OK with that. Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath9k/channel.c | 2 +- drivers/net/wireless/st/cw1200/sta.c | 4 ++-- drivers/net/wireless/ti/wl1251/main.c | 2 +- drivers/net/wireless/ti/wlcore/cmd.c | 5 ++-- include/net/mac80211.h | 8 ++++++- net/mac80211/mlme.c | 2 +- net/mac80211/tx.c | 29 ++++++++++++++++++++++-- 7 files changed, 42 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index f0439f2d566b13..173891b11b2d71 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -1112,7 +1112,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp, if (!avp->assoc) return false; - skb = ieee80211_nullfunc_get(sc->hw, vif); + skb = ieee80211_nullfunc_get(sc->hw, vif, false); if (!skb) return false; diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index a52224836a2bb6..666b88cb2cfe2c 100644 --- a/drivers/net/wireless/st/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, priv->bss_loss_state++; - skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false); WARN_ON(!skb); if (skb) cw1200_tx(priv->hw, NULL, skb); @@ -2266,7 +2266,7 @@ static int cw1200_upload_null(struct cw1200_common *priv) .rate = 0xFF, }; - frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false); if (!frame.skb) return -ENOMEM; diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 9915d83a4a3055..6d02c660b4ab78 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl) size = sizeof(struct wl12xx_null_data_template); ptr = NULL; } else { - skb = ieee80211_nullfunc_get(wl->hw, wl->vif); + 
skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false); if (!skb) goto out; size = skb->len; diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 2bfc12fdc9292b..761cf8573a805e 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif) ptr = NULL; } else { skb = ieee80211_nullfunc_get(wl->hw, - wl12xx_wlvif_to_vif(wlvif)); + wl12xx_wlvif_to_vif(wlvif), + false); if (!skb) goto out; size = skb->len; @@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, struct sk_buff *skb = NULL; int ret = -ENOMEM; - skb = ieee80211_nullfunc_get(wl->hw, vif); + skb = ieee80211_nullfunc_get(wl->hw, vif, false); if (!skb) goto out; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 885690fa39c8d8..4f1d2dec43cef8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -4470,18 +4470,24 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, * ieee80211_nullfunc_get - retrieve a nullfunc template * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @qos_ok: QoS NDP is acceptable to the caller, this should be set + * if at all possible * * Creates a Nullfunc template which can, for example, uploaded to * hardware. The template must be updated after association so that correct * BSSID and address is used. * + * If @qos_ndp is set and the association is to an AP with QoS/WMM, the + * returned packet will be QoS NDP. + * * Note: Caller (or hardware) is responsible for setting the * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields. * * Return: The nullfunc template. %NULL on error. 
*/ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + bool qos_ok); /** * ieee80211_probereq_get - retrieve a Probe Request template diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 3b8e2709d8dea1..9115cc52ce8312 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -908,7 +908,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_hdr_3addr *nullfunc; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif); + skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true); if (!skb) return; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 94826680cf2b54..73429841f1155a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -4404,13 +4404,15 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, EXPORT_SYMBOL(ieee80211_pspoll_get); struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + bool qos_ok) { struct ieee80211_hdr_3addr *nullfunc; struct ieee80211_sub_if_data *sdata; struct ieee80211_if_managed *ifmgd; struct ieee80211_local *local; struct sk_buff *skb; + bool qos = false; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return NULL; @@ -4419,7 +4421,17 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, ifmgd = &sdata->u.mgd; local = sdata->local; - skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc)); + if (qos_ok) { + struct sta_info *sta; + + rcu_read_lock(); + sta = sta_info_get(sdata, ifmgd->bssid); + qos = sta && sta->sta.wme; + rcu_read_unlock(); + } + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + + sizeof(*nullfunc) + 2); if (!skb) return NULL; @@ -4429,6 +4441,19 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS); + if (qos) { + __le16 qos = cpu_to_le16(7); + + BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC | + IEEE80211_STYPE_NULLFUNC) != + IEEE80211_STYPE_QOS_NULLFUNC); + nullfunc->frame_control |= + cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC); + skb->priority = 7; + skb_set_queue_mapping(skb, IEEE80211_AC_VO); + skb_put_data(skb, &qos, sizeof(qos)); + } + memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN); memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN); From 841211271eccb49f7f4a71442c13a09d4b6a92b0 Mon Sep 17 00:00:00 2001 From: Chun-Yeow Yeoh Date: Tue, 14 Nov 2017 23:20:05 +0800 Subject: [PATCH 1607/3326] mac80211: fix the update of path metric for RANN frame [ Upstream commit fbbdad5edf0bb59786a51b94a9d006bc8c2da9a2 ] The previous path metric update from RANN frame has not considered the own link metric toward the transmitting mesh STA. Fix this. 
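The corrected update adds the last-hop link metric to the metric carried in the RANN and saturates on 32-bit overflow. A minimal userspace sketch of that arithmetic follows; MAX_METRIC here is a stand-in constant, not the mac80211 definition.

	/*
	 * Minimal userspace sketch of the saturating metric update; MAX_METRIC
	 * is a stand-in constant, not the mac80211 definition.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_METRIC 0xffffffffU

	static uint32_t rann_metric_update(uint32_t orig_metric, uint32_t last_hop_metric)
	{
		uint32_t new_metric = orig_metric + last_hop_metric;

		/* On 32-bit wrap-around, saturate instead of reporting a tiny metric. */
		if (new_metric < orig_metric)
			new_metric = MAX_METRIC;
		return new_metric;
	}

	int main(void)
	{
		printf("%u\n", (unsigned)rann_metric_update(100, 25));		/* 125 */
		printf("%u\n", (unsigned)rann_metric_update(0xfffffff0U, 0x20));	/* saturates */
		return 0;
	}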
Reported-by: Michael65535 Signed-off-by: Chun-Yeow Yeoh Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/mac80211/mesh_hwmp.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index d8bbd0d2225a16..d6d3f316de4cf3 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -797,7 +797,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, struct mesh_path *mpath; u8 ttl, flags, hopcount; const u8 *orig_addr; - u32 orig_sn, metric, metric_txsta, interval; + u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval; bool root_is_gate; ttl = rann->rann_ttl; @@ -808,7 +808,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, interval = le32_to_cpu(rann->rann_interval); hopcount = rann->rann_hopcount; hopcount++; - metric = le32_to_cpu(rann->rann_metric); + orig_metric = le32_to_cpu(rann->rann_metric); /* Ignore our own RANNs */ if (ether_addr_equal(orig_addr, sdata->vif.addr)) @@ -825,7 +825,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, return; } - metric_txsta = airtime_link_metric_get(local, sta); + last_hop_metric = airtime_link_metric_get(local, sta); + new_metric = orig_metric + last_hop_metric; + if (new_metric < orig_metric) + new_metric = MAX_METRIC; mpath = mesh_path_lookup(sdata, orig_addr); if (!mpath) { @@ -838,7 +841,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, } if (!(SN_LT(mpath->sn, orig_sn)) && - !(mpath->sn == orig_sn && metric < mpath->rann_metric)) { + !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) { rcu_read_unlock(); return; } @@ -856,7 +859,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, } mpath->sn = orig_sn; - mpath->rann_metric = metric + metric_txsta; + mpath->rann_metric = new_metric; mpath->is_root = true; /* Recording RANNs sender address to send individually * addressed PREQs destined for root mesh STA */ @@ -876,7 +879,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, orig_sn, 0, NULL, 0, broadcast_addr, hopcount, ttl, interval, - metric + metric_txsta, 0, sdata); + new_metric, 0, sdata); } rcu_read_unlock(); From 928066e61940e3c19f85d543d15c55fcae20208f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 15 Nov 2017 16:20:52 -0500 Subject: [PATCH 1608/3326] btrfs: fix deadlock when writing out space cache [ Upstream commit b77000ed558daa3bef0899d29bf171b8c9b5e6a8 ] If we fail to prepare our pages for whatever reason (out of memory in our case) we need to make sure to drop the block_group->data_rwsem, otherwise hilarity ensues. Signed-off-by: Josef Bacik Reviewed-by: Omar Sandoval Reviewed-by: Liu Bo Reviewed-by: David Sterba [ add label and use existing unlocking code ] Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/free-space-cache.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index cdc9f4015ec36c..4426d1c73e50f1 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1264,7 +1264,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, /* Lock all pages first so we can lock the extent safely. 
*/ ret = io_ctl_prepare_pages(io_ctl, inode, 0); if (ret) - goto out; + goto out_unlock; lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state); @@ -1358,6 +1358,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, out_nospc: cleanup_write_cache_enospc(inode, io_ctl, &cached_state); +out_unlock: if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) up_write(&block_group->data_rwsem); From d4c72a410f3bbcddac69f666024934a492dd8c38 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 25 Nov 2017 21:05:34 +0800 Subject: [PATCH 1609/3326] sctp: only allow the asoc reset when the asoc outq is empty [ Upstream commit 5c6144a0eb5366ae07fc5059301b139338f39bbd ] As it says in rfc6525#section5.1.4, before sending the request, C2: The sender has either no outstanding TSNs or considers all outstanding TSNs abandoned. Prior to this patch, it tried to consider all outstanding TSNs abandoned by dropping all chunks in all outqs with sctp_outq_free (even including sacked, retransmit and transmitted queues) when doing this reset, which is too aggressive. To make it work gently, this patch will only allow the asoc reset when the sender has no outstanding TSNs by checking if unsent, transmitted and retransmit are all empty with sctp_outq_is_empty before sending and processing the request. Fixes: 692787cef651 ("sctp: implement receiver-side procedures for the SSN/TSN Reset Request Parameter") Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sctp/stream.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 724adf2786a23b..7710133238ea48 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -224,6 +224,9 @@ int sctp_send_reset_assoc(struct sctp_association *asoc) if (asoc->strreset_outstanding) return -EINPROGRESS; + if (!sctp_outq_is_empty(&asoc->outqueue)) + return -EAGAIN; + chunk = sctp_make_strreset_tsnreq(asoc); if (!chunk) return -ENOMEM; @@ -544,6 +547,12 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( } goto err; } + + if (!sctp_outq_is_empty(&asoc->outqueue)) { + result = SCTP_STRRESET_IN_PROGRESS; + goto err; + } + asoc->strreset_inseq++; if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ)) From 55f3de731c0b21ad37f80c7819a925278356ddf1 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 25 Nov 2017 21:05:35 +0800 Subject: [PATCH 1610/3326] sctp: avoid flushing unsent queue when doing asoc reset [ Upstream commit 159f2a7456c6ae95c1e1a58e8b8ec65ef12d51cf ] Now when doing asoc reset, it cleans up sacked and abandoned queues by calling sctp_outq_free where it also cleans up unsent, retransmit and transmitted queues. It's safe for the sender of response, as these 3 queues are empty at that time. But when the receiver of response is doing the reset, the users may already enqueue some chunks into unsent during the time waiting the response, and these chunks should not be flushed. To void the chunks in it would be removed, it moves the queue into a temp list, then gets it back after sctp_outq_free is done. The patch also fixes some incorrect comments in sctp_process_strreset_tsnreq. Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sctp/stream.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 7710133238ea48..aa629654d27e90 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -563,9 +563,10 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( goto out; } - /* G3: The same processing as though a SACK chunk with no gap report - * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were - * received MUST be performed. + /* G4: The same processing as though a FWD-TSN chunk (as defined in + * [RFC3758]) with all streams affected and a new cumulative TSN + * ACK of the Receiver's Next TSN minus 1 were received MUST be + * performed. */ max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen); @@ -580,10 +581,9 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, init_tsn, GFP_ATOMIC); - /* G4: The same processing as though a FWD-TSN chunk (as defined in - * [RFC3758]) with all streams affected and a new cumulative TSN - * ACK of the Receiver's Next TSN minus 1 were received MUST be - * performed. + /* G3: The same processing as though a SACK chunk with no gap report + * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were + * received MUST be performed. */ sctp_outq_free(&asoc->outqueue); @@ -844,6 +844,7 @@ struct sctp_chunk *sctp_process_strreset_resp( if (result == SCTP_STRRESET_PERFORMED) { __u32 mtsn = sctp_tsnmap_get_max_tsn_seen( &asoc->peer.tsn_map); + LIST_HEAD(temp); sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn); sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); @@ -852,7 +853,13 @@ struct sctp_chunk *sctp_process_strreset_resp( SCTP_TSN_MAP_INITIAL, stsn, GFP_ATOMIC); + /* Clean up sacked and abandoned queues only. As the + * out_chunk_list may not be empty, splice it to temp, + * then get it back after sctp_outq_free is done. + */ + list_splice_init(&asoc->outqueue.out_chunk_list, &temp); sctp_outq_free(&asoc->outqueue); + list_splice_init(&temp, &asoc->outqueue.out_chunk_list); asoc->next_tsn = rtsn; asoc->ctsn_ack_point = asoc->next_tsn - 1; From 14a4e9f6bde8cdba1c0896c0af31304a934e0427 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 25 Nov 2017 21:05:36 +0800 Subject: [PATCH 1611/3326] sctp: set sender next_tsn for the old result with ctsn_ack_point plus 1 [ Upstream commit 52a395896a051a3d5c34fba67c324f69ec5e67c6 ] When doing asoc reset, if the sender of the response has already sent some chunk and increased asoc->next_tsn before the duplicate request comes, the response will use the old result with an incorrect sender next_tsn. Better than asoc->next_tsn, asoc->ctsn_ack_point can't be changed after the sender of the response has performed the asoc reset and before the peer has confirmed it, and it's value is still asoc->next_tsn original value minus 1. This patch sets sender next_tsn for the old result with ctsn_ack_point plus 1 when processing the duplicate request, to make sure the sender next_tsn value peer gets will be always right. Fixes: 692787cef651 ("sctp: implement receiver-side procedures for the SSN/TSN Reset Request Parameter") Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sctp/stream.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sctp/stream.c b/net/sctp/stream.c index aa629654d27e90..9ea6057ed28b21 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -541,7 +541,7 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( i = asoc->strreset_inseq - request_seq - 1; result = asoc->strreset_result[i]; if (result == SCTP_STRRESET_PERFORMED) { - next_tsn = asoc->next_tsn; + next_tsn = asoc->ctsn_ack_point + 1; init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1; } From 9a447435afabf52b16d1295d0f18bef5ab3abb65 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 30 Oct 2017 11:20:15 -0400 Subject: [PATCH 1612/3326] reiserfs: remove unneeded i_version bump [ Upstream commit 9f97df50c52c2887432debb6238f4e43567386a5 ] The i_version field in reiserfs is not initialized and is only ever updated here. Nothing ever views it, so just remove it. Signed-off-by: Jeff Layton Signed-off-by: Jan Kara Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/reiserfs/super.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 5464ec517702f1..4885c7b6e44fb9 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -2591,7 +2591,6 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, return err; if (inode->i_size < off + len - towrite) i_size_write(inode, off + len - towrite); - inode->i_version++; inode->i_mtime = inode->i_ctime = current_time(inode); mark_inode_dirty(inode); return len - towrite; From 2f1b5183aee337be1c2444c7f6be189ddf14d068 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 20 Nov 2017 14:55:05 -0800 Subject: [PATCH 1613/3326] KVM: X86: Fix softlockup when get the current kvmclock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e70b57a6ce4e8b92a56a615ae79bdb2bd66035e7 ] watchdog: BUG: soft lockup - CPU#6 stuck for 22s! [qemu-system-x86:10185] CPU: 6 PID: 10185 Comm: qemu-system-x86 Tainted: G OE 4.14.0-rc4+ #4 RIP: 0010:kvm_get_time_scale+0x4e/0xa0 [kvm] Call Trace: get_time_ref_counter+0x5a/0x80 [kvm] kvm_hv_process_stimers+0x120/0x5f0 [kvm] kvm_arch_vcpu_ioctl_run+0x4b4/0x1690 [kvm] kvm_vcpu_ioctl+0x33a/0x620 [kvm] do_vfs_ioctl+0xa1/0x5d0 SyS_ioctl+0x79/0x90 entry_SYSCALL_64_fastpath+0x1e/0xa9 This can be reproduced when running kvm-unit-tests/hyperv_stimer.flat and cpu-hotplug stress simultaneously. __this_cpu_read(cpu_tsc_khz) returns 0 (set in kvmclock_cpu_down_prep()) when the pCPU is unhotplug which results in kvm_get_time_scale() gets into an infinite loop. This patch fixes it by treating the unhotplug pCPU as not using master clock. 
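A hedged userspace sketch of the guard follows; scale_and_read() is an invented stand-in for kvm_get_time_scale() plus __pvclock_read_cycles(). If the per-CPU TSC frequency reads as zero, the scaling path is skipped entirely in favour of a boot-time clock plus the kvmclock offset.

	/*
	 * Hedged userspace sketch of the guard; scale_and_read() is an invented
	 * stand-in for kvm_get_time_scale() + __pvclock_read_cycles().
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t scale_and_read(uint32_t tsc_khz, uint64_t tsc)
	{
		return tsc * 1000000ULL / tsc_khz;	/* cycles at tsc_khz -> ns */
	}

	static uint64_t get_clock_ns(uint32_t tsc_khz, uint64_t tsc,
				     uint64_t boot_ns, int64_t kvmclock_offset)
	{
		if (!tsc_khz)	/* CPU being unplugged: the scale helper would spin forever */
			return boot_ns + kvmclock_offset;

		return scale_and_read(tsc_khz, tsc) + kvmclock_offset;
	}

	int main(void)
	{
		printf("%llu\n", (unsigned long long)get_clock_ns(0, 123456, 42, 7));
		printf("%llu\n", (unsigned long long)get_clock_ns(2600000, 123456, 42, 7));
		return 0;
	}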
Reviewed-by: Radim Krčmář Reviewed-by: David Hildenbrand Cc: Paolo Bonzini Cc: Radim Krčmář Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b25326ad82a13a..2dd19f2342457f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1795,10 +1795,13 @@ u64 get_kvmclock_ns(struct kvm *kvm) /* both __this_cpu_read() and rdtsc() should be on the same cpu */ get_cpu(); - kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, - &hv_clock.tsc_shift, - &hv_clock.tsc_to_system_mul); - ret = __pvclock_read_cycles(&hv_clock, rdtsc()); + if (__this_cpu_read(cpu_tsc_khz)) { + kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, + &hv_clock.tsc_shift, + &hv_clock.tsc_to_system_mul); + ret = __pvclock_read_cycles(&hv_clock, rdtsc()); + } else + ret = ktime_get_boot_ns() + ka->kvmclock_offset; put_cpu(); From 2ce1bbfb5e67563bc01c0ba025fefc3b4b1fc391 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 20 Nov 2017 14:52:21 -0800 Subject: [PATCH 1614/3326] KVM: VMX: Fix rflags cache during vCPU reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit c37c28730bb031cc8a44a130c2555c0f3efbe2d0 ] Reported by syzkaller: *** Guest State *** CR0: actual=0x0000000080010031, shadow=0x0000000060000010, gh_mask=fffffffffffffff7 CR4: actual=0x0000000000002061, shadow=0x0000000000000000, gh_mask=ffffffffffffe8f1 CR3 = 0x000000002081e000 RSP = 0x000000000000fffa RIP = 0x0000000000000000 RFLAGS=0x00023000 DR7 = 0x00000000000000 ^^^^^^^^^^ ------------[ cut here ]------------ WARNING: CPU: 6 PID: 24431 at /home/kernel/linux/arch/x86/kvm//x86.c:7302 kvm_arch_vcpu_ioctl_run+0x651/0x2ea0 [kvm] CPU: 6 PID: 24431 Comm: reprotest Tainted: G W OE 4.14.0+ #26 RIP: 0010:kvm_arch_vcpu_ioctl_run+0x651/0x2ea0 [kvm] RSP: 0018:ffff880291d179e0 EFLAGS: 00010202 Call Trace: kvm_vcpu_ioctl+0x479/0x880 [kvm] do_vfs_ioctl+0x142/0x9a0 SyS_ioctl+0x74/0x80 entry_SYSCALL_64_fastpath+0x23/0x9a The failed vmentry is triggered by the following beautified testcase: #include #include #include #include #include #include #include long r[5]; int main() { struct kvm_debugregs dr = { 0 }; r[2] = open("/dev/kvm", O_RDONLY); r[3] = ioctl(r[2], KVM_CREATE_VM, 0); r[4] = ioctl(r[3], KVM_CREATE_VCPU, 7); struct kvm_guest_debug debug = { .control = 0xf0403, .arch = { .debugreg[6] = 0x2, .debugreg[7] = 0x2 } }; ioctl(r[4], KVM_SET_GUEST_DEBUG, &debug); ioctl(r[4], KVM_RUN, 0); } which testcase tries to setup the processor specific debug registers and configure vCPU for handling guest debug events through KVM_SET_GUEST_DEBUG. The KVM_SET_GUEST_DEBUG ioctl will get and set rflags in order to set TF bit if single step is needed. All regs' caches are reset to avail and GUEST_RFLAGS vmcs field is reset to 0x2 during vCPU reset. However, the cache of rflags is not reset during vCPU reset. The function vmx_get_rflags() returns an unreset rflags cache value since the cache is marked avail, it is 0 after boot. Vmentry fails if the rflags reserved bit 1 is 0. This patch fixes it by resetting both the GUEST_RFLAGS vmcs field and its cache to 0x2 during vCPU reset. 
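The underlying pitfall is generic: when a register is shadowed by a software cache marked available, resetting only the backing field leaves readers seeing stale cached data. A small standalone C illustration follows; struct vcpu_demo and its helpers are invented for the example, and only X86_EFLAGS_FIXED (the always-one bit 1 of RFLAGS) matches the kernel constant.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define X86_EFLAGS_FIXED 0x2ULL

	struct vcpu_demo {
		uint64_t vmcs_rflags;	/* "hardware" field */
		uint64_t cached_rflags;	/* software cache */
		bool cache_avail;
	};

	/* Buggy variant: touches only the VMCS field, the cache stays stale. */
	static void reset_rflags_buggy(struct vcpu_demo *v)
	{
		v->vmcs_rflags = X86_EFLAGS_FIXED;
	}

	/* Fixed variant: goes through the setter so cache and field agree. */
	static void set_rflags(struct vcpu_demo *v, uint64_t val)
	{
		v->cached_rflags = val;
		v->cache_avail = true;
		v->vmcs_rflags = val;
	}

	static uint64_t get_rflags(struct vcpu_demo *v)
	{
		return v->cache_avail ? v->cached_rflags : v->vmcs_rflags;
	}

	int main(void)
	{
		struct vcpu_demo v = { .cached_rflags = 0, .cache_avail = true };

		reset_rflags_buggy(&v);
		printf("buggy reset reads 0x%llx (reserved bit 1 clear -> vmentry fails)\n",
		       (unsigned long long)get_rflags(&v));

		set_rflags(&v, X86_EFLAGS_FIXED);
		printf("fixed reset reads 0x%llx\n", (unsigned long long)get_rflags(&v));
		return 0;
	}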
Reported-by: Dmitry Vyukov Tested-by: Dmitry Vyukov Reviewed-by: David Hildenbrand Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Nadav Amit Cc: Dmitry Vyukov Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d073f91bc08911..5242fac8165b4f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5608,7 +5608,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmcs_write64(GUEST_IA32_DEBUGCTL, 0); } - vmcs_writel(GUEST_RFLAGS, 0x02); + kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); kvm_rip_write(vcpu, 0xfff0); vmcs_writel(GUEST_GDTR_BASE, 0); From 809981870b6670d5ac2338325865900d88acf2b4 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Tue, 21 Nov 2017 14:35:40 -0700 Subject: [PATCH 1615/3326] Btrfs: fix list_add corruption and soft lockups in fsync [ Upstream commit ebb70442cdd4872260c2415929c456be3562da82 ] Xfstests btrfs/146 revealed this corruption, [ 58.138831] Buffer I/O error on dev dm-0, logical block 2621424, async page read [ 58.151233] BTRFS error (device sdf): bdev /dev/mapper/error-test errs: wr 1, rd 0, flush 0, corrupt 0, gen 0 [ 58.152403] list_add corruption. prev->next should be next (ffff88005e6775d8), but was ffffc9000189be88. (prev=ffffc9000189be88). [ 58.153518] ------------[ cut here ]------------ [ 58.153892] WARNING: CPU: 1 PID: 1287 at lib/list_debug.c:31 __list_add_valid+0x169/0x1f0 ... [ 58.157379] RIP: 0010:__list_add_valid+0x169/0x1f0 ... [ 58.161956] Call Trace: [ 58.162264] btrfs_log_inode_parent+0x5bd/0xfb0 [btrfs] [ 58.163583] btrfs_log_dentry_safe+0x60/0x80 [btrfs] [ 58.164003] btrfs_sync_file+0x4c2/0x6f0 [btrfs] [ 58.164393] vfs_fsync_range+0x5f/0xd0 [ 58.164898] do_fsync+0x5a/0x90 [ 58.165170] SyS_fsync+0x10/0x20 [ 58.165395] entry_SYSCALL_64_fastpath+0x1f/0xbe ... It turns out that we could record btrfs_log_ctx:io_err in log_one_extents when IO fails, but make log_one_extents() return '0' instead of -EIO, so the IO error is not acknowledged by the callers, i.e. btrfs_log_inode_parent(), which would remove btrfs_log_ctx:list from list head 'root->log_ctxs'. Since btrfs_log_ctx is allocated from stack memory, it'd get freed with a object alive on the list. then a future list_add will throw the above warning. This returns the correct error in the above case. Jeff also reported this while testing against his fsync error patch set[1]. [1]: https://www.spinics.net/lists/linux-btrfs/msg65308.html "btrfs list corruption and soft lockups while testing writeback error handling" Fixes: 8407f553268a4611f254 ("Btrfs: fix data corruption after fast fsync and writeback error") Signed-off-by: Liu Bo Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/file.c | 5 +++-- fs/btrfs/tree-log.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index aafcc785f8408c..d564a7049d7fff 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2056,6 +2056,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) len = (u64)end - (u64)start + 1; trace_btrfs_sync_file(file, datasync); + btrfs_init_log_ctx(&ctx, inode); + /* * We write the dirty pages in the range and wait until they complete * out of the ->i_mutex. 
If so, we can flush the dirty pages by @@ -2202,8 +2204,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) } trans->sync = true; - btrfs_init_log_ctx(&ctx, inode); - ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx); if (ret < 0) { /* Fallthrough and commit/free transaction. */ @@ -2261,6 +2261,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) ret = btrfs_end_transaction(trans); } out: + ASSERT(list_empty(&ctx.list)); err = file_check_and_advance_wb_err(file); if (!ret) ret = err; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c800d067fcbf0c..d3002842d7f68d 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4100,7 +4100,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans, if (ordered_io_err) { ctx->io_err = -EIO; - return 0; + return ctx->io_err; } btrfs_init_map_token(&token); From 40ba283e2602d319d00d4f6539b5113eb8d25d24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20H=2E=20Sch=C3=B6nherr?= Date: Fri, 24 Nov 2017 22:39:01 +0100 Subject: [PATCH 1616/3326] KVM: Let KVM_SET_SIGNAL_MASK work as advertised MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 20b7035c66bacc909ae3ffe92c1a1ea7db99fe4f ] KVM API says for the signal mask you set via KVM_SET_SIGNAL_MASK, that "any unblocked signal received [...] will cause KVM_RUN to return with -EINTR" and that "the signal will only be delivered if not blocked by the original signal mask". This, however, is only true, when the calling task has a signal handler registered for a signal. If not, signal evaluation is short-circuited for SIG_IGN and SIG_DFL, and the signal is either ignored without KVM_RUN returning or the whole process is terminated. Make KVM_SET_SIGNAL_MASK behave as advertised by utilizing logic similar to that in do_sigtimedwait() to avoid short-circuiting of signals. Signed-off-by: Jan H. 
Schönherr Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/mips/kvm/mips.c | 7 ++----- arch/powerpc/kvm/powerpc.c | 7 ++----- arch/s390/kvm/kvm-s390.c | 7 ++----- arch/x86/kvm/x86.c | 7 ++----- include/linux/kvm_host.h | 3 +++ virt/kvm/arm/arm.c | 8 +++----- virt/kvm/kvm_main.c | 23 +++++++++++++++++++++++ 7 files changed, 37 insertions(+), 25 deletions(-) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index d535edc0143411..75fdeaa8c62f21 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r = -EINTR; - sigset_t sigsaved; - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) @@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) local_irq_enable(); out: - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); return r; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index ee279c7f48021e..2b02d51d14d89b 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r; - sigset_t sigsaved; if (vcpu->mmio_needed) { vcpu->mmio_needed = 0; @@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) #endif } - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (run->immediate_exit) r = -EINTR; else r = kvmppc_vcpu_run(run, vcpu); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); return r; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 6c88cb18ace2e3..6e3d80b2048e5d 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -3378,7 +3378,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int rc; - sigset_t sigsaved; if (kvm_run->immediate_exit) return -EINTR; @@ -3388,8 +3387,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return 0; } - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { kvm_s390_vcpu_start(vcpu); @@ -3423,8 +3421,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) disable_cpu_timer_accounting(vcpu); store_regs(vcpu, kvm_run); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); vcpu->stat.exit_userspace++; return rc; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2dd19f2342457f..8c28023a43b132 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7245,12 +7245,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct fpu *fpu = ¤t->thread.fpu; int r; - sigset_t sigsaved; fpu__initialize(fpu); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { if (kvm_run->immediate_exit) { @@ -7293,8 
+7291,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) out: post_kvm_run_save(vcpu); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); return r; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 6882538eda329f..5a8019befafdcb 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -714,6 +714,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len); void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); +void kvm_sigset_activate(struct kvm_vcpu *vcpu); +void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); + void kvm_vcpu_block(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 95cba079982897..9a07ee94a2306b 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -612,7 +612,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int ret; - sigset_t sigsaved; if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; @@ -630,8 +629,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) if (run->immediate_exit) return -EINTR; - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); ret = 1; run->exit_reason = KVM_EXIT_UNKNOWN; @@ -753,8 +751,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_pmu_update_run(vcpu); } - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); + return ret; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2447d7c017e707..8401774f5aeb8c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2073,6 +2073,29 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) } EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); +void kvm_sigset_activate(struct kvm_vcpu *vcpu) +{ + if (!vcpu->sigset_active) + return; + + /* + * This does a lockless modification of ->real_blocked, which is fine + * because, only current can change ->real_blocked and all readers of + * ->real_blocked don't care as long ->real_blocked is always a subset + * of ->blocked. + */ + sigprocmask(SIG_SETMASK, &vcpu->sigset, ¤t->real_blocked); +} + +void kvm_sigset_deactivate(struct kvm_vcpu *vcpu) +{ + if (!vcpu->sigset_active) + return; + + sigprocmask(SIG_SETMASK, ¤t->real_blocked, NULL); + sigemptyset(¤t->real_blocked); +} + static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) { unsigned int old, val, grow; From 1eccdbd4836a417b2b4e8f1f56db97c541d58e57 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Wed, 22 Nov 2017 12:21:07 -0800 Subject: [PATCH 1617/3326] xfs: always free inline data before resetting inode fork during ifree [ Upstream commit 98c4f78dcdd8cec112d1cbc5e9a792ee6e5ab7a6 ] In xfs_ifree, we reset the data/attr forks to extents format without bothering to free any inline data buffer that might still be around after all the blocks have been truncated off the file. Prior to commit 43518812d2 ("xfs: remove support for inlining data/extents into the inode fork") nobody noticed because the leftover inline data after truncation was small enough to fit inside the inline buffer inside the fork itself. However, now that we've removed the inline buffer, we /always/ have to free the inline data buffer or else we leak them like crazy. 
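A userspace analogue of the leak follows (the actual fix appears in the diff below); demo_fork is invented, not the real xfs_ifork. Switching the format away from local without first freeing the heap-allocated inline buffer orphans that allocation, which is exactly what kmemleak reports.

	/*
	 * Userspace analogue only; demo_fork is invented, not the real xfs_ifork.
	 */
	#include <stdlib.h>

	enum fork_format { FMT_LOCAL, FMT_EXTENTS };

	struct demo_fork {
		enum fork_format format;
		void *inline_data;	/* only valid while format == FMT_LOCAL */
		size_t bytes;
	};

	static void fork_free_local_data(struct demo_fork *fp)
	{
		if (fp->format != FMT_LOCAL)
			return;
		free(fp->inline_data);	/* drop the inline buffer before the reset */
		fp->inline_data = NULL;
		fp->bytes = 0;
	}

	static void fork_reset_to_extents(struct demo_fork *fp)
	{
		fork_free_local_data(fp);	/* the step the fix adds */
		fp->format = FMT_EXTENTS;
	}

	int main(void)
	{
		struct demo_fork fp = {
			.format = FMT_LOCAL,
			.inline_data = malloc(64),
			.bytes = 64,
		};

		fork_reset_to_extents(&fp);	/* without the free: leaked allocation */
		return 0;
	}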
This test was found by turning on kmemleak for generic/001 or generic/388. Signed-off-by: Darrick J. Wong Reviewed-by: Christoph Hellwig Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_inode.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 63350906961ad6..cb4833d0646787 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2421,6 +2421,24 @@ xfs_ifree_cluster( return 0; } +/* + * Free any local-format buffers sitting around before we reset to + * extents format. + */ +static inline void +xfs_ifree_local_data( + struct xfs_inode *ip, + int whichfork) +{ + struct xfs_ifork *ifp; + + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) + return; + + ifp = XFS_IFORK_PTR(ip, whichfork); + xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); +} + /* * This is called to return an inode to the inode free list. * The inode should already be truncated to 0 length and have @@ -2458,6 +2476,9 @@ xfs_ifree( if (error) return error; + xfs_ifree_local_data(ip, XFS_DATA_FORK); + xfs_ifree_local_data(ip, XFS_ATTR_FORK); + VFS_I(ip)->i_mode = 0; /* mark incore inode as free */ ip->i_d.di_flags = 0; ip->i_d.di_dmevmask = 0; From 30ac846da3600e299cd470ade18e1e859b6a41f8 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 21 Nov 2017 20:53:02 -0800 Subject: [PATCH 1618/3326] xfs: log recovery should replay deferred ops in order [ Upstream commit 509955823cc9cc225c05673b1b83d70ca70c5c60 ] As part of testing log recovery with dm_log_writes, Amir Goldstein discovered an error in the deferred ops recovery that lead to corruption of the filesystem metadata if a reflink+rmap filesystem happened to shut down midway through a CoW remap: "This is what happens [after failed log recovery]: "Phase 1 - find and verify superblock... "Phase 2 - using internal log " - zero log... " - scan filesystem freespace and inode maps... " - found root inode chunk "Phase 3 - for each AG... " - scan (but don't clear) agi unlinked lists... " - process known inodes and perform inode discovery... " - agno = 0 "data fork in regular inode 134 claims CoW block 376 "correcting nextents for inode 134 "bad data fork in inode 134 "would have cleared inode 134" Hou Tao dissected the log contents of exactly such a crash: "According to the implementation of xfs_defer_finish(), these ops should be completed in the following sequence: "Have been done: "(1) CUI: Oper (160) "(2) BUI: Oper (161) "(3) CUD: Oper (194), for CUI Oper (160) "(4) RUI A: Oper (197), free rmap [0x155, 2, -9] "Should be done: "(5) BUD: for BUI Oper (161) "(6) RUI B: add rmap [0x155, 2, 137] "(7) RUD: for RUI A "(8) RUD: for RUI B "Actually be done by xlog_recover_process_intents() "(5) BUD: for BUI Oper (161) "(6) RUI B: add rmap [0x155, 2, 137] "(7) RUD: for RUI B "(8) RUD: for RUI A "So the rmap entry [0x155, 2, -9] for COW should be freed firstly, then a new rmap entry [0x155, 2, 137] will be added. However, as we can see from the log record in post_mount.log (generated after umount) and the trace print, the new rmap entry [0x155, 2, 137] are added firstly, then the rmap entry [0x155, 2, -9] are freed." When reconstructing the internal log state from the log items found on disk, it's required that deferred ops replay in exactly the same order that they would have had the filesystem not gone down. However, replaying unfinished deferred ops can create /more/ deferred ops. These new deferred ops are finished in the wrong order. 
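In outline (a hedged userspace analogue, not the XFS code): rather than finishing each intent's follow-on work as that intent is replayed, queue all of it on a single FIFO and drain the queue once at the end, so newly created work completes in the order it was deferred.

	#include <stdio.h>

	#define MAX_OPS 16

	struct defer_queue {
		int ops[MAX_OPS];
		int count;
	};

	static void defer_add(struct defer_queue *q, int op)
	{
		if (q->count < MAX_OPS)
			q->ops[q->count++] = op;
	}

	static void defer_finish(struct defer_queue *q)
	{
		for (int i = 0; i < q->count; i++)	/* same order the work was queued */
			printf("finishing deferred op %d\n", q->ops[i]);
		q->count = 0;
	}

	int main(void)
	{
		struct defer_queue q = { .count = 0 };
		const int intents[] = { 1, 2, 3 };

		for (unsigned int i = 0; i < sizeof(intents) / sizeof(intents[0]); i++)
			defer_add(&q, intents[i] * 10);	/* replay may create more work */

		defer_finish(&q);	/* one pass at the end, not one per intent */
		return 0;
	}

Draining inside the loop instead of once at the end is what produces the out-of-order finishing described above.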
This causes fs corruption and replay crashes, so let's create a single defer_ops to handle the subsequent ops created during replay, then use one single transaction at the end of log recovery to ensure that everything is replayed in the same order as they're supposed to be. Reported-by: Amir Goldstein Analyzed-by: Hou Tao Reviewed-by: Christoph Hellwig Tested-by: Amir Goldstein Signed-off-by: Darrick J. Wong Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_bmap_item.c | 23 ++++-------- fs/xfs/xfs_bmap_item.h | 3 +- fs/xfs/xfs_log_recover.c | 75 ++++++++++++++++++++++++++++++++++---- fs/xfs/xfs_refcount_item.c | 21 ++++------- fs/xfs/xfs_refcount_item.h | 3 +- 5 files changed, 85 insertions(+), 40 deletions(-) diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c index dd136f7275e4a2..e5fb008d75e899 100644 --- a/fs/xfs/xfs_bmap_item.c +++ b/fs/xfs/xfs_bmap_item.c @@ -389,7 +389,8 @@ xfs_bud_init( int xfs_bui_recover( struct xfs_mount *mp, - struct xfs_bui_log_item *buip) + struct xfs_bui_log_item *buip, + struct xfs_defer_ops *dfops) { int error = 0; unsigned int bui_type; @@ -404,9 +405,7 @@ xfs_bui_recover( xfs_exntst_t state; struct xfs_trans *tp; struct xfs_inode *ip = NULL; - struct xfs_defer_ops dfops; struct xfs_bmbt_irec irec; - xfs_fsblock_t firstfsb; ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags)); @@ -464,7 +463,6 @@ xfs_bui_recover( if (VFS_I(ip)->i_nlink == 0) xfs_iflags_set(ip, XFS_IRECOVERY); - xfs_defer_init(&dfops, &firstfsb); /* Process deferred bmap item. */ state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ? @@ -479,16 +477,16 @@ xfs_bui_recover( break; default: error = -EFSCORRUPTED; - goto err_dfops; + goto err_inode; } xfs_trans_ijoin(tp, ip, 0); count = bmap->me_len; - error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type, + error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type, ip, whichfork, bmap->me_startoff, bmap->me_startblock, &count, state); if (error) - goto err_dfops; + goto err_inode; if (count > 0) { ASSERT(type == XFS_BMAP_UNMAP); @@ -496,16 +494,11 @@ xfs_bui_recover( irec.br_blockcount = count; irec.br_startoff = bmap->me_startoff; irec.br_state = state; - error = xfs_bmap_unmap_extent(tp->t_mountp, &dfops, ip, &irec); + error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec); if (error) - goto err_dfops; + goto err_inode; } - /* Finish transaction, free inodes. 
*/ - error = xfs_defer_finish(&tp, &dfops); - if (error) - goto err_dfops; - set_bit(XFS_BUI_RECOVERED, &buip->bui_flags); error = xfs_trans_commit(tp); xfs_iunlock(ip, XFS_ILOCK_EXCL); @@ -513,8 +506,6 @@ xfs_bui_recover( return error; -err_dfops: - xfs_defer_cancel(&dfops); err_inode: xfs_trans_cancel(tp); if (ip) { diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h index c867daae4a3ce5..24b354a2c83641 100644 --- a/fs/xfs/xfs_bmap_item.h +++ b/fs/xfs/xfs_bmap_item.h @@ -93,6 +93,7 @@ struct xfs_bud_log_item *xfs_bud_init(struct xfs_mount *, struct xfs_bui_log_item *); void xfs_bui_item_free(struct xfs_bui_log_item *); void xfs_bui_release(struct xfs_bui_log_item *); -int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip); +int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip, + struct xfs_defer_ops *dfops); #endif /* __XFS_BMAP_ITEM_H__ */ diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index d6e049fdd977d7..eaf29646c28f7e 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -24,6 +24,7 @@ #include "xfs_bit.h" #include "xfs_sb.h" #include "xfs_mount.h" +#include "xfs_defer.h" #include "xfs_da_format.h" #include "xfs_da_btree.h" #include "xfs_inode.h" @@ -4714,7 +4715,8 @@ STATIC int xlog_recover_process_cui( struct xfs_mount *mp, struct xfs_ail *ailp, - struct xfs_log_item *lip) + struct xfs_log_item *lip, + struct xfs_defer_ops *dfops) { struct xfs_cui_log_item *cuip; int error; @@ -4727,7 +4729,7 @@ xlog_recover_process_cui( return 0; spin_unlock(&ailp->xa_lock); - error = xfs_cui_recover(mp, cuip); + error = xfs_cui_recover(mp, cuip, dfops); spin_lock(&ailp->xa_lock); return error; @@ -4754,7 +4756,8 @@ STATIC int xlog_recover_process_bui( struct xfs_mount *mp, struct xfs_ail *ailp, - struct xfs_log_item *lip) + struct xfs_log_item *lip, + struct xfs_defer_ops *dfops) { struct xfs_bui_log_item *buip; int error; @@ -4767,7 +4770,7 @@ xlog_recover_process_bui( return 0; spin_unlock(&ailp->xa_lock); - error = xfs_bui_recover(mp, buip); + error = xfs_bui_recover(mp, buip, dfops); spin_lock(&ailp->xa_lock); return error; @@ -4803,6 +4806,46 @@ static inline bool xlog_item_is_intent(struct xfs_log_item *lip) } } +/* Take all the collected deferred ops and finish them in order. */ +static int +xlog_finish_defer_ops( + struct xfs_mount *mp, + struct xfs_defer_ops *dfops) +{ + struct xfs_trans *tp; + int64_t freeblks; + uint resblks; + int error; + + /* + * We're finishing the defer_ops that accumulated as a result of + * recovering unfinished intent items during log recovery. We + * reserve an itruncate transaction because it is the largest + * permanent transaction type. Since we're the only user of the fs + * right now, take 93% (15/16) of the available free blocks. Use + * weird math to avoid a 64-bit division. + */ + freeblks = percpu_counter_sum(&mp->m_fdblocks); + if (freeblks <= 0) + return -ENOSPC; + resblks = min_t(int64_t, UINT_MAX, freeblks); + resblks = (resblks * 15) >> 4; + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks, + 0, XFS_TRANS_RESERVE, &tp); + if (error) + return error; + + error = xfs_defer_finish(&tp, dfops); + if (error) + goto out_cancel; + + return xfs_trans_commit(tp); + +out_cancel: + xfs_trans_cancel(tp); + return error; +} + /* * When this is called, all of the log intent items which did not have * corresponding log done items should be in the AIL. 
What we do now @@ -4823,10 +4866,12 @@ STATIC int xlog_recover_process_intents( struct xlog *log) { - struct xfs_log_item *lip; - int error = 0; + struct xfs_defer_ops dfops; struct xfs_ail_cursor cur; + struct xfs_log_item *lip; struct xfs_ail *ailp; + xfs_fsblock_t firstfsb; + int error = 0; #if defined(DEBUG) || defined(XFS_WARN) xfs_lsn_t last_lsn; #endif @@ -4837,6 +4882,7 @@ xlog_recover_process_intents( #if defined(DEBUG) || defined(XFS_WARN) last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block); #endif + xfs_defer_init(&dfops, &firstfsb); while (lip != NULL) { /* * We're done when we see something other than an intent. @@ -4857,6 +4903,12 @@ xlog_recover_process_intents( */ ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0); + /* + * NOTE: If your intent processing routine can create more + * deferred ops, you /must/ attach them to the dfops in this + * routine or else those subsequent intents will get + * replayed in the wrong order! + */ switch (lip->li_type) { case XFS_LI_EFI: error = xlog_recover_process_efi(log->l_mp, ailp, lip); @@ -4865,10 +4917,12 @@ xlog_recover_process_intents( error = xlog_recover_process_rui(log->l_mp, ailp, lip); break; case XFS_LI_CUI: - error = xlog_recover_process_cui(log->l_mp, ailp, lip); + error = xlog_recover_process_cui(log->l_mp, ailp, lip, + &dfops); break; case XFS_LI_BUI: - error = xlog_recover_process_bui(log->l_mp, ailp, lip); + error = xlog_recover_process_bui(log->l_mp, ailp, lip, + &dfops); break; } if (error) @@ -4878,6 +4932,11 @@ xlog_recover_process_intents( out: xfs_trans_ail_cursor_done(&cur); spin_unlock(&ailp->xa_lock); + if (error) + xfs_defer_cancel(&dfops); + else + error = xlog_finish_defer_ops(log->l_mp, &dfops); + return error; } diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index 8f2e2fac4255d6..3a55d6fc271b1e 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c @@ -393,7 +393,8 @@ xfs_cud_init( int xfs_cui_recover( struct xfs_mount *mp, - struct xfs_cui_log_item *cuip) + struct xfs_cui_log_item *cuip, + struct xfs_defer_ops *dfops) { int i; int error = 0; @@ -405,11 +406,9 @@ xfs_cui_recover( struct xfs_trans *tp; struct xfs_btree_cur *rcur = NULL; enum xfs_refcount_intent_type type; - xfs_fsblock_t firstfsb; xfs_fsblock_t new_fsb; xfs_extlen_t new_len; struct xfs_bmbt_irec irec; - struct xfs_defer_ops dfops; bool requeue_only = false; ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags)); @@ -465,7 +464,6 @@ xfs_cui_recover( return error; cudp = xfs_trans_get_cud(tp, cuip); - xfs_defer_init(&dfops, &firstfsb); for (i = 0; i < cuip->cui_format.cui_nextents; i++) { refc = &cuip->cui_format.cui_extents[i]; refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK; @@ -485,7 +483,7 @@ xfs_cui_recover( new_len = refc->pe_len; } else error = xfs_trans_log_finish_refcount_update(tp, cudp, - &dfops, type, refc->pe_startblock, refc->pe_len, + dfops, type, refc->pe_startblock, refc->pe_len, &new_fsb, &new_len, &rcur); if (error) goto abort_error; @@ -497,21 +495,21 @@ xfs_cui_recover( switch (type) { case XFS_REFCOUNT_INCREASE: error = xfs_refcount_increase_extent( - tp->t_mountp, &dfops, &irec); + tp->t_mountp, dfops, &irec); break; case XFS_REFCOUNT_DECREASE: error = xfs_refcount_decrease_extent( - tp->t_mountp, &dfops, &irec); + tp->t_mountp, dfops, &irec); break; case XFS_REFCOUNT_ALLOC_COW: error = xfs_refcount_alloc_cow_extent( - tp->t_mountp, &dfops, + tp->t_mountp, dfops, irec.br_startblock, irec.br_blockcount); break; case XFS_REFCOUNT_FREE_COW: error = 
xfs_refcount_free_cow_extent( - tp->t_mountp, &dfops, + tp->t_mountp, dfops, irec.br_startblock, irec.br_blockcount); break; @@ -525,17 +523,12 @@ xfs_cui_recover( } xfs_refcount_finish_one_cleanup(tp, rcur, error); - error = xfs_defer_finish(&tp, &dfops); - if (error) - goto abort_defer; set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags); error = xfs_trans_commit(tp); return error; abort_error: xfs_refcount_finish_one_cleanup(tp, rcur, error); -abort_defer: - xfs_defer_cancel(&dfops); xfs_trans_cancel(tp); return error; } diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h index 5b74dddfa64be7..0e5327349a13ee 100644 --- a/fs/xfs/xfs_refcount_item.h +++ b/fs/xfs/xfs_refcount_item.h @@ -96,6 +96,7 @@ struct xfs_cud_log_item *xfs_cud_init(struct xfs_mount *, struct xfs_cui_log_item *); void xfs_cui_item_free(struct xfs_cui_log_item *); void xfs_cui_release(struct xfs_cui_log_item *); -int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip); +int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip, + struct xfs_defer_ops *dfops); #endif /* __XFS_REFCOUNT_ITEM_H__ */ From 53c045c6d07d78bf0d547352df184c250cc29fe3 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 22 Nov 2017 17:52:24 +0000 Subject: [PATCH 1619/3326] i2c: i2c-boardinfo: fix memory leaks on devinfo [ Upstream commit 66a7c84d677e8e4a5a2ef4afdb9bd52e1399a866 ] Currently when an error occurs devinfo is still allocated but is unused when the error exit paths break out of the for-loop. Fix this by kfree'ing devinfo to avoid the leak. Detected by CoverityScan, CID#1416590 ("Resource Leak") Fixes: 4124c4eba402 ("i2c: allow attaching IRQ resources to i2c_board_info") Fixes: 0daaf99d8424 ("i2c: copy device properties when using i2c_register_board_info()") Signed-off-by: Colin Ian King Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/i2c-boardinfo.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c index 31186ead5a4071..509a6007cdf659 100644 --- a/drivers/i2c/i2c-boardinfo.c +++ b/drivers/i2c/i2c-boardinfo.c @@ -86,6 +86,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig property_entries_dup(info->properties); if (IS_ERR(devinfo->board_info.properties)) { status = PTR_ERR(devinfo->board_info.properties); + kfree(devinfo); break; } } @@ -98,6 +99,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig GFP_KERNEL); if (!devinfo->board_info.resources) { status = -ENOMEM; + kfree(devinfo); break; } } From 0ffb252208a288589251ed0ada38424ccde217d5 Mon Sep 17 00:00:00 2001 From: Eduardo Otubo Date: Thu, 23 Nov 2017 15:18:35 +0100 Subject: [PATCH 1620/3326] xen-netfront: remove warning when unloading module [ Upstream commit 5b5971df3bc2775107ddad164018a8a8db633b81 ] v2: * Replace busy wait with wait_event()/wake_up_all() * Cannot garantee that at the time xennet_remove is called, the xen_netback state will not be XenbusStateClosed, so added a condition for that * There's a small chance for the xen_netback state is XenbusStateUnknown by the time the xen_netfront switches to Closed, so added a condition for that. When unloading module xen_netfront from guest, dmesg would output warning messages like below: [ 105.236836] xen:grant_table: WARNING: g.e. 0x903 still in use! [ 105.236839] deferring g.e. 0x903 (pfn 0x35805) This problem relies on netfront and netback being out of sync. 
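The trick described below amounts to a state-machine handshake on unload; as a hedged userspace analogue (a pthreads condition variable standing in for wait_event()/wake_up_all(), a "backend" thread standing in for netback), the frontend must not free shared state until the backend reports Closed.

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	enum xb_state { ST_CONNECTED, ST_CLOSING, ST_CLOSED };

	static enum xb_state backend_state = ST_CONNECTED;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t unload_q = PTHREAD_COND_INITIALIZER;

	static void *backend(void *arg)
	{
		(void)arg;
		usleep(1000);				/* backend needs time to release grants */
		pthread_mutex_lock(&lock);
		backend_state = ST_CLOSED;
		pthread_cond_broadcast(&unload_q);	/* wake_up_all() analogue */
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, backend, NULL);

		pthread_mutex_lock(&lock);
		while (backend_state != ST_CLOSED)	/* wait_event() analogue */
			pthread_cond_wait(&unload_q, &lock);
		pthread_mutex_unlock(&lock);

		puts("backend closed; safe to revoke grants and free the rings");
		pthread_join(t, NULL);
		return 0;
	}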
By the time netfront revokes the g.e.'s netback didn't have enough time to free all of them, hence displaying the warnings on dmesg. The trick here is to make netfront to wait until netback frees all the g.e.'s and only then continue to cleanup for the module removal, and this is done by manipulating both device states. Signed-off-by: Eduardo Otubo Acked-by: Juergen Gross Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/xen-netfront.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8b8689c6d88778..391432e2725db1 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -87,6 +87,8 @@ struct netfront_cb { /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) +static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); + struct netfront_stats { u64 packets; u64 bytes; @@ -2021,10 +2023,12 @@ static void netback_changed(struct xenbus_device *dev, break; case XenbusStateClosed: + wake_up_all(&module_unload_q); if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: + wake_up_all(&module_unload_q); xenbus_frontend_closed(dev); break; } @@ -2130,6 +2134,20 @@ static int xennet_remove(struct xenbus_device *dev) dev_dbg(&dev->dev, "%s\n", dev->nodename); + if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { + xenbus_switch_state(dev, XenbusStateClosing); + wait_event(module_unload_q, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing); + + xenbus_switch_state(dev, XenbusStateClosed); + wait_event(module_unload_q, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown); + } + xennet_disconnect_backend(info); unregister_netdev(info->netdev); From c900ee9118d7cd0d1e5eba6727dd6c3c37a46e7f Mon Sep 17 00:00:00 2001 From: Thomas Meyer Date: Thu, 10 Aug 2017 10:53:53 +0200 Subject: [PATCH 1621/3326] auxdisplay: img-ascii-lcd: Only build on archs that have IOMEM [ Upstream commit 141cbfba1d0502006463aa80f57c64086226af1a ] This avoids the MODPOST error: ERROR: "devm_ioremap_resource" [drivers/auxdisplay/img-ascii-lcd.ko] undefined! Signed-off-by: Thomas Meyer Acked-by: Randy Dunlap Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/auxdisplay/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index d7d21118d3e0f5..2c2ed9cf879626 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -136,6 +136,7 @@ config CFAG12864B_RATE config IMG_ASCII_LCD tristate "Imagination Technologies ASCII LCD Display" + depends on HAS_IOMEM default y if MIPS_MALTA || MIPS_SEAD3 select SYSCON help From 5f71ff51063177085e7dc2eb33913a2a222700ad Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 3 Nov 2017 08:00:12 -0400 Subject: [PATCH 1622/3326] nfsd: CLOSE SHOULD return the invalid special stateid for NFSv4.x (x>0) [ Upstream commit fb500a7cfee7f2f447d2bbf30cb59629feab6ac1 ] Signed-off-by: Trond Myklebust Signed-off-by: J. 
Bruce Fields Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/nfs4state.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index a439a70177a46e..61f38346ce9dc5 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -63,6 +63,9 @@ static const stateid_t zero_stateid = { static const stateid_t currentstateid = { .si_generation = 1, }; +static const stateid_t close_stateid = { + .si_generation = 0xffffffffU, +}; static u64 current_sessionid = 1; @@ -5411,6 +5414,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, nfsd4_close_open_stateid(stp); mutex_unlock(&stp->st_mutex); + /* See RFC5661 sectionm 18.2.4 */ + if (stp->st_stid.sc_client->cl_minorversion) + memcpy(&close->cl_stateid, &close_stateid, + sizeof(close->cl_stateid)); + /* put reference from nfs4_preprocess_seqid_op */ nfs4_put_stid(&stp->st_stid); out: From 3bd364d156b13a7f5ba6fd5892b1a4439aa1666b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 3 Nov 2017 08:00:15 -0400 Subject: [PATCH 1623/3326] nfsd: Ensure we check stateid validity in the seqid operation checks [ Upstream commit 9271d7e509c1bfc0b9a418caec29ec8d1ac38270 ] After taking the stateid st_mutex, we want to know that the stateid still represents valid state before performing any non-idempotent actions. Signed-off-by: Trond Myklebust Signed-off-by: J. Bruce Fields Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/nfs4state.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 61f38346ce9dc5..15343dd6e4159e 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -5180,15 +5180,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_ status = nfsd4_check_seqid(cstate, sop, seqid); if (status) return status; - if (stp->st_stid.sc_type == NFS4_CLOSED_STID - || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID) - /* - * "Closed" stateid's exist *only* to return - * nfserr_replay_me from the previous step, and - * revoked delegations are kept only for free_stateid. - */ - return nfserr_bad_stateid; - mutex_lock(&stp->st_mutex); + status = nfsd4_lock_ol_stateid(stp); + if (status != nfs_ok) + return status; status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); if (status == nfs_ok) status = nfs4_check_fh(current_fh, &stp->st_stid); From e0849eb97066b741c5dacf96349741fad983b7cd Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Mon, 6 Nov 2017 16:22:48 +0300 Subject: [PATCH 1624/3326] grace: replace BUG_ON by WARN_ONCE in exit_net hook [ Upstream commit b872285751c1af010e12d02bce7069e2061a58ca ] Signed-off-by: Vasily Averin Signed-off-by: J. 
Bruce Fields Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/nfs_common/grace.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c index 420d3a0ab258fb..1bd659938646cb 100644 --- a/fs/nfs_common/grace.c +++ b/fs/nfs_common/grace.c @@ -104,7 +104,9 @@ grace_exit_net(struct net *net) { struct list_head *grace_list = net_generic(net, grace_net_id); - BUG_ON(!list_empty(grace_list)); + WARN_ONCE(!list_empty(grace_list), + "net %x %s: grace_list is not empty\n", + net->ns.inum, __func__); } static struct pernet_operations grace_net_ops = { From 631db7f3895bd4138a7fb0c02f0db1750c79ee23 Mon Sep 17 00:00:00 2001 From: Andrew Elble Date: Thu, 9 Nov 2017 13:41:10 -0500 Subject: [PATCH 1625/3326] nfsd: check for use of the closed special stateid [ Upstream commit ae254dac721d44c0bfebe2795df87459e2e88219 ] Prevent the use of the closed (invalid) special stateid by clients. Signed-off-by: Andrew Elble Signed-off-by: J. Bruce Fields Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/nfs4state.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 15343dd6e4159e..2f80340a1800a8 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -72,6 +72,7 @@ static u64 current_sessionid = 1; #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) #define CURRENT_STATEID(stateid) (!memcmp((stateid), ¤tstateid, sizeof(stateid_t))) +#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t))) /* forward declarations */ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner); @@ -4869,7 +4870,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) struct nfs4_stid *s; __be32 status = nfserr_bad_stateid; - if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) + if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || + CLOSE_STATEID(stateid)) return status; /* Client debugging aid. */ if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { @@ -4927,7 +4929,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, else if (typemask & NFS4_DELEG_STID) typemask |= NFS4_REVOKED_DELEG_STID; - if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) + if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || + CLOSE_STATEID(stateid)) return nfserr_bad_stateid; status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn); if (status == nfserr_stale_clientid) { From 156c80d4e197be386103a0ead001d14c4492ed3c Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Fri, 10 Nov 2017 10:19:26 +0300 Subject: [PATCH 1626/3326] race of lockd inetaddr notifiers vs nlmsvc_rqst change [ Upstream commit 6b18dd1c03e07262ea0866084856b2a3c5ba8d09 ] lockd_inet[6]addr_event use nlmsvc_rqst without taken nlmsvc_mutex, nlmsvc_rqst can be changed during execution of notifiers and crash the host. Patch enables access to nlmsvc_rqst only when it was correctly initialized and delays its cleanup until notifiers are no longer in use. Note that nlmsvc_rqst can be temporally set to ERR_PTR, so the "if (nlmsvc_rqst)" check in notifiers is insufficient on its own. Signed-off-by: Vasily Averin Tested-by: Scott Mayhew Signed-off-by: J. 
Bruce Fields Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/lockd/svc.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 45e96549ebd2ab..809cbccbad28ac 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -57,6 +57,9 @@ static struct task_struct *nlmsvc_task; static struct svc_rqst *nlmsvc_rqst; unsigned long nlmsvc_timeout; +atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0); +DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq); + unsigned int lockd_net_id; /* @@ -292,7 +295,8 @@ static int lockd_inetaddr_event(struct notifier_block *this, struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct sockaddr_in sin; - if (event != NETDEV_DOWN) + if ((event != NETDEV_DOWN) || + !atomic_inc_not_zero(&nlm_ntf_refcnt)) goto out; if (nlmsvc_rqst) { @@ -303,6 +307,8 @@ static int lockd_inetaddr_event(struct notifier_block *this, svc_age_temp_xprts_now(nlmsvc_rqst->rq_server, (struct sockaddr *)&sin); } + atomic_dec(&nlm_ntf_refcnt); + wake_up(&nlm_ntf_wq); out: return NOTIFY_DONE; @@ -319,7 +325,8 @@ static int lockd_inet6addr_event(struct notifier_block *this, struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; struct sockaddr_in6 sin6; - if (event != NETDEV_DOWN) + if ((event != NETDEV_DOWN) || + !atomic_inc_not_zero(&nlm_ntf_refcnt)) goto out; if (nlmsvc_rqst) { @@ -331,6 +338,8 @@ static int lockd_inet6addr_event(struct notifier_block *this, svc_age_temp_xprts_now(nlmsvc_rqst->rq_server, (struct sockaddr *)&sin6); } + atomic_dec(&nlm_ntf_refcnt); + wake_up(&nlm_ntf_wq); out: return NOTIFY_DONE; @@ -347,10 +356,12 @@ static void lockd_unregister_notifiers(void) #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&lockd_inet6addr_notifier); #endif + wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0); } static void lockd_svc_exit_thread(void) { + atomic_dec(&nlm_ntf_refcnt); lockd_unregister_notifiers(); svc_exit_thread(nlmsvc_rqst); } @@ -375,6 +386,7 @@ static int lockd_start_svc(struct svc_serv *serv) goto out_rqst; } + atomic_inc(&nlm_ntf_refcnt); svc_sock_update_bufs(serv); serv->sv_maxconn = nlm_max_connections; From 02cfbaa6cd42cb7bb041519ffd8cd7f3e5a16863 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Mon, 13 Nov 2017 07:25:40 +0300 Subject: [PATCH 1627/3326] lockd: fix "list_add double add" caused by legacy signal interface [ Upstream commit 81833de1a46edce9ca20cfe079872ac1c20ef359 ] restart_grace() uses hardcoded init_net. It can cause to "list_add double add" in following scenario: 1) nfsd and lockd was started in several net namespaces 2) nfsd in init_net was stopped (lockd was not stopped because it have users from another net namespaces) 3) lockd got signal, called restart_grace() -> set_grace_period() and enabled lock_manager in hardcoded init_net. 4) nfsd in init_net is started again, its lockd_up() calls set_grace_period() and tries to add lock_manager into init_net 2nd time. Jeff Layton suggest: "Make it safe to call locks_start_grace multiple times on the same lock_manager. If it's already on the global grace_list, then don't try to add it again. (But we don't intentionally add twice, so for now we WARN about that case.) With this change, we also need to ensure that the nfsd4 lock manager initializes the list before we call locks_start_grace. While we're at it, move the rest of the nfsd_net initialization into nfs4_state_create_net. I see no reason to have it spread over two functions like it is today." Suggested patch was updated to generate warning in described situation. 
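As a side illustration (not part of the patch above), here is a minimal userspace C sketch of the guard the fix adds: an already-linked node is detected with a list_empty()-style check and the second add is skipped instead of corrupting the list. The list code below is a stripped-down stand-in for the kernel's list_head, and the variable names are invented for the example.

    #include <stdio.h>
    #include <stdbool.h>

    /* Stripped-down stand-in for the kernel's circular doubly-linked list_head. */
    struct node { struct node *next, *prev; };

    static void node_init(struct node *n) { n->next = n->prev = n; }
    static bool node_unlinked(const struct node *n) { return n->next == n; }

    static void node_add(struct node *n, struct node *head)
    {
            n->next = head->next;
            n->prev = head;
            head->next->prev = n;
            head->next = n;
    }

    int main(void)
    {
            struct node grace_list, lock_manager;

            node_init(&grace_list);
            node_init(&lock_manager);   /* like INIT_LIST_HEAD() on the manager's list */

            /* First locks_start_grace()-style call: node is unlinked, so add it. */
            if (node_unlinked(&lock_manager))
                    node_add(&lock_manager, &grace_list);

            /* Second call for the same manager: the guard skips the add
             * (the kernel patch additionally WARNs here). */
            if (node_unlinked(&lock_manager))
                    node_add(&lock_manager, &grace_list);
            else
                    printf("double list_add attempt detected, skipped\n");

            return 0;
    }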
Suggested-by: Jeff Layton Signed-off-by: Vasily Averin Signed-off-by: J. Bruce Fields Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/nfs_common/grace.c | 6 +++++- fs/nfsd/nfs4state.c | 7 ++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c index 1bd659938646cb..3b13fb3b055301 100644 --- a/fs/nfs_common/grace.c +++ b/fs/nfs_common/grace.c @@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm) struct list_head *grace_list = net_generic(net, grace_net_id); spin_lock(&grace_lock); - list_add(&lm->list, grace_list); + if (list_empty(&lm->list)) + list_add(&lm->list, grace_list); + else + WARN(1, "double list_add attempt detected in net %x %s\n", + net->ns.inum, (net == &init_net) ? "(init_net)" : ""); spin_unlock(&grace_lock); } EXPORT_SYMBOL_GPL(locks_start_grace); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 2f80340a1800a8..d89e6ccd33ba01 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -7021,6 +7021,10 @@ static int nfs4_state_create_net(struct net *net) INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); nn->conf_name_tree = RB_ROOT; nn->unconf_name_tree = RB_ROOT; + nn->boot_time = get_seconds(); + nn->grace_ended = false; + nn->nfsd4_manager.block_opens = true; + INIT_LIST_HEAD(&nn->nfsd4_manager.list); INIT_LIST_HEAD(&nn->client_lru); INIT_LIST_HEAD(&nn->close_lru); INIT_LIST_HEAD(&nn->del_recall_lru); @@ -7078,9 +7082,6 @@ nfs4_state_start_net(struct net *net) ret = nfs4_state_create_net(net); if (ret) return ret; - nn->boot_time = get_seconds(); - nn->grace_ended = false; - nn->nfsd4_manager.block_opens = true; locks_start_grace(net, &nn->nfsd4_manager); nfsd4_client_tracking_init(net); printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n", From 1271aeb0027cad65ae21737d9b0c60440dae9638 Mon Sep 17 00:00:00 2001 From: Robert Lippert Date: Mon, 27 Nov 2017 15:51:55 -0800 Subject: [PATCH 1628/3326] hwmon: (pmbus) Use 64bit math for DIRECT format values [ Upstream commit bd467e4eababe4c04272c1e646f066db02734c79 ] Power values in the 100s of watt range can easily blow past 32bit math limits when processing everything in microwatts. Use 64bit math instead to avoid these issues on common 32bit ARM BMC platforms. 
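For illustration only (not part of the patch), a userspace C sketch of the overflow the commit message describes: a power reading in the hundreds of watts, expressed in microwatts, no longer fits in 32-bit intermediate math once it is scaled by the DIRECT-format coefficient m. The coefficient values below are hypothetical; real m/b/R coefficients are device-specific.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* 300 W expressed in microwatts, as pmbus_core handles power internally. */
            int64_t val = 300000000;

            /* Hypothetical DIRECT-format coefficients (device-specific in practice). */
            int32_t m = 8, b = 0;

            int64_t full = val * m + b;          /* 64-bit math: 2400000000 */
            int32_t truncated = (int32_t)full;   /* what 32-bit long math would keep */

            printf("64-bit result: %lld\n", (long long)full);
            printf("32-bit result: %d (wrapped)\n", (int)truncated);
            return 0;
    }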
Fixes: 442aba78728e ("hwmon: PMBus device driver") Signed-off-by: Robert Lippert Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/hwmon/pmbus/pmbus_core.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 52a58b8b6e1bd0..a139940cd991a3 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -499,8 +500,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data, static long pmbus_reg2data_direct(struct pmbus_data *data, struct pmbus_sensor *sensor) { - long val = (s16) sensor->data; - long m, b, R; + s64 b, val = (s16)sensor->data; + s32 m, R; m = data->info->m[sensor->class]; b = data->info->b[sensor->class]; @@ -528,11 +529,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data, R--; } while (R < 0) { - val = DIV_ROUND_CLOSEST(val, 10); + val = div_s64(val + 5LL, 10L); /* round closest */ R++; } - return (val - b) / m; + val = div_s64(val - b, m); + return clamp_val(val, LONG_MIN, LONG_MAX); } /* @@ -656,7 +658,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, static u16 pmbus_data2reg_direct(struct pmbus_data *data, struct pmbus_sensor *sensor, long val) { - long m, b, R; + s64 b, val64 = val; + s32 m, R; m = data->info->m[sensor->class]; b = data->info->b[sensor->class]; @@ -673,18 +676,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data, R -= 3; /* Adjust R and b for data in milli-units */ b *= 1000; } - val = val * m + b; + val64 = val64 * m + b; while (R > 0) { - val *= 10; + val64 *= 10; R--; } while (R < 0) { - val = DIV_ROUND_CLOSEST(val, 10); + val64 = div_s64(val64 + 5LL, 10L); /* round closest */ R++; } - return val; + return (u16)clamp_val(val64, S16_MIN, S16_MAX); } static u16 pmbus_data2reg_vid(struct pmbus_data *data, From 606592f53b877dc77ec1356ea82ea1d7bb6e8580 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Tue, 28 Nov 2017 23:01:44 +0800 Subject: [PATCH 1629/3326] quota: propagate error from __dquot_initialize [ Upstream commit 1a6152d36dee08da2be2a3030dceb45ef680460a ] In commit 6184fc0b8dd7 ("quota: Propagate error from ->acquire_dquot()"), we have propagated error from __dquot_initialize to caller, but we forgot to handle such error in add_dquot_ref(), so, currently, during quota accounting information initialization flow, if we failed for some of inodes, we just ignore such error, and do account for others, which is not a good implementation. In this patch, we choose to let user be aware of such error, so after turning on quota successfully, we can make sure all inodes disk usage can be accounted, which will be more reasonable. 
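As a rough sketch of the pattern only (not the actual kernel code), the change turns a fire-and-forget loop into one that stops at the first failure and hands the error back to the caller, which can then undo the quota enable. The item initializer and names below are made up for the example.

    #include <stdio.h>

    /* Hypothetical stand-in for __dquot_initialize(): fails for one item. */
    static int init_item(int id)
    {
            return (id == 3) ? -5 /* -EIO */ : 0;
    }

    /* Before the fix the loop ignored failures; now it propagates the first one. */
    static int add_refs(int nitems)
    {
            for (int i = 0; i < nitems; i++) {
                    int err = init_item(i);
                    if (err)
                            return err;
            }
            return 0;
    }

    int main(void)
    {
            int err = add_refs(5);

            if (err)
                    printf("enabling quota fails with %d instead of silently under-accounting\n", err);
            return 0;
    }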
Suggested-by: Jan Kara Signed-off-by: Chao Yu Signed-off-by: Jan Kara Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/quota/dquot.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 9f78b5015f2ec3..8406be97a518aa 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -934,12 +934,13 @@ static int dqinit_needed(struct inode *inode, int type) } /* This routine is guarded by s_umount semaphore */ -static void add_dquot_ref(struct super_block *sb, int type) +static int add_dquot_ref(struct super_block *sb, int type) { struct inode *inode, *old_inode = NULL; #ifdef CONFIG_QUOTA_DEBUG int reserved = 0; #endif + int err = 0; spin_lock(&sb->s_inode_list_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { @@ -959,7 +960,11 @@ static void add_dquot_ref(struct super_block *sb, int type) reserved = 1; #endif iput(old_inode); - __dquot_initialize(inode, type); + err = __dquot_initialize(inode, type); + if (err) { + iput(inode); + goto out; + } /* * We hold a reference to 'inode' so it couldn't have been @@ -974,7 +979,7 @@ static void add_dquot_ref(struct super_block *sb, int type) } spin_unlock(&sb->s_inode_list_lock); iput(old_inode); - +out: #ifdef CONFIG_QUOTA_DEBUG if (reserved) { quota_error(sb, "Writes happened before quota was turned on " @@ -982,6 +987,7 @@ static void add_dquot_ref(struct super_block *sb, int type) "Please run quotacheck(8)"); } #endif + return err; } /* @@ -2372,10 +2378,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, dqopt->flags |= dquot_state_flag(flags, type); spin_unlock(&dq_state_lock); - add_dquot_ref(sb, type); - - return 0; + error = add_dquot_ref(sb, type); + if (error) + dquot_disable(sb, type, flags); + return error; out_file_init: dqopt->files[type] = NULL; iput(inode); From 5e60a297e78dd7b15679751b5a28428a633161f8 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 28 Nov 2017 14:19:48 +0100 Subject: [PATCH 1630/3326] net: mvpp2: fix the txq_init error path [ Upstream commit ba2d8d887d962c2f790e6dc01b2fd25b4608720b ] When an allocation in the txq_init path fails, the allocated buffers end-up being freed twice: in the txq_init error path, and in txq_deinit. This lead to issues as txq_deinit would work on already freed memory regions: kernel BUG at mm/slub.c:3915! Internal error: Oops - BUG: 0 [#1] PREEMPT SMP This patch fixes this by removing the txq_init own error path, as the txq_deinit function is always called on errors. This was introduced by TSO as way more buffers are allocated. Fixes: 186cd4d4e414 ("net: mvpp2: software tso support") Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/marvell/mvpp2.c | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index d147dc7d0f7776..1dd3a1264a53e8 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5597,7 +5597,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, sizeof(*txq_pcpu->buffs), GFP_KERNEL); if (!txq_pcpu->buffs) - goto cleanup; + return -ENOMEM; txq_pcpu->count = 0; txq_pcpu->reserved_num = 0; @@ -5610,26 +5610,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port, &txq_pcpu->tso_headers_dma, GFP_KERNEL); if (!txq_pcpu->tso_headers) - goto cleanup; + return -ENOMEM; } return 0; -cleanup: - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - kfree(txq_pcpu->buffs); - - dma_free_coherent(port->dev->dev.parent, - txq_pcpu->size * TSO_HEADER_SIZE, - txq_pcpu->tso_headers, - txq_pcpu->tso_headers_dma); - } - - dma_free_coherent(port->dev->dev.parent, - txq->size * MVPP2_DESC_ALIGNED_SIZE, - txq->descs, txq->descs_dma); - - return -ENOMEM; } /* Free allocated TXQ resources */ From c25d803a3b7d82e9549c081e30ce1b62bc1dc9d2 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 28 Nov 2017 14:26:30 +0100 Subject: [PATCH 1631/3326] net: phy: marvell10g: fix the PHY id mask [ Upstream commit 952b6b3b07877419386e719ff20917170e1ce684 ] The Marvell 10G PHY driver supports different hardware revisions, which have their bits 3..0 differing. To get the correct revision number these bits should be ignored. This patch fixes this by using the already defined MARVELL_PHY_ID_MASK (0xfffffff0) instead of the custom 0xffffffff mask. Fixes: 20b2af32ff3f ("net: phy: add Marvell Alaska X 88X3310 10Gigabit PHY support") Suggested-by: Yan Markman Signed-off-by: Antoine Tenart Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/marvell10g.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index aebc08beceba33..21b3f36e023a96 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -16,6 +16,7 @@ * link takes priority and the other port is completely locked out. */ #include +#include enum { MV_PCS_BASE_T = 0x0000, @@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev) static struct phy_driver mv3310_drivers[] = { { .phy_id = 0x002b09aa, - .phy_id_mask = 0xffffffff, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88x3310", .features = SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | @@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = { module_phy_driver(mv3310_drivers); static struct mdio_device_id __maybe_unused mv3310_tbl[] = { - { 0x002b09aa, 0xffffffff }, + { 0x002b09aa, MARVELL_PHY_ID_MASK }, { }, }; MODULE_DEVICE_TABLE(mdio, mv3310_tbl); From b274406266744e706c5b741cafa0013e7a032655 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 21 Nov 2017 20:46:49 +0100 Subject: [PATCH 1632/3326] bnxt_en: Fix an error handling path in 'bnxt_get_module_eeprom()' [ Upstream commit dea521a2b9f96e905fa2bb2f95e23ec00c2ec436 ] Error code returned by 'bnxt_read_sfp_module_eeprom_info()' is handled a few lines above when reading the A0 portion of the EEPROM. The same should be done when reading the A2 portion of the EEPROM. 
In order to correctly propagate an error, update 'rc' in this 2nd call as well, otherwise 0 (success) is returned. Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 3cbe771b335296..a22336fef66b23 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2133,8 +2133,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev, /* Read A2 portion of the EEPROM */ if (length) { start -= ETH_MODULE_SFF_8436_LEN; - bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start, - length, data); + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, + start, length, data); } return rc; } From f268e508aeb20f09de9a258ac5d2e41983567775 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 17 Nov 2017 01:54:00 +0000 Subject: [PATCH 1633/3326] Btrfs: incremental send, fix wrong unlink path after renaming file [ Upstream commit ea37d5998b50a72b9045ba60a132eeb20e1c4230 ] Under some circumstances, an incremental send operation can issue wrong paths for unlink commands related to files that have multiple hard links and some (or all) of those links were renamed between the parent and send snapshots. Consider the following example: Parent snapshot . (ino 256) |---- a/ (ino 257) | |---- b/ (ino 259) | | |---- c/ (ino 260) | | |---- f2 (ino 261) | | | |---- f2l1 (ino 261) | |---- d/ (ino 262) |---- f1l1_2 (ino 258) |---- f2l2 (ino 261) |---- f1_2 (ino 258) Send snapshot . (ino 256) |---- a/ (ino 257) | |---- f2l1/ (ino 263) | |---- b2/ (ino 259) | |---- c/ (ino 260) | | |---- d3 (ino 262) | | |---- f1l1_2 (ino 258) | | |---- f2l2_2 (ino 261) | | |---- f1_2 (ino 258) | | | |---- f2 (ino 261) | |---- f1l2 (ino 258) | |---- d (ino 261) When computing the incremental send stream the following steps happen: 1) When processing inode 261, a rename operation is issued that renames inode 262, which currently as a path of "d", to an orphan name of "o262-7-0". This is done because in the send snapshot, inode 261 has of its hard links with a path of "d" as well. 2) Two link operations are issued that create the new hard links for inode 261, whose names are "d" and "f2l2_2", at paths "/" and "o262-7-0/" respectively. 3) Still while processing inode 261, unlink operations are issued to remove the old hard links of inode 261, with names "f2l1" and "f2l2", at paths "a/" and "d/". However path "d/" does not correspond anymore to the directory inode 262 but corresponds instead to a hard link of inode 261 (link command issued in the previous step). This makes the receiver fail with a ENOTDIR error when attempting the unlink operation. The problem happens because before sending the unlink operation, we failed to detect that inode 262 was one of ancestors for inode 261 in the parent snapshot, and therefore we didn't recompute the path for inode 262 before issuing the unlink operation for the link named "f2l2" of inode 262. The detection failed because the function "is_ancestor()" only follows the first hard link it finds for an inode instead of all of its hard links (as it was originally created for being used with directories only, for which only one hard link exists). So fix this by making "is_ancestor()" follow all hard links of the input inode. 
A test case for fstests follows soon. Signed-off-by: Filipe Manana Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/send.c | 124 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 106 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 8fd195cfe81b97..2c35717a3470a6 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -3527,7 +3527,40 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx, } /* - * Check if ino ino1 is an ancestor of inode ino2 in the given root. + * Check if inode ino2, or any of its ancestors, is inode ino1. + * Return 1 if true, 0 if false and < 0 on error. + */ +static int check_ino_in_path(struct btrfs_root *root, + const u64 ino1, + const u64 ino1_gen, + const u64 ino2, + const u64 ino2_gen, + struct fs_path *fs_path) +{ + u64 ino = ino2; + + if (ino1 == ino2) + return ino1_gen == ino2_gen; + + while (ino > BTRFS_FIRST_FREE_OBJECTID) { + u64 parent; + u64 parent_gen; + int ret; + + fs_path_reset(fs_path); + ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path); + if (ret < 0) + return ret; + if (parent == ino1) + return parent_gen == ino1_gen; + ino = parent; + } + return 0; +} + +/* + * Check if ino ino1 is an ancestor of inode ino2 in the given root for any + * possible path (in case ino2 is not a directory and has multiple hard links). * Return 1 if true, 0 if false and < 0 on error. */ static int is_ancestor(struct btrfs_root *root, @@ -3536,36 +3569,91 @@ static int is_ancestor(struct btrfs_root *root, const u64 ino2, struct fs_path *fs_path) { - u64 ino = ino2; - bool free_path = false; + bool free_fs_path = false; int ret = 0; + struct btrfs_path *path = NULL; + struct btrfs_key key; if (!fs_path) { fs_path = fs_path_alloc(); if (!fs_path) return -ENOMEM; - free_path = true; + free_fs_path = true; } - while (ino > BTRFS_FIRST_FREE_OBJECTID) { - u64 parent; - u64 parent_gen; + path = alloc_path_for_send(); + if (!path) { + ret = -ENOMEM; + goto out; + } - fs_path_reset(fs_path); - ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path); - if (ret < 0) { - if (ret == -ENOENT && ino == ino2) - ret = 0; - goto out; + key.objectid = ino2; + key.type = BTRFS_INODE_REF_KEY; + key.offset = 0; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + while (true) { + struct extent_buffer *leaf = path->nodes[0]; + int slot = path->slots[0]; + u32 cur_offset = 0; + u32 item_size; + + if (slot >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto out; + if (ret > 0) + break; + continue; } - if (parent == ino1) { - ret = parent_gen == ino1_gen ? 
1 : 0; - goto out; + + btrfs_item_key_to_cpu(leaf, &key, slot); + if (key.objectid != ino2) + break; + if (key.type != BTRFS_INODE_REF_KEY && + key.type != BTRFS_INODE_EXTREF_KEY) + break; + + item_size = btrfs_item_size_nr(leaf, slot); + while (cur_offset < item_size) { + u64 parent; + u64 parent_gen; + + if (key.type == BTRFS_INODE_EXTREF_KEY) { + unsigned long ptr; + struct btrfs_inode_extref *extref; + + ptr = btrfs_item_ptr_offset(leaf, slot); + extref = (struct btrfs_inode_extref *) + (ptr + cur_offset); + parent = btrfs_inode_extref_parent(leaf, + extref); + cur_offset += sizeof(*extref); + cur_offset += btrfs_inode_extref_name_len(leaf, + extref); + } else { + parent = key.offset; + cur_offset = item_size; + } + + ret = get_inode_info(root, parent, NULL, &parent_gen, + NULL, NULL, NULL, NULL); + if (ret < 0) + goto out; + ret = check_ino_in_path(root, ino1, ino1_gen, + parent, parent_gen, fs_path); + if (ret) + goto out; } - ino = parent; + path->slots[0]++; } + ret = 0; out: - if (free_path) + btrfs_free_path(path); + if (free_fs_path) fs_path_free(fs_path); return ret; } From 95a7d23415b79c94b4809200f29f458520e9fc56 Mon Sep 17 00:00:00 2001 From: Minwoo Im Date: Sat, 25 Nov 2017 03:03:00 +0900 Subject: [PATCH 1634/3326] nvme-pci: fix NULL pointer dereference in nvme_free_host_mem() [ Upstream commit 7e5dd57ef3081ff6c03908d786ed5087f6fbb7ae ] Following condition which will cause NULL pointer dereference will occur in nvme_free_host_mem() when it tries to remove pci device via nvme_remove() especially after a failure of host memory allocation for HMB. "(host_mem_descs == NULL) && (nr_host_mem_descs != 0)" It's because __nr_host_mem_descs__ is not cleared to 0 unlike __host_mem_descs__ is so. Signed-off-by: Minwoo Im Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/pci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5dae650438c17d..cdd2fd509ddc7e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1617,6 +1617,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev) dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), dev->host_mem_descs, dev->host_mem_descs_dma); dev->host_mem_descs = NULL; + dev->nr_host_mem_descs = 0; } static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, From f730601cdead1ed14be5089e14d8ffcb019373e5 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Thu, 23 Nov 2017 17:13:40 +0100 Subject: [PATCH 1635/3326] xfs: fortify xfs_alloc_buftarg error handling [ Upstream commit d210a9874b8f6166579408131cb74495caff1958 ] percpu_counter_init failure path doesn't clean up &btp->bt_lru list. Call list_lru_destroy in that error path. Similarly register_shrinker error path is not handled. While it is unlikely to trigger these error path, it is not impossible especially the later might fail with large NUMAs. Let's handle the failure to make the code more robust. Noticed-by: Tetsuo Handa Signed-off-by: Michal Hocko Acked-by: Dave Chinner Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_buf.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 2f97c12ca75e4d..16f93d7356b706 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1813,22 +1813,27 @@ xfs_alloc_buftarg( btp->bt_daxdev = dax_dev; if (xfs_setsize_buftarg_early(btp, bdev)) - goto error; + goto error_free; if (list_lru_init(&btp->bt_lru)) - goto error; + goto error_free; if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL)) - goto error; + goto error_lru; btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count; btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan; btp->bt_shrinker.seeks = DEFAULT_SEEKS; btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE; - register_shrinker(&btp->bt_shrinker); + if (register_shrinker(&btp->bt_shrinker)) + goto error_pcpu; return btp; -error: +error_pcpu: + percpu_counter_destroy(&btp->bt_io_count); +error_lru: + list_lru_destroy(&btp->bt_lru); +error_free: kmem_free(btp); return NULL; } From 575c548137f8b397b5e32c66a972c5f0ebcc1cd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 24 Nov 2017 11:39:30 +0100 Subject: [PATCH 1636/3326] drm/amdgpu: don't try to move pinned BOs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 6edc6910ba4cd6eab309263539c8f09b8ad772bf ] Never try to move pinned BOs during CS. Signed-off-by: Christian König Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 60d8bedb694db7..b5aa8e6f8e0beb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -403,6 +403,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, if (candidate->robj == validated) break; + /* We can't move pinned BOs here */ + if (bo->pin_count) + continue; + other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); /* Check if this BO is in one of the domains we need space for */ From 600c904b919a6c3e36cc93754c8941a7b0a1ec6b Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 29 Nov 2017 11:01:09 +0100 Subject: [PATCH 1637/3326] net: ethernet: xilinx: Mark XILINX_LL_TEMAC broken on 64-bit [ Upstream commit 15bfe05c8d6386f1a90e9340d15336e85e32aad6 ] On 64-bit (e.g. powerpc64/allmodconfig): drivers/net/ethernet/xilinx/ll_temac_main.c: In function 'temac_start_xmit_done': drivers/net/ethernet/xilinx/ll_temac_main.c:633:22: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); ^ cdmac_bd.app4 is u32, so it is too small to hold a kernel pointer. Note that several other fields in struct cdmac_bd are also too small to hold physical addresses on 64-bit platforms. Signed-off-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/xilinx/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index 6d68c8a8f4f2ac..da4ec575ccf9ba 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -34,6 +34,7 @@ config XILINX_AXI_EMAC config XILINX_LL_TEMAC tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" depends on (PPC || MICROBLAZE) + depends on !64BIT || BROKEN select PHYLIB ---help--- This driver supports the Xilinx 10/100/1000 LocalLink TEMAC From 8f8b2c79c4a39a7a0a1f5eb079586fce92794dc2 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 29 Nov 2017 22:34:50 +0900 Subject: [PATCH 1638/3326] quota: Check for register_shrinker() failure. [ Upstream commit 88bc0ede8d35edc969350852894dc864a2dc1859 ] register_shrinker() might return -ENOMEM error since Linux 3.12. Call panic() as with other failure checks in this function if register_shrinker() failed. Fixes: 1d3d4437eae1 ("vmscan: per-node deferred work") Signed-off-by: Tetsuo Handa Cc: Jan Kara Cc: Michal Hocko Reviewed-by: Michal Hocko Signed-off-by: Jan Kara Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/quota/dquot.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 8406be97a518aa..4cd0c23366243a 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2985,7 +2985,8 @@ static int __init dquot_init(void) pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld," " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); - register_shrinker(&dqcache_shrinker); + if (register_shrinker(&dqcache_shrinker)) + panic("Cannot register dquot shrinker"); return 0; } From ca4b61373ee86e15b4469285fdd23f9c201fc13d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 24 Nov 2017 12:00:24 -0500 Subject: [PATCH 1639/3326] SUNRPC: Allow connect to return EHOSTUNREACH [ Upstream commit 4ba161a793d5f43757c35feff258d9f20a082940 ] Reported-by: Dmitry Vyukov Signed-off-by: Trond Myklebust Tested-by: Dmitry Vyukov Signed-off-by: Anna Schumaker Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sunrpc/xprtsock.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 4dad5da388d610..8cb40f8ffa5b41 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2437,6 +2437,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) case -ECONNREFUSED: case -ECONNRESET: case -ENETUNREACH: + case -EHOSTUNREACH: case -EADDRINUSE: case -ENOBUFS: /* From bf6a04c3ffe1486ae7e0ad8fb9f10af9bad6d007 Mon Sep 17 00:00:00 2001 From: "Liu, Changcheng" Date: Wed, 29 Nov 2017 16:10:25 -0800 Subject: [PATCH 1640/3326] scripts/faddr2line: extend usage on generic arch [ Upstream commit 95a87982541932503d3f59aba4c30b0bde0a6294 ] When cross-compiling, fadd2line should use the binary tool used for the target system, rather than that of the host. 
Link: http://lkml.kernel.org/r/20171121092911.GA150711@sofia Signed-off-by: Liu Changcheng Cc: Kate Stewart Cc: NeilBrown Cc: Thomas Gleixner Cc: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- scripts/faddr2line | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/scripts/faddr2line b/scripts/faddr2line index 1f5ce959f5965b..39e07d8574dd78 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line @@ -44,9 +44,16 @@ set -o errexit set -o nounset +READELF="${CROSS_COMPILE}readelf" +ADDR2LINE="${CROSS_COMPILE}addr2line" +SIZE="${CROSS_COMPILE}size" +NM="${CROSS_COMPILE}nm" + command -v awk >/dev/null 2>&1 || die "awk isn't installed" -command -v readelf >/dev/null 2>&1 || die "readelf isn't installed" -command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed" +command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed" +command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed" +command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed" +command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed" usage() { echo "usage: faddr2line ..." >&2 @@ -69,10 +76,10 @@ die() { find_dir_prefix() { local objfile=$1 - local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}') + local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}') [[ -z $start_kernel_addr ]] && return - local file_line=$(addr2line -e $objfile $start_kernel_addr) + local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr) [[ -z $file_line ]] && return local prefix=${file_line%init/main.c:*} @@ -104,7 +111,7 @@ __faddr2line() { # Go through each of the object's symbols which match the func name. # In rare cases there might be duplicates. - file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}') + file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}') while read symbol; do local fields=($symbol) local sym_base=0x${fields[0]} @@ -156,10 +163,10 @@ __faddr2line() { # pass real address to addr2line echo "$func+$offset/$sym_size:" - addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" + ${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" DONE=1 - done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }') + done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }') } [[ $# -lt 2 ]] && usage From b2ba0bd3469546e5bf6f32a0bb67d6ccf21d4584 Mon Sep 17 00:00:00 2001 From: Yisheng Xie Date: Wed, 29 Nov 2017 16:11:08 -0800 Subject: [PATCH 1641/3326] kmemleak: add scheduling point to kmemleak_scan() [ Upstream commit bde5f6bc68db51128f875a756e9082a6c6ff7b4c ] kmemleak_scan() will scan struct page for each node and it can be really large and resulting in a soft lockup. We have seen a soft lockup when do scan while compile kernel: watchdog: BUG: soft lockup - CPU#53 stuck for 22s! [bash:10287] [...] Call Trace: kmemleak_scan+0x21a/0x4c0 kmemleak_write+0x312/0x350 full_proxy_write+0x5a/0xa0 __vfs_write+0x33/0x150 vfs_write+0xad/0x1a0 SyS_write+0x52/0xc0 do_syscall_64+0x61/0x1a0 entry_SYSCALL64_slow_path+0x25/0x25 Fix this by adding cond_resched every MAX_SCAN_SIZE. 
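Not part of the patch, but a back-of-the-envelope illustration of how often the new scheduling point fires: assuming MAX_SCAN_SIZE of 4096 bytes and a 64-byte struct page (both values depend on the kernel version and configuration), cond_resched() runs once every 64 struct page entries scanned.

    #include <stdio.h>

    int main(void)
    {
            /* Assumed values; MAX_SCAN_SIZE and sizeof(struct page) vary. */
            unsigned long max_scan_size = 4096;
            unsigned long page_struct_size = 64;

            printf("cond_resched() every %lu struct page entries\n",
                   max_scan_size / page_struct_size);
            return 0;
    }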
Link: http://lkml.kernel.org/r/1511439788-20099-1-git-send-email-xieyisheng1@huawei.com Signed-off-by: Yisheng Xie Suggested-by: Catalin Marinas Acked-by: Catalin Marinas Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- mm/kmemleak.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 7780cd83a4956f..a1ba553816eb39 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1532,6 +1532,8 @@ static void kmemleak_scan(void) if (page_count(page) == 0) continue; scan_block(page, page + 1, NULL); + if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page)))) + cond_resched(); } } put_online_mems(); From 345f165a31b8a287b15e91e5aad03d588f1bd141 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 14 Nov 2017 11:16:47 -0800 Subject: [PATCH 1642/3326] drm/bridge: Fix lvds-encoder since the panel_bridge rework. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit dbb58bfd9ae6c885b2ca001a9a5ab8b881fb4ba9 ] The panel_bridge bridge attaches to the panel's OF node, not the lvds-encoder's node. Put in a little no-op bridge of our own so that our consumers can still find a bridge where they expect. This also fixes an unintended unregistration and leak of the panel-bridge on module remove. Signed-off-by: Eric Anholt Fixes: 13dfc0540a57 ("drm/bridge: Refactor out the panel wrapper from the lvds-encoder bri dge.") Tested-by: Lothar Waßmann Signed-off-by: Archit Taneja Link: https://patchwork.freedesktop.org/patch/msgid/20171114191647.22207-1-eric@anholt.net Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/bridge/lvds-encoder.c | 48 +++++++++++++++++++++++---- 1 file changed, 41 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c index 0903ba574f61c4..75b0d3f6e4de91 100644 --- a/drivers/gpu/drm/bridge/lvds-encoder.c +++ b/drivers/gpu/drm/bridge/lvds-encoder.c @@ -13,13 +13,37 @@ #include +struct lvds_encoder { + struct drm_bridge bridge; + struct drm_bridge *panel_bridge; +}; + +static int lvds_encoder_attach(struct drm_bridge *bridge) +{ + struct lvds_encoder *lvds_encoder = container_of(bridge, + struct lvds_encoder, + bridge); + + return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge, + bridge); +} + +static struct drm_bridge_funcs funcs = { + .attach = lvds_encoder_attach, +}; + static int lvds_encoder_probe(struct platform_device *pdev) { struct device_node *port; struct device_node *endpoint; struct device_node *panel_node; struct drm_panel *panel; - struct drm_bridge *bridge; + struct lvds_encoder *lvds_encoder; + + lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder), + GFP_KERNEL); + if (!lvds_encoder) + return -ENOMEM; /* Locate the panel DT node. */ port = of_graph_get_port_by_id(pdev->dev.of_node, 1); @@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev) return -EPROBE_DEFER; } - bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS); - if (IS_ERR(bridge)) - return PTR_ERR(bridge); + lvds_encoder->panel_bridge = + devm_drm_panel_bridge_add(&pdev->dev, + panel, DRM_MODE_CONNECTOR_LVDS); + if (IS_ERR(lvds_encoder->panel_bridge)) + return PTR_ERR(lvds_encoder->panel_bridge); + + /* The panel_bridge bridge is attached to the panel's of_node, + * but we need a bridge attached to our of_node for our user + * to look up. 
+ */ + lvds_encoder->bridge.of_node = pdev->dev.of_node; + lvds_encoder->bridge.funcs = &funcs; + drm_bridge_add(&lvds_encoder->bridge); - platform_set_drvdata(pdev, bridge); + platform_set_drvdata(pdev, lvds_encoder); return 0; } static int lvds_encoder_remove(struct platform_device *pdev) { - struct drm_bridge *bridge = platform_get_drvdata(pdev); + struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev); - drm_bridge_remove(bridge); + drm_bridge_remove(&lvds_encoder->bridge); return 0; } From 6d734363f2c5e782384765c5a96caa4473645b8d Mon Sep 17 00:00:00 2001 From: Andrey Gusakov Date: Tue, 7 Nov 2017 19:56:19 +0300 Subject: [PATCH 1643/3326] drm/bridge: tc358767: do no fail on hi-res displays [ Upstream commit cffd2b16c01c3431a7a7dd62e722af33490fc436 ] Do not fail data rates higher than 2.7 and more than 2 lanes. Try to fall back to 2.7Gbps and 2 lanes. Acked-by: Philipp Zabel Reviewed-by: Andrzej Hajda Signed-off-by: Andrey Gusakov Signed-off-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/1510073785-16108-2-git-send-email-andrey.gusakov@cogentembedded.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/bridge/tc358767.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 8571cfd877c520..e1996dbfb0a18f 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc) ret = drm_dp_link_probe(&tc->aux, &tc->link.base); if (ret < 0) goto err_dpcd_read; - if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000)) - goto err_dpcd_inval; + if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) { + dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n"); + tc->link.base.rate = 270000; + } + + if (tc->link.base.num_lanes > 2) { + dev_dbg(tc->dev, "Falling to 2 lanes\n"); + tc->link.base.num_lanes = 2; + } ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp); if (ret < 0) @@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc) err_dpcd_read: dev_err(tc->dev, "failed to read DPCD: %d\n", ret); return ret; -err_dpcd_inval: - dev_err(tc->dev, "invalid DPCD\n"); - return -EINVAL; } static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) From 859bacc13cbb34a86f6140430f9fdb767c33d7d4 Mon Sep 17 00:00:00 2001 From: Andrey Gusakov Date: Tue, 7 Nov 2017 19:56:20 +0300 Subject: [PATCH 1644/3326] drm/bridge: tc358767: filter out too high modes [ Upstream commit 99fc8e963a4c0203dba26a77cf737db6081bca14 ] Pixel clock limitation for DPI is 154 MHz. Do not accept modes with higher pixel clock rate. 
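As a quick sanity check of the new limit (an aside, not patch content): a 1920x1080@60 mode at 148.5 MHz stays below the 154 MHz DPI ceiling, while a 3840x2160@30 mode at 297 MHz is rejected. A minimal C sketch of the same comparison, with clocks in kHz as DRM modes carry them:

    #include <stdio.h>

    /* Mirrors the check added to tc_connector_mode_valid(): clocks in kHz. */
    static const char *mode_valid(int clock_khz)
    {
            return (clock_khz > 154000) ? "MODE_CLOCK_HIGH (rejected)" : "MODE_OK";
    }

    int main(void)
    {
            printf("1920x1080@60 (148500 kHz): %s\n", mode_valid(148500));
            printf("3840x2160@30 (297000 kHz): %s\n", mode_valid(297000));
            return 0;
    }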
Reviewed-by: Andrzej Hajda Signed-off-by: Andrey Gusakov Signed-off-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/1510073785-16108-3-git-send-email-andrey.gusakov@cogentembedded.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/bridge/tc358767.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index e1996dbfb0a18f..e571b5e29ae786 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1103,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, static int tc_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - /* Accept any mode */ + /* DPI interface clock limitation: upto 154 MHz */ + if (mode->clock > 154000) + return MODE_CLOCK_HIGH; + return MODE_OK; } From 0106381870a49d1ce47501410b4a7433e505aa71 Mon Sep 17 00:00:00 2001 From: Andrey Gusakov Date: Tue, 7 Nov 2017 19:56:21 +0300 Subject: [PATCH 1645/3326] drm/bridge: tc358767: fix DP0_MISC register set [ Upstream commit f3b8adbe1911f66fd3cab1aaa74f0f66b7ceda25 ] Remove shift from TU_SIZE_RECOMMENDED define as it used to calculate max_tu_symbols. Acked-by: Philipp Zabel Signed-off-by: Andrey Gusakov Signed-off-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/1510073785-16108-4-git-send-email-andrey.gusakov@cogentembedded.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/bridge/tc358767.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index e571b5e29ae786..7334cb2121a383 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -97,7 +97,7 @@ #define DP0_ACTIVEVAL 0x0650 #define DP0_SYNCVAL 0x0654 #define DP0_MISC 0x0658 -#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */ +#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ #define BPC_6 (0 << 5) #define BPC_8 (1 << 5) @@ -716,7 +716,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) * Must be less than tu_size. */ max_tu_symbol = TU_SIZE_RECOMMENDED - 1; - tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8); + tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) | + BPC_8); return 0; err: From b536eb986e45e2347bd4cfaeff0b48d69fde52d1 Mon Sep 17 00:00:00 2001 From: Andrey Gusakov Date: Tue, 7 Nov 2017 19:56:22 +0300 Subject: [PATCH 1646/3326] drm/bridge: tc358767: fix timing calculations [ Upstream commit 66d1c3b94d5d59e4325e61a78d520f92c043d645 ] Fields in HTIM01 and HTIM02 regs should be even. Recomended thresh_dly value is max_tu_symbol. Remove set of VPCTRL0.VSDELAY as it is related to DSI input interface. Currently driver supports only DPI. 
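A small aside (not part of the patch): the even-field requirement is handled with the kernel's ALIGN() macro, which rounds a value up to the next multiple of 2. A standalone C sketch of that rounding, using made-up timing values:

    #include <stdio.h>

    /* Same rounding as the kernel's ALIGN() for a power-of-two alignment. */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            /* Made-up horizontal timing values in pixels. */
            unsigned int hsync_len = 43, back_porch = 113;

            printf("hsync %u -> %u\n", hsync_len, ALIGN_UP(hsync_len, 2u));
            printf("back porch %u -> %u\n", back_porch, ALIGN_UP(back_porch, 2u));
            return 0;
    }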
Acked-by: Philipp Zabel Signed-off-by: Andrey Gusakov Signed-off-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/1510073785-16108-5-git-send-email-andrey.gusakov@cogentembedded.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/bridge/tc358767.c | 34 ++++++++++++++++++------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 7334cb2121a383..b916346b933a5f 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -659,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) int lower_margin = mode->vsync_start - mode->vdisplay; int vsync_len = mode->vsync_end - mode->vsync_start; + /* + * Recommended maximum number of symbols transferred in a transfer unit: + * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, + * (output active video bandwidth in bytes)) + * Must be less than tu_size. + */ + max_tu_symbol = TU_SIZE_RECOMMENDED - 1; + dev_dbg(tc->dev, "set mode %dx%d\n", mode->hdisplay, mode->vdisplay); dev_dbg(tc->dev, "H margin %d,%d sync %d\n", @@ -668,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal); - /* LCD Ctl Frame Size */ - tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ | + /* + * LCD Ctl Frame Size + * datasheet is not clear of vsdelay in case of DPI + * assume we do not need any delay when DPI is a source of + * sync signals + */ + tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ | OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED); - tc_write(HTIM01, (left_margin << 16) | /* H back porch */ - (hsync_len << 0)); /* Hsync */ - tc_write(HTIM02, (right_margin << 16) | /* H front porch */ - (mode->hdisplay << 0)); /* width */ + tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */ + (ALIGN(hsync_len, 2) << 0)); /* Hsync */ + tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */ + (ALIGN(mode->hdisplay, 2) << 0)); /* width */ tc_write(VTIM01, (upper_margin << 16) | /* V back porch */ (vsync_len << 0)); /* Vsync */ tc_write(VTIM02, (lower_margin << 16) | /* V front porch */ @@ -693,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) /* DP Main Stream Attributes */ vid_sync_dly = hsync_len + left_margin + mode->hdisplay; tc_write(DP0_VIDSYNCDELAY, - (0x003e << 16) | /* thresh_dly */ + (max_tu_symbol << 16) | /* thresh_dly */ (vid_sync_dly << 0)); tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal)); @@ -709,13 +722,6 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); - /* - * Recommended maximum number of symbols transferred in a transfer unit: - * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, - * (output active video bandwidth in bytes)) - * Must be less than tu_size. - */ - max_tu_symbol = TU_SIZE_RECOMMENDED - 1; tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) | BPC_8); From 340c9a4ba3031f87fcbfa0d89332d4a5aa468aaf Mon Sep 17 00:00:00 2001 From: Andrey Gusakov Date: Tue, 7 Nov 2017 19:56:23 +0300 Subject: [PATCH 1647/3326] drm/bridge: tc358767: fix AUXDATAn registers access [ Upstream commit 9217c1abbc145a77d65c476cf2004a3df02104c7 ] First four bytes should go to DP0_AUXWDATA0. 
Due to bug if len > 4 first four bytes was writen to DP0_AUXWDATA1 and all data get shifted by 4 bytes. Fix it. Acked-by: Philipp Zabel Signed-off-by: Andrey Gusakov Signed-off-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/1510073785-16108-6-git-send-email-andrey.gusakov@cogentembedded.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/bridge/tc358767.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index b916346b933a5f..24f495bf0567bd 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux, tmp = (tmp << 8) | buf[i]; i++; if (((i % 4) == 0) || (i == size)) { - tc_write(DP0_AUXWDATA(i >> 2), tmp); + tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp); tmp = 0; } } From 464711a74ce3238c7ca07f903bed3189efd23831 Mon Sep 17 00:00:00 2001 From: Andrey Gusakov Date: Tue, 7 Nov 2017 19:56:24 +0300 Subject: [PATCH 1648/3326] drm/bridge: tc358767: fix 1-lane behavior [ Upstream commit 4dbd6c03fbf88299c573d676838896c6e06aade2 ] Use drm_dp_channel_eq_ok helper Acked-by: Philipp Zabel Signed-off-by: Andrey Gusakov Signed-off-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/1510073785-16108-7-git-send-email-andrey.gusakov@cogentembedded.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/bridge/tc358767.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 24f495bf0567bd..8636e7eeb7315c 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -819,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc) unsigned int rate; u32 dp_phy_ctrl; int timeout; - bool aligned; - bool ready; u32 value; int ret; u8 tmp[8]; @@ -965,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc) ret = drm_dp_dpcd_read_link_status(aux, tmp + 2); if (ret < 0) goto err_dpcd_read; - ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */ - DP_CHANNEL_EQ_BITS)); /* Lane0 */ - aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE; - } while ((--timeout) && !(ready && aligned)); + } while ((--timeout) && + !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes))); if (timeout == 0) { /* Read DPCD 0x200-0x201 */ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2); if (ret < 0) goto err_dpcd_read; + dev_err(dev, "channel(s) EQ not ok\n"); dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]); dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n", tmp[1]); @@ -985,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc) dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", tmp[6]); - if (!ready) - dev_err(dev, "Lane0/1 not ready\n"); - if (!aligned) - dev_err(dev, "Lane0/1 not aligned\n"); return -EAGAIN; } From 345dc6d499d8509ec9cc4b9b5b84a070ac87e0d5 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 24 Sep 2017 08:01:03 +0200 Subject: [PATCH 1649/3326] drm/omap: Fix error handling path in 'omap_dmm_probe()' [ Upstream commit 8677b1ac2db021ab30bb1fa34f1e56ebe0051ec3 ] If we don't find a matching device node, we must free the memory allocated in 'omap_dmm' a few lines above. 
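As a generic illustration (not the driver code itself), the fix follows the usual goto-unwind convention: once the private structure is allocated, every later error path jumps to a label that releases it. A minimal userspace sketch of that shape, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    /* Invented stand-ins for the probe steps; only the unwind shape matters. */
    static int match_node(int have_match) { return have_match ? 0 : -19 /* -ENODEV */; }

    static int probe_demo(int have_match)
    {
            int ret;
            char *priv = calloc(1, 128);   /* plays the role of the omap_dmm allocation */

            if (!priv)
                    return -12;            /* -ENOMEM */

            ret = match_node(have_match);
            if (ret)
                    goto fail;             /* was a bare return, which leaked priv */

            /* ... rest of probe would continue here; freed only to keep the demo leak-free ... */
            free(priv);
            return 0;

    fail:
            free(priv);
            return ret;
    }

    int main(void)
    {
            printf("probe_demo(0) = %d\n", probe_demo(0));
            printf("probe_demo(1) = %d\n", probe_demo(1));
            return 0;
    }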
Fixes: 7cb0d6c17b96 ("drm/omap: fix TILER on OMAP5") Signed-off-by: Christophe JAILLET Signed-off-by: Tomi Valkeinen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 1dd3dafc59afd2..c60a85e82c6d8a 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev) match = of_match_node(dmm_of_match, dev->dev.of_node); if (!match) { dev_err(&dev->dev, "failed to find matching device node\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } omap_dmm->plat_data = match->data; From 4a96f3d05668cdee90779b07227054d4ec41b32d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 5 Oct 2017 14:06:41 +0200 Subject: [PATCH 1650/3326] drm/omap: displays: panel-dpi: add backlight dependency [ Upstream commit 499ec0ed5eb2f6a7fcaab2dd66ffc5993484bda9 ] The new backlight code causes a link failure when backlight support itself is disabled: drivers/gpu/drm/omapdrm/displays/panel-dpi.o: In function `panel_dpi_probe_of': panel-dpi.c:(.text+0x35c): undefined reference to `of_find_backlight_by_node' This adds a Kconfig dependency like we have for the other OMAP display targets. Fixes: 39135a305a0f ("drm/omap: displays: panel-dpi: Support for handling backlight devices") Signed-off-by: Arnd Bergmann Signed-off-by: Tomi Valkeinen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/omapdrm/displays/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig index c226da145fb3cd..a349cb61961e03 100644 --- a/drivers/gpu/drm/omapdrm/displays/Kconfig +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig @@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV config DRM_OMAP_PANEL_DPI tristate "Generic DPI panel" + depends on BACKLIGHT_CLASS_DEVICE help Driver for generic DPI panels. From 4e506f41705cf9b2ea64e9d698bc272dd9c25b2d Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Mon, 27 Nov 2017 09:50:17 -0800 Subject: [PATCH 1651/3326] xfs: ubsan fixes [ Upstream commit 22a6c83777ac7c17d6c63891beeeac24cf5da450 ] Fix some complaints from the UBSAN about signed integer addition overflows. Signed-off-by: Darrick J. 
Wong Reviewed-by: Brian Foster Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_aops.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index a3eeaba156c5ab..b0cccf8a81a89e 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -399,7 +399,7 @@ xfs_map_blocks( (ip->i_df.if_flags & XFS_IFEXTENTS)); ASSERT(offset <= mp->m_super->s_maxbytes); - if (offset + count > mp->m_super->s_maxbytes) + if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes) count = mp->m_super->s_maxbytes - offset; end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); offset_fsb = XFS_B_TO_FSBT(mp, offset); @@ -1265,7 +1265,7 @@ xfs_map_trim_size( if (mapping_size > size) mapping_size = size; if (offset < i_size_read(inode) && - offset + mapping_size >= i_size_read(inode)) { + (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) { /* limit mapping to block that spans EOF */ mapping_size = roundup_64(i_size_read(inode) - offset, i_blocksize(inode)); @@ -1312,7 +1312,7 @@ xfs_get_blocks( lockmode = xfs_ilock_data_map_shared(ip); ASSERT(offset <= mp->m_super->s_maxbytes); - if (offset + size > mp->m_super->s_maxbytes) + if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes) size = mp->m_super->s_maxbytes - offset; end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); offset_fsb = XFS_B_TO_FSBT(mp, offset); From 773a1c5baad243ad30b41700111f0c499f6a259d Mon Sep 17 00:00:00 2001 From: Carlos Maiolino Date: Tue, 28 Nov 2017 08:54:10 -0800 Subject: [PATCH 1652/3326] xfs: Properly retry failed dquot items in case of error during buffer writeback [ Upstream commit 373b0589dc8d58bc09c9a28d03611ae4fb216057 ] Once the inode item writeback errors is already fixed, it's time to fix the same problem in dquot code. Although there were no reports of users hitting this bug in dquot code (at least none I've seen), the bug is there and I was already planning to fix it when the correct approach to fix the inodes part was decided. This patch aims to fix the same problem in dquot code, regarding failed buffers being unable to be resubmitted once they are flush locked. Tested with the recently test-case sent to fstests list by Hou Tao. Reviewed-by: Brian Foster Signed-off-by: Carlos Maiolino Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_dquot.c | 14 +++++++++++--- fs/xfs/xfs_dquot_item.c | 40 ++++++++++++++++++++++++++++++++++++++-- 2 files changed, 49 insertions(+), 5 deletions(-) diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index cd82429d8df76d..5a86495127fdc9 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -987,14 +987,22 @@ xfs_qm_dqflush_done( * holding the lock before removing the dquot from the AIL. */ if ((lip->li_flags & XFS_LI_IN_AIL) && - lip->li_lsn == qip->qli_flush_lsn) { + ((lip->li_lsn == qip->qli_flush_lsn) || + (lip->li_flags & XFS_LI_FAILED))) { /* xfs_trans_ail_delete() drops the AIL lock. 
*/ spin_lock(&ailp->xa_lock); - if (lip->li_lsn == qip->qli_flush_lsn) + if (lip->li_lsn == qip->qli_flush_lsn) { xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE); - else + } else { + /* + * Clear the failed state since we are about to drop the + * flush lock + */ + if (lip->li_flags & XFS_LI_FAILED) + xfs_clear_li_failed(lip); spin_unlock(&ailp->xa_lock); + } } /* diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c index 2c7a1629e064b4..664dea105e76fe 100644 --- a/fs/xfs/xfs_dquot_item.c +++ b/fs/xfs/xfs_dquot_item.c @@ -137,6 +137,26 @@ xfs_qm_dqunpin_wait( wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0)); } +/* + * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer + * have been failed during writeback + * + * this informs the AIL that the dquot is already flush locked on the next push, + * and acquires a hold on the buffer to ensure that it isn't reclaimed before + * dirty data makes it to disk. + */ +STATIC void +xfs_dquot_item_error( + struct xfs_log_item *lip, + struct xfs_buf *bp) +{ + struct xfs_dquot *dqp; + + dqp = DQUOT_ITEM(lip)->qli_dquot; + ASSERT(!completion_done(&dqp->q_flush)); + xfs_set_li_failed(lip, bp); +} + STATIC uint xfs_qm_dquot_logitem_push( struct xfs_log_item *lip, @@ -144,13 +164,28 @@ xfs_qm_dquot_logitem_push( __acquires(&lip->li_ailp->xa_lock) { struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; - struct xfs_buf *bp = NULL; + struct xfs_buf *bp = lip->li_buf; uint rval = XFS_ITEM_SUCCESS; int error; if (atomic_read(&dqp->q_pincount) > 0) return XFS_ITEM_PINNED; + /* + * The buffer containing this item failed to be written back + * previously. Resubmit the buffer for IO + */ + if (lip->li_flags & XFS_LI_FAILED) { + if (!xfs_buf_trylock(bp)) + return XFS_ITEM_LOCKED; + + if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list)) + rval = XFS_ITEM_FLUSHING; + + xfs_buf_unlock(bp); + return rval; + } + if (!xfs_dqlock_nowait(dqp)) return XFS_ITEM_LOCKED; @@ -242,7 +277,8 @@ static const struct xfs_item_ops xfs_dquot_item_ops = { .iop_unlock = xfs_qm_dquot_logitem_unlock, .iop_committed = xfs_qm_dquot_logitem_committed, .iop_push = xfs_qm_dquot_logitem_push, - .iop_committing = xfs_qm_dquot_logitem_committing + .iop_committing = xfs_qm_dquot_logitem_committing, + .iop_error = xfs_dquot_item_error }; /* From 612959943e409aa9a1dd5cfaa7ade8e270ead7d2 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Wed, 15 Nov 2017 08:47:02 +0300 Subject: [PATCH 1653/3326] perf/core: Fix memory leak triggered by perf --namespace [ Upstream commit 0e18dd12064e07519f7cbff4149ca7fff620cbed ] perf with --namespace key leaks various memory objects including namespaces 4.14.0+ pid_namespace 1 12 2568 12 8 user_namespace 1 39 824 39 8 net_namespace 1 5 6272 5 8 This happen because perf_fill_ns_link_info() struct patch ns_path: during initialization ns_path incremented counters on related mnt and dentry, but without lost path_put nobody decremented them back. Leaked dentry is name of related namespace, and its leak does not allow to free unused namespace. 
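A minimal sketch of the reference pairing the fix below restores (the function and its caller are generic stand-ins; path_put() is the real helper under discussion): code that is handed a filled-in struct path also holds a reference on its dentry and vfsmount, and must drop that reference with path_put() once the fields of interest have been copied out.

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/path.h>

/* Assume some lookup helper has filled *p and taken references for us. */
static unsigned long example_read_ino(struct path *p)
{
        unsigned long ino = d_inode(p->dentry)->i_ino;

        path_put(p);    /* drop the dentry/mnt references once we are done */
        return ino;
}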
Signed-off-by: Vasily Averin Acked-by: Peter Zijlstra Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Hari Bathini Cc: Jiri Olsa Cc: Linus Torvalds Cc: Namhyung Kim Cc: Thomas Gleixner Fixes: commit e422267322cd ("perf: Add PERF_RECORD_NAMESPACES to include namespaces related info") Link: http://lkml.kernel.org/r/c510711b-3904-e5e1-d296-61273d21118d@virtuozzo.com Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/events/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/events/core.c b/kernel/events/core.c index 24ebad5567b4a7..8c20af8738ac02 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6756,6 +6756,7 @@ static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info, ns_inode = ns_path.dentry->d_inode; ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev); ns_link_info->ino = ns_inode->i_ino; + path_put(&ns_path); } } From 39527e909ea52bfec616536a1b02011f58a08c30 Mon Sep 17 00:00:00 2001 From: "Guilherme G. Piccoli" Date: Fri, 17 Nov 2017 19:14:55 -0200 Subject: [PATCH 1654/3326] scsi: aacraid: Prevent crash in case of free interrupt during scsi EH path [ Upstream commit e4717292ddebcfe231651b5aff9fa19ca158d178 ] As part of the scsi EH path, aacraid performs a reinitialization of the adapter, which encompass freeing resources and IRQs, NULLifying lots of pointers, and then initialize it all over again. We've identified a problem during the free IRQ portion of this path if CONFIG_DEBUG_SHIRQ is enabled on kernel config file. Happens that, in case this flag was set, right after free_irq() effectively clears the interrupt, it checks if it was requested as IRQF_SHARED. In positive case, it performs another call to the IRQ handler on driver. Problem is: since aacraid currently free some resources *before* freeing the IRQ, once free_irq() path calls the handler again (due to CONFIG_DEBUG_SHIRQ), aacraid crashes due to NULL pointer dereference with the following trace: aac_src_intr_message+0xf8/0x740 [aacraid] __free_irq+0x33c/0x4a0 free_irq+0x78/0xb0 aac_free_irq+0x13c/0x150 [aacraid] aac_reset_adapter+0x2e8/0x970 [aacraid] aac_eh_reset+0x3a8/0x5d0 [aacraid] scsi_try_host_reset+0x74/0x180 scsi_eh_ready_devs+0xc70/0x1510 scsi_error_handler+0x624/0xa20 This patch prevents the crash by changing the order of the deinitialization in this path of aacraid: first we clear the IRQ, then we free other resources. No functional change intended. Signed-off-by: Guilherme G. Piccoli Reviewed-by: Raghava Aditya Renukunta Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/aacraid/commsup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index ffbfd042be08c9..c0a4fcb7fd0afd 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1583,6 +1583,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) * will ensure that i/o is queisced and the card is flushed in that * case. */ + aac_free_irq(aac); aac_fib_map_free(aac); dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, aac->comm_phys); @@ -1590,7 +1591,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) aac->comm_phys = 0; kfree(aac->queues); aac->queues = NULL; - aac_free_irq(aac); kfree(aac->fsa_dev); aac->fsa_dev = NULL; From 6c27a40c9e182ed318e886c4edc35fbbc182d384 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Mon, 20 Nov 2017 08:12:29 -0600 Subject: [PATCH 1655/3326] scsi: ufs: ufshcd: fix potential NULL pointer dereference in ufshcd_config_vreg [ Upstream commit 727535903bea924c4f73abb202c4b3e85fff0ca4 ] _vreg_ is being dereferenced before it is null checked, hence there is a potential null pointer dereference. Fix this by moving the pointer dereference after _vreg_ has been null checked. This issue was detected with the help of Coccinelle. Fixes: aa4976130934 ("ufs: Add regulator enable support") Signed-off-by: Gustavo A. R. Silva Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/ufs/ufshcd.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 794a4600e952a4..d344fef01f1db7 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -6555,12 +6555,15 @@ static int ufshcd_config_vreg(struct device *dev, struct ufs_vreg *vreg, bool on) { int ret = 0; - struct regulator *reg = vreg->reg; - const char *name = vreg->name; + struct regulator *reg; + const char *name; int min_uV, uA_load; BUG_ON(!vreg); + reg = vreg->reg; + name = vreg->name; + if (regulator_count_voltages(reg) > 0) { min_uV = on ? vreg->min_uV : 0; ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); From 99f3d5f37e58692f1147e6d4b0ca2e42fe1f6453 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 15 Nov 2017 14:12:30 +0200 Subject: [PATCH 1656/3326] iwlwifi: mvm: fix the TX queue hang timeout for MONITOR vif type [ Upstream commit d1b275ffec459c5ae12b5c7086c84175696e5a9f ] The MONITOR type is missing in the interface type switch. Add it. Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 53e269d5405056..0ae7624eac9dbc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1181,6 +1181,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, return le32_to_cpu(txq_timer->p2p_go); case NL80211_IFTYPE_P2P_DEVICE: return le32_to_cpu(txq_timer->p2p_device); + case NL80211_IFTYPE_MONITOR: + return default_timeout; default: WARN_ON(1); return mvm->cfg->base_params->wd_timeout; From c67fa169456e2179be40b3e398030019879dacd2 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 20 Nov 2017 13:25:05 +0200 Subject: [PATCH 1657/3326] iwlwifi: fix access to prph when transport is stopped [ Upstream commit 0232d2cd7aa8e1b810fe84fb4059a0bd1eabe2ba ] When getting HW rfkill we get stop_device being called from two paths. One path is the IRQ calling stop device, and updating op mode and stack. As a result, cfg80211 is running rfkill sync work that shuts down all devices (second path). In the second path, we eventually get to iwl_mvm_stop_device which calls iwl_fw_dump_conf_clear->iwl_fw_dbg_stop_recording, that access periphery registers. The device may be stopped at this point from the first path, which will result with a failure to access those registers. Simply checking for the trans status is insufficient, since the race will still exist, only minimized. 
Instead, move the stop from iwl_fw_dump_conf_clear (which is getting called only from stop path) to the transport stop device function, where the access is always safe. This has the added value, of actually stopping dbgc before stopping device even when the stop is initiated from the transport. Fixes: 1efc3843a4ee ("iwlwifi: stop dbgc recording before stopping DMA") Signed-off-by: Sara Sharon Signed-off-by: Luca Coelho Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 2 -- drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 6 ++++++ drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 9 +++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 9c889a32fe2424..223fb77a3aa9d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt) static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) { - iwl_fw_dbg_stop_recording(fwrt); - fwrt->dump.conf = FW_DBG_INVALID; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index c59f4581e97271..ac05fd1e74c4c8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -49,6 +49,7 @@ * *****************************************************************************/ #include "iwl-trans.h" +#include "iwl-prph.h" #include "iwl-context-info.h" #include "internal.h" @@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = true; + /* Stop dbgc before stopping device */ + iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); + udelay(100); + iwl_write_prph(trans, DBGC_OUT_CTRL, 0); + /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 2e3e013ec95acf..12a9b86d71eacf 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1138,6 +1138,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = true; + /* Stop dbgc before stopping device */ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); + } else { + iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); + udelay(100); + iwl_write_prph(trans, DBGC_OUT_CTRL, 0); + } + /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); From 12f165f4418aaa904e7412ee178ee43437f65bc8 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 7 Nov 2017 10:58:34 -0800 Subject: [PATCH 1658/3326] ARM: dts: NSP: Disable AHCI controller for HR NSP boards [ Upstream commit 77416ab35f5712382e5a792bfa1736ceb70d5bbb ] The AHCI controller is currently enabled for all of these boards: bcm958623hr and bcm958625hr would result in a hard hang on boot that we cannot get rid of. Since this does not appear to have an easy and simple fix, just disable the AHCI controller for now until this gets resolved. 
Fixes: 70725d6e97ac ("ARM: dts: NSP: Enable SATA on bcm958625hr") Fixes: d454c3762437 ("ARM: dts: NSP: Add new DT file for bcm958623hr") Acked-by: Jon Mason Signed-off-by: Florian Fainelli Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/bcm958623hr.dts | 4 ---- arch/arm/boot/dts/bcm958625hr.dts | 4 ---- 2 files changed, 8 deletions(-) diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts index 3bc50849d013ff..b8bde13de90a57 100644 --- a/arch/arm/boot/dts/bcm958623hr.dts +++ b/arch/arm/boot/dts/bcm958623hr.dts @@ -141,10 +141,6 @@ status = "okay"; }; -&sata { - status = "okay"; -}; - &qspi { bspi-sel = <0>; flash: m25p80@0 { diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts index d94d14b3c745a0..6a44b802170217 100644 --- a/arch/arm/boot/dts/bcm958625hr.dts +++ b/arch/arm/boot/dts/bcm958625hr.dts @@ -177,10 +177,6 @@ status = "okay"; }; -&sata { - status = "okay"; -}; - &srab { compatible = "brcm,bcm58625-srab", "brcm,nsp-srab"; status = "okay"; From 504b902b36a9cd487cbcd2f209a9a5c6213cb320 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 7 Nov 2017 11:10:29 -0800 Subject: [PATCH 1659/3326] ARM: dts: NSP: Fix PPI interrupt types [ Upstream commit 5f1aa51c7a1eef1c5a60b8334e32c89904964245 ] Booting a kernel results in the kernel warning us about the following PPI interrupts configuration: [ 0.105127] smp: Bringing up secondary CPUs ... [ 0.110545] GIC: PPI11 is secure or misconfigured [ 0.110551] GIC: PPI13 is secure or misconfigured Fix this by using the appropriate edge configuration for PPI11 and PPI13, this is similar to what was fixed for Northstar (BCM5301X) in commit 0e34079cd1f6 ("ARM: dts: BCM5301X: Correct GIC_PPI interrupt flags"). Fixes: 7b2e987de207 ("ARM: NSP: add minimal Northstar Plus device tree") Fixes: 1a9d53cabaf4 ("ARM: dts: NSP: Add TWD Support to DT") Acked-by: Jon Mason Signed-off-by: Florian Fainelli Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/bcm-nsp.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index dff66974feeda5..d5f5e92e74889d 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -85,7 +85,7 @@ timer@20200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x20200 0x100>; - interrupts = ; + interrupts = ; clocks = <&periph_clk>; }; @@ -93,7 +93,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x20600 0x20>; interrupts = ; + IRQ_TYPE_EDGE_RISING)>; clocks = <&periph_clk>; }; From 99a8cad9c1b2236439d7d44c86cdbcdd5831154b Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Sun, 16 Apr 2017 02:51:16 -0400 Subject: [PATCH 1660/3326] media: usbtv: add a new usbid [ Upstream commit 04226916d2360f56d57ad00bc48d2d1854d1e0b0 ] A new usbid of UTV007 is found in a newly bought device. The usbid is 1f71:3301. The ID on the chip is: UTV007 A89029.1 1520L18K1 Both video and audio is tested with the modified usbtv driver. 
Signed-off-by: Icenowy Zheng Acked-by: Lubomir Rintel Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/usbtv/usbtv-core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index f06f09a0876e5c..71fb5734995b35 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -144,6 +144,7 @@ static void usbtv_disconnect(struct usb_interface *intf) static const struct usb_device_id usbtv_id_table[] = { { USB_DEVICE(0x1b71, 0x3002) }, + { USB_DEVICE(0x1f71, 0x3301) }, {} }; MODULE_DEVICE_TABLE(usb, usbtv_id_table); From aafb1a7eb0f73cf8bb59ed7cc6d29e19d4de49bc Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Fri, 24 Nov 2017 09:42:21 +0100 Subject: [PATCH 1661/3326] x86/xen: Support early interrupts in xen pv guests [ Upstream commit 42b3a4cb5609de757f5445fcad18945ba9239a07 ] Add early interrupt handlers activated by idt_setup_early_handler() to the handlers supported by Xen pv guests. This will allow for early WARN() calls not crashing the guest. Suggested-by: Andy Lutomirski Signed-off-by: Juergen Gross Signed-off-by: Thomas Gleixner Cc: xen-devel@lists.xenproject.org Cc: boris.ostrovsky@oracle.com Link: https://lkml.kernel.org/r/20171124084221.30172-1-jgross@suse.com Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/segment.h | 12 +++++++++++ arch/x86/mm/extable.c | 4 +++- arch/x86/xen/enlighten_pv.c | 37 ++++++++++++++++++++++------------ arch/x86/xen/xen-asm_64.S | 14 +++++++++++++ 4 files changed, 53 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index b20f9d623f9c63..8f09012b92e779 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -236,11 +236,23 @@ */ #define EARLY_IDT_HANDLER_SIZE 9 +/* + * xen_early_idt_handler_array is for Xen pv guests: for each entry in + * early_idt_handler_array it contains a prequel in the form of + * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to + * max 8 bytes. + */ +#define XEN_EARLY_IDT_HANDLER_SIZE 8 + #ifndef __ASSEMBLY__ extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE]; extern void early_ignore_irq(void); +#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV) +extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE]; +#endif + /* * Load a segment. Fall back on loading the zero segment if something goes * wrong. This variant assumes that loading zero fully clears the segment. diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 30bc4812ceb8a1..9fe656c42aa5b1 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr) * Old CPUs leave the high bits of CS on the stack * undefined. I'm not sure which CPUs do this, but at least * the 486 DX works this way. + * Xen pv domains are not using the default __KERNEL_CS. 
*/ - if (regs->cs != __KERNEL_CS) + if (!xen_pv_domain() && regs->cs != __KERNEL_CS) goto fail; /* diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index ae3a071e1d0f4e..899a22a02e95ba 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -622,7 +622,7 @@ static struct trap_array_entry trap_array[] = { { simd_coprocessor_error, xen_simd_coprocessor_error, false }, }; -static bool get_trap_addr(void **addr, unsigned int ist) +static bool __ref get_trap_addr(void **addr, unsigned int ist) { unsigned int nr; bool ist_okay = false; @@ -644,6 +644,14 @@ static bool get_trap_addr(void **addr, unsigned int ist) } } + if (nr == ARRAY_SIZE(trap_array) && + *addr >= (void *)early_idt_handler_array[0] && + *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) { + nr = (*addr - (void *)early_idt_handler_array[0]) / + EARLY_IDT_HANDLER_SIZE; + *addr = (void *)xen_early_idt_handler_array[nr]; + } + if (WARN_ON(ist != 0 && !ist_okay)) return false; @@ -1261,6 +1269,21 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_setup_gdt(0); xen_init_irq_ops(); + + /* Let's presume PV guests always boot on vCPU with id 0. */ + per_cpu(xen_vcpu_id, 0) = 0; + + /* + * Setup xen_vcpu early because idt_setup_early_handler needs it for + * local_irq_disable(), irqs_disabled(). + * + * Don't do the full vcpu_info placement stuff until we have + * the cpu_possible_mask and a non-dummy shared_info. + */ + xen_vcpu_info_reset(0); + + idt_setup_early_handler(); + xen_init_capabilities(); #ifdef CONFIG_X86_LOCAL_APIC @@ -1294,18 +1317,6 @@ asmlinkage __visible void __init xen_start_kernel(void) */ acpi_numa = -1; #endif - /* Let's presume PV guests always boot on vCPU with id 0. */ - per_cpu(xen_vcpu_id, 0) = 0; - - /* - * Setup xen_vcpu early because start_kernel needs it for - * local_irq_disable(), irqs_disabled(). - * - * Don't do the full vcpu_info placement stuff until we have - * the cpu_possible_mask and a non-dummy shared_info. - */ - xen_vcpu_info_reset(0); - WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv)); local_irq_disable(); diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index 8a10c9a9e2b506..417b339e5c8e1a 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -15,6 +15,7 @@ #include +#include #include .macro xen_pv_trap name @@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat #endif xen_pv_trap hypervisor_callback + __INIT +ENTRY(xen_early_idt_handler_array) + i = 0 + .rept NUM_EXCEPTION_VECTORS + pop %rcx + pop %r11 + jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE + i = i + 1 + .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc + .endr +END(xen_early_idt_handler_array) + __FINIT + hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 /* * Xen64 iret frame: From 0479bc01767ff58170a2f1ae27e6b98f9dfa01b9 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 14 Nov 2017 16:18:28 +0000 Subject: [PATCH 1662/3326] usb: gadget: don't dereference g until after it has been null checked [ Upstream commit b2fc059fa549fe6881d4c1f8d698b0f50bcd16ec ] Avoid dereferencing pointer g until after g has been sanity null checked; move the assignment of cdev much later when it is required into a more local scope. 
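A generic sketch of the pattern being corrected (struct widget is illustrative, not the gadget API): a field must not be read before the pointer is NULL-checked, and a value needed in only one branch can simply be fetched inside that branch.

struct widget { int mode; };

static int example_broken(struct widget *w)
{
        int mode = w->mode;     /* dereferences w before the check below */

        if (!w)
                return -1;      /* too late: the load above already happened */
        return mode;
}

static int example_fixed(struct widget *w)
{
        if (!w)
                return -1;
        return w->mode;         /* dereference only after the NULL check */
}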
Detected by CoverityScan, CID#1222135 ("Dereference before null check") Fixes: b785ea7ce662 ("usb: gadget: composite: fix ep->maxburst initialization") Signed-off-by: Colin Ian King Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/composite.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 5d061b3d8224aa..ed9346f0b0005b 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -150,7 +150,6 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep) { - struct usb_composite_dev *cdev = get_gadget_data(g); struct usb_endpoint_descriptor *chosen_desc = NULL; struct usb_descriptor_header **speed_desc = NULL; @@ -229,8 +228,12 @@ int config_ep_by_speed(struct usb_gadget *g, _ep->maxburst = comp_desc->bMaxBurst + 1; break; default: - if (comp_desc->bMaxBurst != 0) + if (comp_desc->bMaxBurst != 0) { + struct usb_composite_dev *cdev; + + cdev = get_gadget_data(g); ERROR(cdev, "ep0 bMaxBurst must be 0\n"); + } _ep->maxburst = 1; break; } From c789cfe0ae72005f368aef5c361851fabec0e5f5 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Sat, 25 Nov 2017 13:32:38 -0600 Subject: [PATCH 1663/3326] staging: rtl8188eu: Fix incorrect response to SIOCGIWESSID [ Upstream commit b77992d2df9e47144354d1b25328b180afa33442 ] When not associated with an AP, wifi device drivers should respond to the SIOCGIWESSID ioctl with a zero-length string for the SSID, which is the behavior expected by dhcpcd. Currently, this driver returns an error code (-1) from the ioctl call, which causes dhcpcd to assume that the device is not a wireless interface and therefore it fails to work correctly with it thereafter. This problem was reported and tested at https://github.com/lwfinger/rtl8188eu/issues/234. Signed-off-by: Larry Finger Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index c0664dc80bf246..446310775e9021 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev, if ((check_fwstate(pmlmepriv, _FW_LINKED)) || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) { len = pcur_bss->Ssid.SsidLength; - - wrqu->essid.length = len; - memcpy(extra, pcur_bss->Ssid.Ssid, len); - - wrqu->essid.flags = 1; } else { - ret = -1; - goto exit; + len = 0; + *extra = 0; } - -exit: - + wrqu->essid.length = len; + wrqu->essid.flags = 1; return ret; } From e9273b08d4fb808baeebcd2637b5257d3f0db18c Mon Sep 17 00:00:00 2001 From: Stefan Schake Date: Fri, 29 Dec 2017 17:05:43 +0100 Subject: [PATCH 1664/3326] drm/vc4: Move IRQ enable to PM path [ Upstream commit ce9caf2f79a5aa170a4b6456a03db639eed9c988 ] We were calling enable_irq on bind, where it was already enabled previously by the IRQ helper. Additionally, dev->irq is not set correctly until after postinstall and so was always zero here, triggering a warning in 4.15. Fix both by moving the enable to the power management resume path, where we know there was a previous disable invocation during suspend. 
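A minimal sketch of the pairing rule behind this move (the irq number and the suspend/resume split are illustrative): enable_irq() only undoes an earlier disable_irq(), so it belongs in a path that is guaranteed to follow such a disable, e.g. resume after suspend, not in a bind path where the interrupt is already enabled.

#include <linux/interrupt.h>

static void example_suspend(unsigned int irq)
{
        disable_irq(irq);       /* disable depth goes 0 -> 1 */
}

static void example_resume(unsigned int irq)
{
        enable_irq(irq);        /* depth goes 1 -> 0, balancing the suspend path */
}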
Fixes: 253696ccd613 ("drm/vc4: Account for interrupts in flight") Signed-off-by: Stefan Schake Signed-off-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/1514563543-32511-1-git-send-email-stschake@gmail.com Tested-by: Stefan Wahren Reviewed-by: Eric Anholt Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vc4/vc4_irq.c | 3 --- drivers/gpu/drm/vc4/vc4_v3d.c | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index 61b2e5377993dc..521addec831e99 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c @@ -208,9 +208,6 @@ vc4_irq_postinstall(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - /* Undo the effects of a previous vc4_irq_uninstall. */ - enable_irq(dev->irq); - /* Enable both the render done and out of memory interrupts. */ V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS); diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index 622cd43840b8c5..493f392b3a0a90 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev) return ret; vc4_v3d_init_hw(vc4->dev); + + /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */ + enable_irq(vc4->dev->irq); vc4_irq_postinstall(vc4->dev); return 0; From c065b9947b05d63936dad5f3d5aa6e83d1e0346f Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 11 Jan 2018 16:55:24 +0100 Subject: [PATCH 1665/3326] KVM: x86: emulate #UD while in guest mode [ Upstream commit bd89525a823ce6edddcedbe9aed79faa1b9cf544 ] This reverts commits ae1f57670703656cc9f293722c3b8b6782f8ab3f and ac9b305caa0df6f5b75d294e4b86c1027648991e. If the hardware doesn't support MOVBE, but L0 sets CPUID.01H:ECX.MOVBE in L1's emulated CPUID information, then L1 is likely to pass that CPUID bit through to L2. L2 will expect MOVBE to work, but if L1 doesn't intercept #UD, then any MOVBE instruction executed in L2 will raise #UD, and the exception will be delivered in L2. Commit ac9b305caa0df6f5b75d294e4b86c1027648991e is a better and more complete version of ae1f57670703 ("KVM: nVMX: Do not emulate #UD while in guest mode"); however, neither considers the above case. 
Suggested-by: Jim Mattson Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/svm.c | 9 +-------- arch/x86/kvm/vmx.c | 5 +---- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index c8be4e6d365b8a..6a8284f7232830 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -362,7 +362,6 @@ static void recalc_intercepts(struct vcpu_svm *svm) { struct vmcb_control_area *c, *h; struct nested_state *g; - u32 h_intercept_exceptions; mark_dirty(svm->vmcb, VMCB_INTERCEPTS); @@ -373,14 +372,9 @@ static void recalc_intercepts(struct vcpu_svm *svm) h = &svm->nested.hsave->control; g = &svm->nested; - /* No need to intercept #UD if L1 doesn't intercept it */ - h_intercept_exceptions = - h->intercept_exceptions & ~(1U << UD_VECTOR); - c->intercept_cr = h->intercept_cr | g->intercept_cr; c->intercept_dr = h->intercept_dr | g->intercept_dr; - c->intercept_exceptions = - h_intercept_exceptions | g->intercept_exceptions; + c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; c->intercept = h->intercept | g->intercept; } @@ -2195,7 +2189,6 @@ static int ud_interception(struct vcpu_svm *svm) { int er; - WARN_ON_ONCE(is_guest_mode(&svm->vcpu)); er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); if (er == EMULATE_USER_EXIT) return 0; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5242fac8165b4f..a45063a9219c05 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1891,7 +1891,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; - eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) | + eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR); if ((vcpu->guest_debug & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == @@ -1909,8 +1909,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) */ if (is_guest_mode(vcpu)) eb |= get_vmcs12(vcpu)->exception_bitmap; - else - eb |= 1u << UD_VECTOR; vmcs_write32(EXCEPTION_BITMAP, eb); } @@ -5921,7 +5919,6 @@ static int handle_exception(struct kvm_vcpu *vcpu) return 1; /* already handled by vmx_vcpu_run() */ if (is_invalid_opcode(intr_info)) { - WARN_ON_ONCE(is_guest_mode(vcpu)); er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); if (er == EMULATE_USER_EXIT) return 0; From 7a4b5ee973cfbcf32bff2fe03bed1786d5baa14e Mon Sep 17 00:00:00 2001 From: Dmitry Eremin Date: Thu, 25 Jan 2018 16:51:04 +0300 Subject: [PATCH 1666/3326] staging: lustre: separate a connection destroy from free struct kib_conn commit 9b046013e5837f8a58453d1e9f8e01d03adb7fe7 upstream. The logic of the original commit 4d99b2581eff ("staging: lustre: avoid intensive reconnecting for ko2iblnd") was assumed conditional free of struct kib_conn if the second argument free_conn in function kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) is true. But this hunk of code was dropped from original commit. As result the logic works wrong and current code use struct kib_conn after free. 
> drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c > 3317 kiblnd_destroy_conn(conn, !peer); > ^^^^ Freed always (but should be conditionally) > 3318 > 3319 spin_lock_irqsave(lock, flags); > 3320 if (!peer) > 3321 continue; > 3322 > 3323 conn->ibc_peer = peer; > ^^^^^^^^^^^^^^ Use after free > 3324 if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE) > 3325 list_add_tail(&conn->ibc_list, > ^^^^^^^^^^^^^^ Use after free > 3326 &kiblnd_data.kib_reconn_list); > 3327 else > 3328 list_add_tail(&conn->ibc_list, > ^^^^^^^^^^^^^^ Use after free > 3329 &kiblnd_data.kib_reconn_wait); To avoid confusion this fix moved the freeing a struct kib_conn outside of the function kiblnd_destroy_conn() and free as it was intended in original commit. Fixes: 4d99b2581eff ("staging: lustre: avoid intensive reconnecting for ko2iblnd") Signed-off-by: Dmitry Eremin Reviewed-by: Andreas Dilger Signed-off-by: Greg Kroah-Hartman Signed-off-by: Greg Kroah-Hartman --- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 7 +++---- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h | 2 +- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 6 ++++-- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index 64763aacda57d5..284cdd44a2ee39 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -825,14 +825,15 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm return conn; failed_2: - kiblnd_destroy_conn(conn, true); + kiblnd_destroy_conn(conn); + LIBCFS_FREE(conn, sizeof(*conn)); failed_1: LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); failed_0: return NULL; } -void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) +void kiblnd_destroy_conn(struct kib_conn *conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; struct kib_peer *peer = conn->ibc_peer; @@ -895,8 +896,6 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) rdma_destroy_id(cmid); atomic_dec(&net->ibn_nconns); } - - LIBCFS_FREE(conn, sizeof(*conn)); } int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why) diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h index a1e994a1cc84ea..98a5e2c21a8341 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h @@ -1015,7 +1015,7 @@ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why); struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid, int state, int version); -void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn); +void kiblnd_destroy_conn(struct kib_conn *conn); void kiblnd_close_conn(struct kib_conn *conn, int error); void kiblnd_close_conn_locked(struct kib_conn *conn, int error); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 8fc191d9992703..29e10021b906b2 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -3313,11 +3313,13 @@ kiblnd_connd(void *arg) spin_unlock_irqrestore(lock, flags); dropped_lock = 1; - kiblnd_destroy_conn(conn, !peer); + kiblnd_destroy_conn(conn); spin_lock_irqsave(lock, flags); - if (!peer) + if (!peer) { + kfree(conn); continue; + } conn->ibc_peer = peer; if (peer->ibp_reconnected < 
KIB_RECONN_HIGH_RACE) From fad7da7b638a1f5900bee31ed598b7c527d3a14c Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 3 Dec 2017 13:58:19 +0000 Subject: [PATCH 1667/3326] staging: ccree: NULLify backup_info when unused commit 46df8824982e4fb0198776078d4a8c3e2d531464 upstream. backup_info field is only allocated for decrypt code path. The field was not nullified when not used causing a kfree in an error handling path to attempt to free random addresses as uncovered in stress testing. Fixes: 737aed947f9b ("staging: ccree: save ciphertext for CTS IV") Signed-off-by: Gilad Ben-Yossef Signed-off-by: Greg Kroah-Hartman --- drivers/staging/ccree/ssi_cipher.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c index 8d31a93fd8b761..087a622f20b240 100644 --- a/drivers/staging/ccree/ssi_cipher.c +++ b/drivers/staging/ccree/ssi_cipher.c @@ -904,6 +904,7 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req) scatterwalk_map_and_copy(req_ctx->backup_info, req->src, (req->nbytes - ivsize), ivsize, 0); req_ctx->is_giv = false; + req_ctx->backup_info = NULL; return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT); } From eb6de1af2e7235133a0e2563c09926ca28361095 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Thu, 14 Dec 2017 14:02:46 +0000 Subject: [PATCH 1668/3326] staging: ccree: fix fips event irq handling build commit dc5591dc9c03e4cd22d3f0c3659196cc34668452 upstream. When moving from internal for kernel FIPS infrastructure the FIPS event irq handling code was left with the old ifdef by mistake. Fix it. Fixes: b7e607bf33a2 ("staging: ccree: move FIPS support to kernel infrastructure") Signed-off-by: Gilad Ben-Yossef Signed-off-by: Greg Kroah-Hartman --- drivers/staging/ccree/ssi_driver.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c index 9c6f1200c130a0..eeb995307951c7 100644 --- a/drivers/staging/ccree/ssi_driver.c +++ b/drivers/staging/ccree/ssi_driver.c @@ -141,7 +141,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) irr &= ~SSI_COMP_IRQ_MASK; complete_request(drvdata); } -#ifdef CC_SUPPORT_FIPS +#ifdef CONFIG_CRYPTO_FIPS /* TEE FIPS interrupt */ if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) { /* Mask interrupt - will be unmasked in Deferred service handler */ From 3c538ad935465d7ba4bf0b5747fadcfdf05311c9 Mon Sep 17 00:00:00 2001 From: Gaurav Kohli Date: Tue, 23 Jan 2018 13:16:34 +0530 Subject: [PATCH 1669/3326] tty: fix data race between tty_init_dev and flush of buf commit b027e2298bd588d6fa36ed2eda97447fb3eac078 upstream. There can be a race, if receive_buf call comes before tty initialization completes in n_tty_open and tty->disc_data may be NULL. CPU0 CPU1 ---- ---- 000|n_tty_receive_buf_common() n_tty_open() -001|n_tty_receive_buf2() tty_ldisc_open.isra.3() -002|tty_ldisc_receive_buf(inline) tty_ldisc_setup() Using ldisc semaphore lock in tty_init_dev till disc_data initializes completely. 
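A simplified sketch of the idea (reduced to a plain mutex and generic names; the tty layer itself uses its ldisc semaphore with a timeout): the setup path holds the lock across the whole initialization, so a concurrent receive path taking the same lock sees either no object at all or a fully initialized one, never a half-built one.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_disc { int ready; };

static DEFINE_MUTEX(example_lock);
static struct example_disc *example_active;

static int example_install(void)
{
        struct example_disc *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
                return -ENOMEM;

        mutex_lock(&example_lock);
        d->ready = 1;           /* finish initialization under the lock */
        example_active = d;     /* publish only when fully set up */
        mutex_unlock(&example_lock);
        return 0;
}

static int example_receive(void)
{
        int ret = -ENODEV;

        mutex_lock(&example_lock);
        if (example_active)     /* either absent or completely initialized */
                ret = example_active->ready;
        mutex_unlock(&example_lock);
        return ret;
}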
Signed-off-by: Gaurav Kohli Reviewed-by: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/tty/tty_io.c | 8 +++++++- drivers/tty/tty_ldisc.c | 4 ++-- include/linux/tty.h | 2 ++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 94cccb6efa3240..7892d0be8af9ae 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1322,6 +1322,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n", __func__, tty->driver->name); + retval = tty_ldisc_lock(tty, 5 * HZ); + if (retval) + goto err_release_lock; tty->port->itty = tty; /* @@ -1332,6 +1335,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) retval = tty_ldisc_setup(tty, tty->link); if (retval) goto err_release_tty; + tty_ldisc_unlock(tty); /* Return the tty locked so that it cannot vanish under the caller */ return tty; @@ -1344,9 +1348,11 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) /* call the tty release_tty routine to clean out this slot */ err_release_tty: - tty_unlock(tty); + tty_ldisc_unlock(tty); tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n", retval, idx); +err_release_lock: + tty_unlock(tty); release_tty(tty, idx); return ERR_PTR(retval); } diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 84a8ac2a779f6b..7c895684c3efd1 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -336,7 +336,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty) ldsem_up_write(&tty->ldisc_sem); } -static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) +int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) { int ret; @@ -347,7 +347,7 @@ static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) return 0; } -static void tty_ldisc_unlock(struct tty_struct *tty) +void tty_ldisc_unlock(struct tty_struct *tty) { clear_bit(TTY_LDISC_HALTED, &tty->flags); __tty_ldisc_unlock(tty); diff --git a/include/linux/tty.h b/include/linux/tty.h index 7ac8ba208b1fe2..0a6c71e0ad01ec 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -405,6 +405,8 @@ extern const char *tty_name(const struct tty_struct *tty); extern struct tty_struct *tty_kopen(dev_t device); extern void tty_kclose(struct tty_struct *tty); extern int tty_dev_name_to_number(const char *name, dev_t *number); +extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout); +extern void tty_ldisc_unlock(struct tty_struct *tty); #else static inline void tty_kref_put(struct tty_struct *tty) { } From 94791411130312ba655c944c4623de1ff4c5a614 Mon Sep 17 00:00:00 2001 From: OKAMOTO Yoshiaki Date: Tue, 16 Jan 2018 09:51:17 +0000 Subject: [PATCH 1670/3326] usb: option: Add support for FS040U modem commit 69341bd15018da0a662847e210f9b2380c71e623 upstream. FS040U modem is manufactured by omega, and sold by Fujisoft. This patch adds ID of the modem to use option1 driver. Interface 3 is used as qmi_wwan, so the interface is ignored. 
Signed-off-by: Yoshiaki Okamoto Signed-off-by: Hiroyuki Yamamoto Acked-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index a9400458cceaaf..dcf78a49892716 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -383,6 +383,9 @@ static void option_instat_callback(struct urb *urb); #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 +/* Fujisoft products */ +#define FUJISOFT_PRODUCT_FS040U 0x9b02 + /* iBall 3.5G connect wireless modem */ #define IBALL_3_5G_CONNECT 0x9605 @@ -1897,6 +1900,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), .driver_info = (kernel_ulong_t)&four_g_w100_blacklist }, + {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist}, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, From bfc372036b41fec5162c38c18aa824698c33b825 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 25 Jan 2018 09:48:55 +0100 Subject: [PATCH 1671/3326] USB: serial: pl2303: new device id for Chilitag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d08dd3f3dd2ae351b793fc5b76abdbf0fd317b12 upstream. This adds a new device id for Chilitag devices to the pl2303 driver. Reported-by: "Chu.Mike [朱堅宜]" Acked-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/pl2303.c | 1 + drivers/usb/serial/pl2303.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index a585b477415dde..34c5a75f98a750 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -41,6 +41,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 3b5a15d1dc0dd5..123289085ee259 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -17,6 +17,7 @@ #define PL2303_PRODUCT_ID_DCU11 0x1234 #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2 +#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8 #define PL2303_PRODUCT_ID_ALDIGA 0x0611 #define PL2303_PRODUCT_ID_MMX 0x0612 #define PL2303_PRODUCT_ID_GPRS 0x0609 From ca7964983ca453dd9902c40171e93a69f21ee3ee Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 14 Jan 2018 16:09:00 +0100 Subject: [PATCH 1672/3326] USB: cdc-acm: Do not log urb submission errors on disconnect commit f0386c083c2ce85284dc0b419d7b89c8e567c09f upstream. 
When disconnected sometimes the cdc-acm driver logs errors like these: [20278.039417] cdc_acm 2-2:2.1: urb 9 failed submission with -19 [20278.042924] cdc_acm 2-2:2.1: urb 10 failed submission with -19 [20278.046449] cdc_acm 2-2:2.1: urb 11 failed submission with -19 [20278.049920] cdc_acm 2-2:2.1: urb 12 failed submission with -19 [20278.053442] cdc_acm 2-2:2.1: urb 13 failed submission with -19 [20278.056915] cdc_acm 2-2:2.1: urb 14 failed submission with -19 [20278.060418] cdc_acm 2-2:2.1: urb 15 failed submission with -19 Silence these by not logging errors when the result is -ENODEV. Signed-off-by: Hans de Goede Acked-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-acm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 18c923a4c16e1f..b133fc68b3fdca 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -438,7 +438,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) res = usb_submit_urb(acm->read_urbs[index], mem_flags); if (res) { - if (res != -EPERM) { + if (res != -EPERM && res != -ENODEV) { dev_err(&acm->data->dev, "urb %d failed submission with %d\n", index, res); From 34b812ca30389a3759bf89d424c2d256185face3 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Thu, 18 Jan 2018 12:13:45 +0100 Subject: [PATCH 1673/3326] CDC-ACM: apply quirk for card reader commit df1cc78a52491f71d8170d513d0f6f114faa1bda upstream. This devices drops random bytes from messages if you talk to it too fast. Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-acm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index b133fc68b3fdca..4149a965516e88 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1765,6 +1765,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */ .driver_info = SINGLE_RX_URB, /* firmware bug */ }, + { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */ + .driver_info = SINGLE_RX_URB, + }, { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, From 9a24d3f8c566bfdb3bb2126055426c6ae952cdd5 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Wed, 13 Dec 2017 20:34:36 +0800 Subject: [PATCH 1674/3326] USB: serial: io_edgeport: fix possible sleep-in-atomic commit c7b8f77872c73f69a16528a9eb87afefcccdc18b upstream. According to drivers/usb/serial/io_edgeport.c, the driver may sleep under a spinlock. The function call path is: edge_bulk_in_callback (acquire the spinlock) process_rcvd_data process_rcvd_status change_port_settings send_iosp_ext_cmd write_cmd_usb usb_kill_urb --> may sleep To fix it, the redundant usb_kill_urb() is removed from the error path after usb_submit_urb() fails. This possible bug is found by my static analysis tool (DSAC) and checked by my code review. 
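A minimal sketch of the constraint involved (the cleanup helper and lock are illustrative, not the io_edgeport code): calls that may sleep, such as usb_kill_urb(), are not allowed while a spinlock is held, whereas dropping the URB reference with usb_free_urb() is safe in atomic context.

#include <linux/spinlock.h>
#include <linux/usb.h>

static void example_cleanup(struct urb *urb, spinlock_t *lock)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        /* usb_kill_urb(urb) must NOT be called here: it can sleep */
        usb_free_urb(urb);      /* releasing the reference does not sleep */
        spin_unlock_irqrestore(lock, flags);
}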
Signed-off-by: Jia-Ju Bai Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/io_edgeport.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index bdf8bd814a9aaf..01f3ac7769f303 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2286,7 +2286,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port, /* something went wrong */ dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n", __func__, status); - usb_kill_urb(urb); usb_free_urb(urb); atomic_dec(&CmdUrbs); return status; From aa7cdae765cf383067cb08a854f0d5738c684a78 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Wed, 17 Jan 2018 12:07:30 -0700 Subject: [PATCH 1675/3326] usbip: prevent bind loops on devices attached to vhci_hcd commit ef54cf0c600fb8f5737fb001a9e357edda1a1de8 upstream. usbip host binds to devices attached to vhci_hcd on the same server when user does attach over localhost or specifies the server as the remote. usbip attach -r localhost -b busid or usbip attach -r servername (or server IP) Unbind followed by bind works, however device is left in a bad state with accesses via the attached busid result in errors and system hangs during shutdown. Fix it to check and bail out if the device is already attached to vhci_hcd. Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- tools/usb/usbip/src/usbip_bind.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c index fa46141ae68bbd..e121cfb1746a4c 100644 --- a/tools/usb/usbip/src/usbip_bind.c +++ b/tools/usb/usbip/src/usbip_bind.c @@ -144,6 +144,7 @@ static int bind_device(char *busid) int rc; struct udev *udev; struct udev_device *dev; + const char *devpath; /* Check whether the device with this bus ID exists. */ udev = udev_new(); @@ -152,8 +153,16 @@ static int bind_device(char *busid) err("device with the specified bus ID does not exist"); return -1; } + devpath = udev_device_get_devpath(dev); udev_unref(udev); + /* If the device is already attached to vhci_hcd - bail out */ + if (strstr(devpath, USBIP_VHCI_DRV_NAME)) { + err("bind loop detected: device: %s is attached to %s\n", + devpath, USBIP_VHCI_DRV_NAME); + return -1; + } + rc = unbind_other(busid); if (rc == UNBIND_ST_FAILED) { err("could not unbind driver from device on busid %s", busid); From d00a0442a99e859ff4b04741abd157fa3bb6b30e Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Wed, 17 Jan 2018 12:08:03 -0700 Subject: [PATCH 1676/3326] usbip: list: don't list devices attached to vhci_hcd commit ef824501f50846589f02173d73ce3fe6021a9d2a upstream. usbip host lists devices attached to vhci_hcd on the same server when user does attach over localhost or specifies the server as the remote. usbip attach -r localhost -b busid or usbip attach -r servername (or server IP) Fix it to check and not list devices that are attached to vhci_hcd. Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- tools/usb/usbip/src/usbip_list.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c index f1b38e866dd7ea..d65a9f44417449 100644 --- a/tools/usb/usbip/src/usbip_list.c +++ b/tools/usb/usbip/src/usbip_list.c @@ -187,6 +187,7 @@ static int list_devices(bool parsable) const char *busid; char product_name[128]; int ret = -1; + const char *devpath; /* Create libudev context. 
*/ udev = udev_new(); @@ -209,6 +210,14 @@ static int list_devices(bool parsable) path = udev_list_entry_get_name(dev_list_entry); dev = udev_device_new_from_syspath(udev, path); + /* Ignore devices attached to vhci_hcd */ + devpath = udev_device_get_devpath(dev); + if (strstr(devpath, USBIP_VHCI_DRV_NAME)) { + dbg("Skip the device %s already attached to %s\n", + devpath, USBIP_VHCI_DRV_NAME); + continue; + } + /* Get device information. */ idVendor = udev_device_get_sysattr_value(dev, "idVendor"); idProduct = udev_device_get_sysattr_value(dev, "idProduct"); From 1c226267c2259112a9825d09a8c715568c3d8d10 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 18 Jan 2018 14:46:41 +1100 Subject: [PATCH 1677/3326] USB: serial: simple: add Motorola Tetra driver commit 46fe895e22ab3845515ec06b01eaf1282b342e29 upstream. Add new Motorola Tetra (simple) driver for Motorola Solutions TETRA PEI devices. D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1 P: Vendor=0cad ProdID=9011 Rev=24.16 S: Manufacturer=Motorola Solutions Inc. S: Product=Motorola Solutions TETRA PEI interface C: #Ifs= 2 Cfg#= 1 Atr=80 MxPwr=500mA I: If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=(none) I: If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=(none) Note that these devices do not support the CDC SET_CONTROL_LINE_STATE request (for any interface). Reported-by: Max Schulze Tested-by: Max Schulze Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/Kconfig | 1 + drivers/usb/serial/usb-serial-simple.c | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index a8d5f2e4878dca..c66b93664d5434 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE - Google USB serial devices - HP4x calculators - a number of Motorola phones + - Motorola Tetra devices - Novatel Wireless GPS receivers - Siemens USB/MPI adapter. - ViVOtech ViVOpay USB device. diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index e98b6e57b703db..6aa7ff2c1cf74b 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -80,6 +80,11 @@ DEVICE(vivopay, VIVOPAY_IDS); { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */ DEVICE(moto_modem, MOTO_IDS); +/* Motorola Tetra driver */ +#define MOTOROLA_TETRA_IDS() \ + { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */ +DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); + /* Novatel Wireless GPS driver */ #define NOVATEL_IDS() \ { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ @@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = { &google_device, &vivopay_device, &moto_modem_device, + &motorola_tetra_device, &novatel_gps_device, &hp4x_device, &suunto_device, @@ -125,6 +131,7 @@ static const struct usb_device_id id_table[] = { GOOGLE_IDS(), VIVOPAY_IDS(), MOTO_IDS(), + MOTOROLA_TETRA_IDS(), NOVATEL_IDS(), HP4X_IDS(), SUUNTO_IDS(), From 75816a439f6589b8f14d10c4b5f8c835ad76e036 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Tue, 9 Jan 2018 12:30:53 +0530 Subject: [PATCH 1678/3326] usb: f_fs: Prevent gadget unbind if it is already unbound commit ce5bf9a50daf2d9078b505aca1cea22e88ecb94a upstream. Upon usb composition switch there is possibility of ep0 file release happening after gadget driver bind. 
In case of composition switch from adb to a non-adb composition gadget will never gets bound again resulting into failure of usb device enumeration. Fix this issue by checking FFS_FL_BOUND flag and avoid extra gadget driver unbind if it is already done as part of composition switch. This fixes adb reconnection error reported on Android running v4.4 and above kernel versions. Verified on Hikey running vanilla v4.15-rc7 + few out of tree Mali patches. Reviewed-at: https://android-review.googlesource.com/#/c/582632/ Cc: Felipe Balbi Cc: Greg KH Cc: Michal Nazarewicz Cc: John Stultz Cc: Dmitry Shmidt Cc: Badhri Cc: Android Kernel Team Signed-off-by: Hemant Kumar [AmitP: Cherry-picked it from android-4.14 and updated the commit log] Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_fs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 876cdbec130743..c0491dd73f53bc 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3704,7 +3704,8 @@ static void ffs_closed(struct ffs_data *ffs) ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; ffs_dev_unlock(); - unregister_gadget_item(ci); + if (test_bit(FFS_FL_BOUND, &ffs->flags)) + unregister_gadget_item(ci); return; done: ffs_dev_unlock(); From e0096f93aa42d47f0277e28c3b26eaa5e1ebadd5 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Thu, 11 Jan 2018 13:10:16 +0100 Subject: [PATCH 1679/3326] usb: uas: unconditionally bring back host after reset commit cbeef22fd611c4f47c494b821b2b105b8af970bb upstream. Quoting Hans: If we return 1 from our post_reset handler, then our disconnect handler will be called immediately afterwards. Since pre_reset blocks all scsi requests our disconnect handler will then hang in the scsi_remove_host call. This is esp. bad because our disconnect handler hanging for ever also stops the USB subsys from enumerating any new USB devices, causes commands like lsusb to hang, etc. In practice this happens when unplugging some uas devices because the hub code may see the device as needing a warm-reset and calls usb_reset_device before seeing the disconnect. In this case uas_configure_endpoints fails with -ENODEV. We do not want to print an error for this, so this commit also silences the shost_printk for -ENODEV. ENDQUOTE However, if we do that we better drop any unconditional execution and report to the SCSI subsystem that we have undergone a reset but we are not operational now. Signed-off-by: Oliver Neukum Reported-by: Hans de Goede Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/uas.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 63cf981ed81cf8..0bc8543e96b17b 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -1076,20 +1076,19 @@ static int uas_post_reset(struct usb_interface *intf) return 0; err = uas_configure_endpoints(devinfo); - if (err) { + if (err && err != ENODEV) shost_printk(KERN_ERR, shost, "%s: alloc streams error %d after reset", __func__, err); - return 1; - } + /* we must unblock the host in every case lest we deadlock */ spin_lock_irqsave(shost->host_lock, flags); scsi_report_bus_reset(shost, 0); spin_unlock_irqrestore(shost->host_lock, flags); scsi_unblock_requests(shost); - return 0; + return err ? 
1 : 0; } static int uas_suspend(struct usb_interface *intf, pm_message_t message) From 2ba5966eddefdf04306bd1da36811c8b83a9664d Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 12 Jan 2018 17:50:02 +1100 Subject: [PATCH 1680/3326] usb/gadget: Fix "high bandwidth" check in usb_gadget_ep_match_desc() commit 11fb37998759c48e4e4c200c974593cbeab25d3e upstream. The current code tries to test for bits that are masked out by usb_endpoint_maxp(). Instead, use the proper accessor to access the new high bandwidth bits. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 284bd1a7b57032..794bb4958383fd 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -923,7 +923,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget, return 0; /* "high bandwidth" works only at high speed */ - if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11)) + if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1) return 0; switch (type) { From 7a3cee43e935b9d526ad07f20bf005ba7e74d05b Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Fri, 5 Jan 2018 11:27:07 +0100 Subject: [PATCH 1681/3326] ANDROID: binder: remove waitqueue when thread exits. commit f5cb779ba16334b45ba8946d6bfa6d9834d1527f upstream. binder_poll() passes the thread->wait waitqueue that can be slept on for work. When a thread that uses epoll explicitly exits using BINDER_THREAD_EXIT, the waitqueue is freed, but it is never removed from the corresponding epoll data structure. When the process subsequently exits, the epoll cleanup code tries to access the waitlist, which results in a use-after-free. Prevent this by using POLLFREE when the thread exits. Signed-off-by: Martijn Coenen Reported-by: syzbot Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index a340766b51fead..2ef8bd29e18870 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -4302,6 +4302,18 @@ static int binder_thread_release(struct binder_proc *proc, if (t) spin_lock(&t->lock); } + + /* + * If this thread used poll, make sure we remove the waitqueue + * from any epoll data structures holding it with POLLFREE. + * waitqueue_active() is safe to use here because we're holding + * the inner lock. + */ + if ((thread->looper & BINDER_LOOPER_STATE_POLL) && + waitqueue_active(&thread->wait)) { + wake_up_poll(&thread->wait, POLLHUP | POLLFREE); + } + binder_inner_proc_unlock(thread->proc); if (send_reply) From 86eda3864a704a98d5444ae60654b898731bfea5 Mon Sep 17 00:00:00 2001 From: Ganesh Mahendran Date: Wed, 10 Jan 2018 10:49:05 +0800 Subject: [PATCH 1682/3326] android: binder: use VM_ALLOC to get vm area commit aac6830ec1cb681544212838911cdc57f2638216 upstream. VM_IOREMAP is used to access hardware through a mechanism called I/O mapped memory. Android binder is a IPC machanism which will not access I/O memory. And VM_IOREMAP has alignment requiement which may not needed in binder. __get_vm_area_node() { ... if (flags & VM_IOREMAP) align = 1ul << clamp_t(int, fls_long(size), PAGE_SHIFT, IOREMAP_MAX_ORDER); ... } This patch will save some kernel vm area, especially for 32bit os. In 32bit OS, kernel vm area is only 240MB. 
We may got below error when launching a app: <3>[ 4482.440053] binder_alloc: binder_alloc_mmap_handler: 15728 8ce67000-8cf65000 get_vm_area failed -12 <3>[ 4483.218817] binder_alloc: binder_alloc_mmap_handler: 15745 8ce67000-8cf65000 get_vm_area failed -12 Signed-off-by: Ganesh Mahendran Acked-by: Martijn Coenen Acked-by: Todd Kjos Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index c2819a3d58a66e..6cb14826867605 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -668,7 +668,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, goto err_already_mapped; } - area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); + area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC); if (area == NULL) { ret = -ENOMEM; failure_string = "get_vm_area"; From e00c5c771877abdb70814ce555f020deab42bd46 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Tue, 2 Jan 2018 12:01:41 +0200 Subject: [PATCH 1683/3326] mei: me: allow runtime pm for platform with D0i3 commit cc365dcf0e56271bedf3de95f88922abe248e951 upstream. >From the pci power documentation: "The driver itself should not call pm_runtime_allow(), though. Instead, it should let user space or some platform-specific code do that (user space can do it via sysfs as stated above)..." However, the S0ix residency cannot be reached without MEI device getting into low power state. Hence, for mei devices that support D0i3, it's better to make runtime power management mandatory and not rely on the system integration such as udev rules. This policy cannot be applied globally as some older platforms were found to have broken power management. Cc: Rafael J. Wysocki Signed-off-by: Tomas Winkler Reviewed-by: Alexander Usyskin Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/pci-me.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 78b3172c8e6e29..d46cb1f0868f04 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -238,8 +238,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ mei_me_set_pm_domain(dev); - if (mei_pg_is_enabled(dev)) + if (mei_pg_is_enabled(dev)) { pm_runtime_put_noidle(&pdev->dev); + if (hw->d0i3_supported) + pm_runtime_allow(&pdev->dev); + } dev_dbg(&pdev->dev, "initialization successful.\n"); From aa33208b5a6f0014d9b7d966f61bd6c4f8cd9729 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 27 Dec 2017 14:21:05 +0900 Subject: [PATCH 1684/3326] serial: 8250_of: fix return code when probe function fails to get reset commit b9820a31691b771db37afe2054dd3d3a680c1eed upstream. The error pointer from devm_reset_control_get_optional_shared() is not propagated. One of the most common problem scenarios is it returns -EPROBE_DEFER when the reset controller has not probed yet. In this case, the probe of the reset consumer should be deferred. 
Fixes: e2860e1f62f2 ("serial: 8250_of: Add reset support") Signed-off-by: Masahiro Yamada Reviewed-by: Philipp Zabel Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_of.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c index 1222c005fb9889..951680640ad5b8 100644 --- a/drivers/tty/serial/8250/8250_of.c +++ b/drivers/tty/serial/8250/8250_of.c @@ -141,8 +141,11 @@ static int of_platform_serial_setup(struct platform_device *ofdev, } info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL); - if (IS_ERR(info->rst)) + if (IS_ERR(info->rst)) { + ret = PTR_ERR(info->rst); goto err_dispose; + } + ret = reset_control_deassert(info->rst); if (ret) goto err_dispose; From 24293a3970e11889cc123d3f0cf9bb9bbb2cf34f Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 4 Jan 2018 07:42:15 +0000 Subject: [PATCH 1685/3326] serial: 8250_uniphier: fix error return code in uniphier_uart_probe() commit 7defa77d2baca4d6eb85234f10f38ab618332e75 upstream. Fix to return a negative error code from the port register error handling case instead of 0, as done elsewhere in this function. Fixes: 39be40ce066d ("serial: 8250_uniphier: fix serial port index in private data") Signed-off-by: Wei Yongjun Acked-by: Masahiro Yamada Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_uniphier.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c index 8a10b10e27aae9..c206f173f912b2 100644 --- a/drivers/tty/serial/8250/8250_uniphier.c +++ b/drivers/tty/serial/8250/8250_uniphier.c @@ -259,12 +259,13 @@ static int uniphier_uart_probe(struct platform_device *pdev) up.dl_read = uniphier_serial_dl_read; up.dl_write = uniphier_serial_dl_write; - priv->line = serial8250_register_8250_port(&up); - if (priv->line < 0) { + ret = serial8250_register_8250_port(&up); + if (ret < 0) { dev_err(dev, "failed to register 8250 port\n"); clk_disable_unprepare(priv->clk); return ret; } + priv->line = ret; platform_set_drvdata(pdev, priv); From e6e7d6baa61a57bf61d0d3647a5037ad1013f4db Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Thu, 4 Jan 2018 15:58:34 -0200 Subject: [PATCH 1686/3326] serial: imx: Only wakeup via RTSDEN bit if the system has RTS/CTS commit 38b1f0fb42f772b8c9aac53593883a18ff5eb9d7 upstream. The wakeup mechanism via RTSDEN bit relies on the system using the RTS/CTS lines, so only allow such wakeup method when the system actually has RTS/CTS support. 
Fixes: bc85734b126f ("serial: imx: allow waking up on RTSD") Signed-off-by: Fabio Estevam Reviewed-by: Martin Kaiser Acked-by: Fugang Duan Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/imx.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 3657d745e90f0c..521500c575c825 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -2275,12 +2275,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on) val &= ~UCR3_AWAKEN; writel(val, sport->port.membase + UCR3); - val = readl(sport->port.membase + UCR1); - if (on) - val |= UCR1_RTSDEN; - else - val &= ~UCR1_RTSDEN; - writel(val, sport->port.membase + UCR1); + if (sport->have_rtscts) { + val = readl(sport->port.membase + UCR1); + if (on) + val |= UCR1_RTSDEN; + else + val &= ~UCR1_RTSDEN; + writel(val, sport->port.membase + UCR1); + } } static int imx_serial_port_suspend_noirq(struct device *dev) From cfd96cbd607ab5d63a33cd63673221f4d572ea8c Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Sun, 7 Jan 2018 15:05:49 +0100 Subject: [PATCH 1687/3326] spi: imx: do not access registers while clocks disabled commit d593574aff0ab846136190b1729c151c736727ec upstream. Since clocks are disabled except during message transfer clocks are also disabled when spi_imx_remove gets called. Accessing registers leads to a freeeze at least on a i.MX 6ULL. Enable clocks before disabling accessing the MXC_CSPICTRL register. Fixes: 9e556dcc55774 ("spi: spi-imx: only enable the clocks when we start to transfer a message") Signed-off-by: Stefan Agner Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi-imx.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index babb15f0799510..d51ca243a02886 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1496,12 +1496,23 @@ static int spi_imx_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct spi_imx_data *spi_imx = spi_master_get_devdata(master); + int ret; spi_bitbang_stop(&spi_imx->bitbang); + ret = clk_enable(spi_imx->clk_per); + if (ret) + return ret; + + ret = clk_enable(spi_imx->clk_ipg); + if (ret) { + clk_disable(spi_imx->clk_per); + return ret; + } + writel(0, spi_imx->base + MXC_CSPICTRL); - clk_unprepare(spi_imx->clk_ipg); - clk_unprepare(spi_imx->clk_per); + clk_disable_unprepare(spi_imx->clk_ipg); + clk_disable_unprepare(spi_imx->clk_per); spi_imx_sdma_exit(spi_imx); spi_master_put(master); From e2d4cdb7b8a4b2f8d4dfc2ae067229595ec0190c Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Fri, 5 Jan 2018 15:34:54 +0100 Subject: [PATCH 1688/3326] iio: adc: stm32: fix scan of multiple channels with DMA commit 04e491ca9df60ffe8637d00d68e5ab8bc73b30d5 upstream. By default, watermark is set to '1'. Watermark is used to fine tune cyclic dma buffer period. In case watermark is left untouched (e.g. 1) and several channels are being scanned, buffer period is wrongly set (e.g. to 1 sample). As a consequence, data is never pushed to upper layer. Fix buffer period size, by taking scan channels number into account. 
Fixes: 2763ea0585c9 ("iio: adc: stm32: add optional dma support") Signed-off-by: Fabrice Gasnier Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/stm32-adc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 4df32cf1650e7c..172753b14a4fc6 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1314,6 +1314,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) { struct stm32_adc *adc = iio_priv(indio_dev); unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2; + unsigned int rx_buf_sz = STM32_DMA_BUFFER_SIZE; /* * dma cyclic transfers are used, buffer is split into two periods. @@ -1322,7 +1323,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) * - one buffer (period) driver can push with iio_trigger_poll(). */ watermark = min(watermark, val * (unsigned)(sizeof(u16))); - adc->rx_buf_sz = watermark * 2; + adc->rx_buf_sz = min(rx_buf_sz, watermark * 2 * adc->num_conv); return 0; } From 0db5de4f4e1884c0ceb246bbcf450fbfd01881f7 Mon Sep 17 00:00:00 2001 From: Narcisa Ana Maria Vasile Date: Wed, 6 Dec 2017 18:57:58 +0200 Subject: [PATCH 1689/3326] iio: chemical: ccs811: Fix output of IIO_CONCENTRATION channels commit 8f114acd4e1a9cfa05b70bcc4219bc88197b5c9b upstream. in_concentration_raw should report, according to sysfs-bus-iio documentation, a "Raw (unscaled no offset etc.) percentage reading of a substance." Modify scale to convert from ppm/ppb to percentage: 1 ppm = 0.0001% 1 ppb = 0.0000001% There is no offset needed to convert the ppm/ppb to percentage, so remove offset from IIO_CONCENTRATION (IIO_MOD_CO2) channel. Cc'd stable to reduce chance of userspace breakage in the long run as we fix this wrong bit of ABI usage. Signed-off-by: Narcisa Ana Maria Vasile Reviewed-by: Matt Ranostay Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/chemical/ccs811.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index 840a6cbd5f0f43..8cfac6d1cec46d 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -91,7 +91,6 @@ static const struct iio_chan_spec ccs811_channels[] = { .channel2 = IIO_MOD_CO2, .modified = 1, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), .scan_index = 0, .scan_type = { @@ -245,24 +244,18 @@ static int ccs811_read_raw(struct iio_dev *indio_dev, switch (chan->channel2) { case IIO_MOD_CO2: *val = 0; - *val2 = 12834; + *val2 = 100; return IIO_VAL_INT_PLUS_MICRO; case IIO_MOD_VOC: *val = 0; - *val2 = 84246; - return IIO_VAL_INT_PLUS_MICRO; + *val2 = 100; + return IIO_VAL_INT_PLUS_NANO; default: return -EINVAL; } default: return -EINVAL; } - case IIO_CHAN_INFO_OFFSET: - if (!(chan->type == IIO_CONCENTRATION && - chan->channel2 == IIO_MOD_CO2)) - return -EINVAL; - *val = -400; - return IIO_VAL_INT; default: return -EINVAL; } From aad757b657176409e803b487bb76becf729a4a58 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 11 Jan 2018 11:12:55 +0000 Subject: [PATCH 1690/3326] test_firmware: fix missing unlock on error in config_num_requests_store() commit a5e1923356505e46476c2fb518559b7a4d9d25b1 upstream. Add the missing unlock before return from function config_num_requests_store() in the error handling case. 
Fixes: c92316bf8e94 ("test_firmware: add batched firmware tests") Signed-off-by: Wei Yongjun Signed-off-by: Greg Kroah-Hartman --- lib/test_firmware.c | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 64a4c76cba2ba8..e7008688769bbb 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -371,6 +371,7 @@ static ssize_t config_num_requests_store(struct device *dev, if (test_fw_config->reqs) { pr_err("Must call release_all_firmware prior to changing config\n"); rc = -EINVAL; + mutex_unlock(&test_fw_mutex); goto out; } mutex_unlock(&test_fw_mutex); From fced3c99e7264237aeba0b420e892cfa7bb36b9c Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 17 Jan 2018 15:46:18 -0800 Subject: [PATCH 1691/3326] Input: synaptics-rmi4 - unmask F03 interrupts when port is opened commit 6abe534f0776d2437c8302f58d8eb5abd483e926 upstream. Currently we register the pass-through serio port when we probe the F03 RMI function, and then, in sensor configure phase, we unmask interrupts. Unfortunately this is too late, as other drivers are free probe devices attached to the serio port as soon as it is probed. Because interrupts are masked, the IO times out, which may result in not being able to detect trackpoints on the pass-through port. To fix the issue we implement open() and close() methods for the pass-through serio port and unmask interrupts from there. We also move creation of the pass-through port form probe to configure stage, as RMI driver does not enable transport interrupt until all functions are probed (we should change this, but this is a separate topic). We also try to clear the pending data before unmasking interrupts, because some devices like to spam the system with multiple 0xaa 0x00 announcements, which may interfere with us trying to query ID of the device. Fixes: c5e8848fc98e ("Input: synaptics-rmi4 - add support for F03") Reviewed-by: Lyude Paul Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/rmi4/rmi_f03.c | 64 ++++++++++++++++++++++++++++++------ 1 file changed, 54 insertions(+), 10 deletions(-) diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c index ad71a5e768dc46..7ccbb370a9a81e 100644 --- a/drivers/input/rmi4/rmi_f03.c +++ b/drivers/input/rmi4/rmi_f03.c @@ -32,6 +32,7 @@ struct f03_data { struct rmi_function *fn; struct serio *serio; + bool serio_registered; unsigned int overwrite_buttons; @@ -138,6 +139,37 @@ static int rmi_f03_initialize(struct f03_data *f03) return 0; } +static int rmi_f03_pt_open(struct serio *serio) +{ + struct f03_data *f03 = serio->port_data; + struct rmi_function *fn = f03->fn; + const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE; + const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET; + u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE]; + int error; + + /* + * Consume any pending data. Some devices like to spam with + * 0xaa 0x00 announcements which may confuse us as we try to + * probe the device. 
+ */ + error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len); + if (!error) + rmi_dbg(RMI_DEBUG_FN, &fn->dev, + "%s: Consumed %*ph (%d) from PS2 guest\n", + __func__, ob_len, obs, ob_len); + + return fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); +} + +static void rmi_f03_pt_close(struct serio *serio) +{ + struct f03_data *f03 = serio->port_data; + struct rmi_function *fn = f03->fn; + + fn->rmi_dev->driver->clear_irq_bits(fn->rmi_dev, fn->irq_mask); +} + static int rmi_f03_register_pt(struct f03_data *f03) { struct serio *serio; @@ -148,6 +180,8 @@ static int rmi_f03_register_pt(struct f03_data *f03) serio->id.type = SERIO_PS_PSTHRU; serio->write = rmi_f03_pt_write; + serio->open = rmi_f03_pt_open; + serio->close = rmi_f03_pt_close; serio->port_data = f03; strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through", @@ -184,17 +218,27 @@ static int rmi_f03_probe(struct rmi_function *fn) f03->device_count); dev_set_drvdata(dev, f03); - - error = rmi_f03_register_pt(f03); - if (error) - return error; - return 0; } static int rmi_f03_config(struct rmi_function *fn) { - fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); + struct f03_data *f03 = dev_get_drvdata(&fn->dev); + int error; + + if (!f03->serio_registered) { + error = rmi_f03_register_pt(f03); + if (error) + return error; + + f03->serio_registered = true; + } else { + /* + * We must be re-configuring the sensor, just enable + * interrupts for this function. + */ + fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); + } return 0; } @@ -204,7 +248,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) struct rmi_device *rmi_dev = fn->rmi_dev; struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); struct f03_data *f03 = dev_get_drvdata(&fn->dev); - u16 data_addr = fn->fd.data_base_addr; + const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET; const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE; u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE]; u8 ob_status; @@ -226,8 +270,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) drvdata->attn_data.size -= ob_len; } else { /* Grab all of the data registers, and check them for data */ - error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET, - &obs, ob_len); + error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len); if (error) { dev_err(&fn->dev, "%s: Failed to read F03 output buffers: %d\n", @@ -266,7 +309,8 @@ static void rmi_f03_remove(struct rmi_function *fn) { struct f03_data *f03 = dev_get_drvdata(&fn->dev); - serio_unregister_port(f03->serio); + if (f03->serio_registered) + serio_unregister_port(f03->serio); } struct rmi_function_handler rmi_f03_handler = { From 01ab9886ea7815d41a4ee435524c3fce127e7290 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 17 Jan 2018 16:18:27 -0800 Subject: [PATCH 1692/3326] Input: synaptics-rmi4 - do not delete interrupt memory too early commit a1ab69021a584d952e6548a44b93760547b1b6b5 upstream. We want to free memory reserved for interrupt mask handling only after we free functions, as function drivers might want to mask interrupts. This is needed for the followup patch to the F03 that would implement unmasking and masking interrupts from the serio pass-through port open() and close() methods. 
Reviewed-by: Lyude Paul Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/rmi4/rmi_driver.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 141ea228aac6a3..f5954981e9ee5b 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -41,6 +41,13 @@ void rmi_free_function_list(struct rmi_device *rmi_dev) rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n"); + /* Doing it in the reverse order so F01 will be removed last */ + list_for_each_entry_safe_reverse(fn, tmp, + &data->function_list, node) { + list_del(&fn->node); + rmi_unregister_function(fn); + } + devm_kfree(&rmi_dev->dev, data->irq_memory); data->irq_memory = NULL; data->irq_status = NULL; @@ -50,13 +57,6 @@ void rmi_free_function_list(struct rmi_device *rmi_dev) data->f01_container = NULL; data->f34_container = NULL; - - /* Doing it in the reverse order so F01 will be removed last */ - list_for_each_entry_safe_reverse(fn, tmp, - &data->function_list, node) { - list_del(&fn->node); - rmi_unregister_function(fn); - } } static int reset_one_function(struct rmi_function *fn) From 04178b1709b6e98c929ae7ca2668c85334e886b2 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Tue, 16 Jan 2018 09:10:02 +0000 Subject: [PATCH 1693/3326] x86/efi: Clarify that reset attack mitigation needs appropriate userspace commit a5c03c31af2291f13689d11760c0b59fb70c9a5a upstream. Some distributions have turned on the reset attack mitigation feature, which is designed to force the platform to clear the contents of RAM if the machine is shut down uncleanly. However, in order for the platform to be able to determine whether the shutdown was clean or not, userspace has to be configured to clear the MemoryOverwriteRequest flag on shutdown - otherwise the firmware will end up clearing RAM on every reboot, which is unnecessarily time consuming. Add some additional clarity to the kconfig text to reduce the risk of systems being configured this way. Signed-off-by: Matthew Garrett Acked-by: Ard Biesheuvel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/efi/Kconfig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 2b4c39fdfa9141..86210f75d23331 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -159,7 +159,10 @@ config RESET_ATTACK_MITIGATION using the TCG Platform Reset Attack Mitigation specification. This protects against an attacker forcibly rebooting the system while it still contains secrets in RAM, booting another OS and extracting the - secrets. + secrets. This should only be enabled when userland is configured to + clear the MemoryOverwriteRequest flag on clean shutdown after secrets + have been evicted, since otherwise it will trigger even on clean + reboots. 
endmenu From 0146985addc322ca518d27fecfe98ae5acda7c1e Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 3 Feb 2018 17:39:25 +0100 Subject: [PATCH 1694/3326] Linux 4.14.17 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 90a4bffa844673..7ed993896dd54c 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 16 +SUBLEVEL = 17 EXTRAVERSION = NAME = Petit Gorille From 16d3d10d29f753d0247d94f5e889e93dd13f56bb Mon Sep 17 00:00:00 2001 From: "Liu, Changcheng" Date: Thu, 14 Dec 2017 15:32:48 -0800 Subject: [PATCH 1695/3326] scripts/faddr2line: fix CROSS_COMPILE unset error commit 4cc90b4cc3d4955f79eae4f7f9d64e67e17b468e upstream. faddr2line hit var unbound error when CROSS_COMPILE isn't set since nounset option is set in bash script. Link: http://lkml.kernel.org/r/20171206013022.GA83929@sofia Fixes: 95a879825419 ("scripts/faddr2line: extend usage on generic arch") Signed-off-by: Liu Changcheng Reported-by: Richard Weinberger Reviewed-by: Richard Weinberger Cc: Thomas Gleixner Cc: Greg Kroah-Hartman Cc: Philippe Ombredanne Cc: NeilBrown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- scripts/faddr2line | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/faddr2line b/scripts/faddr2line index 39e07d8574dd78..7721d5b2b0c04e 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line @@ -44,10 +44,10 @@ set -o errexit set -o nounset -READELF="${CROSS_COMPILE}readelf" -ADDR2LINE="${CROSS_COMPILE}addr2line" -SIZE="${CROSS_COMPILE}size" -NM="${CROSS_COMPILE}nm" +READELF="${CROSS_COMPILE:-}readelf" +ADDR2LINE="${CROSS_COMPILE:-}addr2line" +SIZE="${CROSS_COMPILE:-}size" +NM="${CROSS_COMPILE:-}nm" command -v awk >/dev/null 2>&1 || die "awk isn't installed" command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed" From 517bdccc3af650316046801fdcd828d7b6aa42ba Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 16 Jan 2018 21:20:05 +1100 Subject: [PATCH 1696/3326] powerpc/64s: Wire up cpu_show_meltdown() commit fd6e440f20b1a4304553775fc55938848ff617c9 upstream. The recent commit 87590ce6e373 ("sysfs/cpu: Add vulnerability folder") added a generic folder and set of files for reporting information on CPU vulnerabilities. One of those was for meltdown: /sys/devices/system/cpu/vulnerabilities/meltdown This commit wires up that file for 64-bit Book3S powerpc. For now we default to "Vulnerable" unless the RFI flush is enabled. That may not actually be true on all hardware, further patches will refine the reporting based on the CPU/platform etc. But for now we default to being pessimists. 
Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/Kconfig | 1 + arch/powerpc/kernel/setup_64.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index cb782ac1c35d5c..fe418226df7fc9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -164,6 +164,7 @@ config PPC select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE + select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64 select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL select GENERIC_SMP_IDLE_THREAD diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 935059cb9e403e..4fac5cca19e10f 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -884,4 +884,12 @@ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) if (!no_rfi_flush) rfi_flush_enable(enable); } + +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (rfi_flush) + return sprintf(buf, "Mitigation: RFI Flush\n"); + + return sprintf(buf, "Vulnerable\n"); +} #endif /* CONFIG_PPC_BOOK3S_64 */ From d6eded6c94530cb2f359f1fbffcb5f92ee0fa8b2 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 16 Jan 2018 22:17:18 +1100 Subject: [PATCH 1697/3326] powerpc/64s: Allow control of RFI flush via debugfs commit 236003e6b5443c45c18e613d2b0d776a9f87540e upstream. Expose the state of the RFI flush (enabled/disabled) via debugfs, and allow it to be enabled/disabled at runtime. eg: $ cat /sys/kernel/debug/powerpc/rfi_flush 1 $ echo 0 > /sys/kernel/debug/powerpc/rfi_flush $ cat /sys/kernel/debug/powerpc/rfi_flush 0 Signed-off-by: Michael Ellerman Reviewed-by: Nicholas Piggin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/setup_64.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4fac5cca19e10f..9527a4c6cbc27c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -38,6 +38,7 @@ #include #include +#include #include #include #include @@ -885,6 +886,35 @@ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) rfi_flush_enable(enable); } +#ifdef CONFIG_DEBUG_FS +static int rfi_flush_set(void *data, u64 val) +{ + if (val == 1) + rfi_flush_enable(true); + else if (val == 0) + rfi_flush_enable(false); + else + return -EINVAL; + + return 0; +} + +static int rfi_flush_get(void *data, u64 *val) +{ + *val = rfi_flush ? 1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); + +static __init int rfi_flush_debugfs_init(void) +{ + debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); + return 0; +} +device_initcall(rfi_flush_debugfs_init); +#endif + ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { if (rfi_flush) From 4ce354deed2cc18b5b2175cf487011e375e89c75 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Mon, 22 Jan 2018 17:09:34 -0500 Subject: [PATCH 1698/3326] x86/retpoline: Remove the esp/rsp thunk commit 1df37383a8aeabb9b418698f0bcdffea01f4b1b2 It doesn't make sense to have an indirect call thunk with esp/rsp as retpoline code won't work correctly with the stack pointer register. Removing it will help compiler writers to catch error in case such a thunk call is emitted incorrectly. 
Fixes: 76b043848fd2 ("x86/retpoline: Add initial retpoline support") Suggested-by: Jeff Law Signed-off-by: Waiman Long Signed-off-by: Thomas Gleixner Acked-by: David Woodhouse Cc: Tom Lendacky Cc: Kees Cook Cc: Andi Kleen Cc: Tim Chen Cc: Peter Zijlstra Cc: Linus Torvalds Cc: Jiri Kosina Cc: Andy Lutomirski Cc: Dave Hansen Cc: Josh Poimboeuf Cc: Arjan van de Ven Cc: Greg Kroah-Hartman Cc: Paul Turner Link: https://lkml.kernel.org/r/1516658974-27852-1-git-send-email-longman@redhat.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/asm-prototypes.h | 1 - arch/x86/lib/retpoline.S | 1 - 2 files changed, 2 deletions(-) diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index 0927cdc4f94601..1908214b91257f 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -38,5 +38,4 @@ INDIRECT_THUNK(dx) INDIRECT_THUNK(si) INDIRECT_THUNK(di) INDIRECT_THUNK(bp) -INDIRECT_THUNK(sp) #endif /* CONFIG_RETPOLINE */ diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index dfb2ba91b670dc..c909961e678a59 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -36,7 +36,6 @@ GENERATE_THUNK(_ASM_DX) GENERATE_THUNK(_ASM_SI) GENERATE_THUNK(_ASM_DI) GENERATE_THUNK(_ASM_BP) -GENERATE_THUNK(_ASM_SP) #ifdef CONFIG_64BIT GENERATE_THUNK(r8) GENERATE_THUNK(r9) From 76bee09efb89b0a71566bec5f36d9e65d9034aef Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 25 Jan 2018 10:58:13 +0100 Subject: [PATCH 1699/3326] KVM: x86: Make indirect calls in emulator speculation safe commit 1a29b5b7f347a1a9230c1e0af5b37e3e571588ab Replace the indirect calls with CALL_NOSPEC. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Reviewed-by: David Woodhouse Cc: Andrea Arcangeli Cc: Andi Kleen Cc: Ashok Raj Cc: Greg KH Cc: Jun Nakajima Cc: David Woodhouse Cc: Linus Torvalds Cc: rga@amazon.de Cc: Dave Hansen Cc: Asit Mallick Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Jason Baron Cc: Paolo Bonzini Cc: Dan Williams Cc: Arjan Van De Ven Cc: Tim Chen Link: https://lkml.kernel.org/r/20180125095843.595615683@infradead.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/emulate.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index eca6a89f232608..fab073b195288d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "x86.h" #include "tss.h" @@ -1021,8 +1022,8 @@ static __always_inline u8 test_cc(unsigned int condition, unsigned long flags) void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; - asm("push %[flags]; popf; call *%[fastop]" - : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); + asm("push %[flags]; popf; " CALL_NOSPEC + : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags)); return rc; } @@ -5350,9 +5351,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) if (!(ctxt->d & ByteOp)) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; - asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" + asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), - [fastop]"+S"(fop), ASM_CALL_CONSTRAINT + [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); From c927726674c7dfe42e8de585b70ffbd9ec775fc6 Mon Sep 17 
00:00:00 2001 From: Peter Zijlstra Date: Thu, 25 Jan 2018 10:58:14 +0100 Subject: [PATCH 1700/3326] KVM: VMX: Make indirect call speculation safe commit c940a3fb1e2e9b7d03228ab28f375fb5a47ff699 Replace indirect call with CALL_NOSPEC. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Reviewed-by: David Woodhouse Cc: Andrea Arcangeli Cc: Andi Kleen Cc: Ashok Raj Cc: Greg KH Cc: Jun Nakajima Cc: David Woodhouse Cc: Linus Torvalds Cc: rga@amazon.de Cc: Dave Hansen Cc: Asit Mallick Cc: Andy Lutomirski Cc: Josh Poimboeuf Cc: Jason Baron Cc: Paolo Bonzini Cc: Dan Williams Cc: Arjan Van De Ven Cc: Tim Chen Link: https://lkml.kernel.org/r/20180125095843.645776917@infradead.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a45063a9219c05..213c092f2a9fb9 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -9118,14 +9118,14 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) #endif "pushf\n\t" __ASM_SIZE(push) " $%c[cs]\n\t" - "call *%[entry]\n\t" + CALL_NOSPEC : #ifdef CONFIG_X86_64 [sp]"=&r"(tmp), #endif ASM_CALL_CONSTRAINT : - [entry]"r"(entry), + THUNK_TARGET(entry), [ss]"i"(__KERNEL_DS), [cs]"i"(__KERNEL_CS) ); From 86b5b1eb18aa49eedff2c9a9087fc48d03099844 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Thu, 25 Jan 2018 15:50:28 -0800 Subject: [PATCH 1701/3326] module/retpoline: Warn about missing retpoline in module commit caf7501a1b4ec964190f31f9c3f163de252273b8 There's a risk that a kernel which has full retpoline mitigations becomes vulnerable when a module gets loaded that hasn't been compiled with the right compiler or the right option. To enable detection of that mismatch at module load time, add a module info string "retpoline" at build time when the module was compiled with retpoline support. This only covers compiled C source, but assembler source or prebuilt object files are not checked. If a retpoline enabled kernel detects a non retpoline protected module at load time, print a warning and report it in the sysfs vulnerability file. 
[ tglx: Massaged changelog ] Signed-off-by: Andi Kleen Signed-off-by: Thomas Gleixner Cc: David Woodhouse Cc: gregkh@linuxfoundation.org Cc: torvalds@linux-foundation.org Cc: jeyu@kernel.org Cc: arjan@linux.intel.com Link: https://lkml.kernel.org/r/20180125235028.31211-1-andi@firstfloor.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 17 ++++++++++++++++- include/linux/module.h | 9 +++++++++ kernel/module.c | 11 +++++++++++ scripts/mod/modpost.c | 9 +++++++++ 4 files changed, 45 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 390b3dc3d43827..4a39d7bb4bd87b 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -93,6 +94,19 @@ static const char *spectre_v2_strings[] = { #define pr_fmt(fmt) "Spectre V2 mitigation: " fmt static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; +static bool spectre_v2_bad_module; + +#ifdef RETPOLINE +bool retpoline_module_ok(bool has_retpoline) +{ + if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) + return true; + + pr_err("System may be vunerable to spectre v2\n"); + spectre_v2_bad_module = true; + return false; +} +#endif static void __init spec2_print_if_insecure(const char *reason) { @@ -278,6 +292,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) return sprintf(buf, "Not affected\n"); - return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]); + return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled], + spectre_v2_bad_module ? " - vulnerable module loaded" : ""); } #endif diff --git a/include/linux/module.h b/include/linux/module.h index fe5aa373670729..b1cc541f2ddf23 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -794,6 +794,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr, static inline void module_bug_cleanup(struct module *mod) {} #endif /* CONFIG_GENERIC_BUG */ +#ifdef RETPOLINE +extern bool retpoline_module_ok(bool has_retpoline); +#else +static inline bool retpoline_module_ok(bool has_retpoline) +{ + return true; +} +#endif + #ifdef CONFIG_MODULE_SIG static inline bool module_sig_ok(struct module *module) { diff --git a/kernel/module.c b/kernel/module.c index de66ec82599289..690c0651c40f4c 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2855,6 +2855,15 @@ static int check_modinfo_livepatch(struct module *mod, struct load_info *info) } #endif /* CONFIG_LIVEPATCH */ +static void check_modinfo_retpoline(struct module *mod, struct load_info *info) +{ + if (retpoline_module_ok(get_modinfo(info, "retpoline"))) + return; + + pr_warn("%s: loading module not compiled with retpoline compiler.\n", + mod->name); +} + /* Sets info->hdr and info->len. 
*/ static int copy_module_from_user(const void __user *umod, unsigned long len, struct load_info *info) @@ -3021,6 +3030,8 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags) add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); } + check_modinfo_retpoline(mod, info); + if (get_modinfo(info, "staging")) { add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); pr_warn("%s: module is from the staging directory, the quality " diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 98314b400a95cd..54deaa1066cf04 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -2165,6 +2165,14 @@ static void add_intree_flag(struct buffer *b, int is_intree) buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n"); } +/* Cannot check for assembler */ +static void add_retpoline(struct buffer *b) +{ + buf_printf(b, "\n#ifdef RETPOLINE\n"); + buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n"); + buf_printf(b, "#endif\n"); +} + static void add_staging_flag(struct buffer *b, const char *name) { static const char *staging_dir = "drivers/staging"; @@ -2506,6 +2514,7 @@ int main(int argc, char **argv) err |= check_modname_len(mod); add_header(&buf, mod); add_intree_flag(&buf, !external_module); + add_retpoline(&buf); add_staging_flag(&buf, mod->name); err |= add_versions(&buf, mod); add_depends(&buf, mod, modules); From 76c4bd53969bcbaf19fe12218785411b390fd14b Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 25 Jan 2018 16:14:09 +0000 Subject: [PATCH 1702/3326] x86/cpufeatures: Add CPUID_7_EDX CPUID leaf commit 95ca0ee8636059ea2800dfbac9ecac6212d6b38f This is a pure feature bits leaf. There are two AVX512 feature bits in it already which were handled as scattered bits, and three more from this leaf are going to be added for speculation control features. 
Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Reviewed-by: Borislav Petkov Cc: gnomes@lxorguk.ukuu.org.uk Cc: ak@linux.intel.com Cc: ashok.raj@intel.com Cc: dave.hansen@intel.com Cc: karahmed@amazon.de Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: peterz@infradead.org Cc: bp@alien8.de Cc: pbonzini@redhat.com Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1516896855-7642-2-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeature.h | 7 +++++-- arch/x86/include/asm/cpufeatures.h | 8 +++++--- arch/x86/include/asm/disabled-features.h | 3 ++- arch/x86/include/asm/required-features.h | 3 ++- arch/x86/kernel/cpu/common.c | 1 + arch/x86/kernel/cpu/scattered.c | 2 -- 6 files changed, 15 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index ea9a7dde62e5c4..70eddb3922ff7b 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -29,6 +29,7 @@ enum cpuid_leafs CPUID_8000_000A_EDX, CPUID_7_ECX, CPUID_8000_0007_EBX, + CPUID_7_EDX, }; #ifdef CONFIG_X86_FEATURE_NAMES @@ -79,8 +80,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 18)) + BUILD_BUG_ON_ZERO(NCAPINTS != 19)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -101,8 +103,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 18)) + BUILD_BUG_ON_ZERO(NCAPINTS != 19)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 25b9375c1484b4..7b25cf30d25d2e 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 18 /* N 32-bit words worth of info */ +#define NCAPINTS 19 /* N 32-bit words worth of info */ #define NBUGINTS 1 /* N 32-bit bug flags */ /* @@ -206,8 +206,6 @@ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ -#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ -#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ @@ -319,6 +317,10 @@ #define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */ #define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */ +/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ +#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ +#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ + /* * BUG word(s) */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index e428e16dd82247..c6a3af198294e6 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -71,6 +71,7 @@ #define DISABLED_MASK15 0 #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) #define DISABLED_MASK17 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) +#define DISABLED_MASK18 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index d91ba04dd00709..fb3a6de7440bce 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -106,6 +106,7 @@ #define REQUIRED_MASK15 0 #define REQUIRED_MASK16 (NEED_LA57) #define REQUIRED_MASK17 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) +#define REQUIRED_MASK18 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 372ba3fb400fad..e5d66e93ed8115 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -745,6 +745,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); c->x86_capability[CPUID_7_0_EBX] = ebx; c->x86_capability[CPUID_7_ECX] = ecx; + c->x86_capability[CPUID_7_EDX] = edx; } /* Extended state features: level 0x0000000d */ diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index d0e69769abfd39..df11f5d604be83 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -21,8 +21,6 @@ struct cpuid_bit { static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, - { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 }, - { 
X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 }, { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 }, { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 }, From 343c91242d092852ab22411780f886317d7001aa Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 25 Jan 2018 16:14:10 +0000 Subject: [PATCH 1703/3326] x86/cpufeatures: Add Intel feature bits for Speculation Control commit fc67dd70adb711a45d2ef34e12d1a8be75edde61 Add three feature bits exposed by new microcode on Intel CPUs for speculation control. Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Reviewed-by: Borislav Petkov Cc: gnomes@lxorguk.ukuu.org.uk Cc: ak@linux.intel.com Cc: ashok.raj@intel.com Cc: dave.hansen@intel.com Cc: karahmed@amazon.de Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: peterz@infradead.org Cc: bp@alien8.de Cc: pbonzini@redhat.com Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1516896855-7642-3-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 7b25cf30d25d2e..0a510700271622 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -320,6 +320,9 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_SPEC_CTRL (18*32+26) /* Speculation Control (IBRS + IBPB) */ +#define X86_FEATURE_STIBP (18*32+27) /* Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ /* * BUG word(s) From 15ee82be40b6895edeb324435621ef1869aac173 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 25 Jan 2018 16:14:11 +0000 Subject: [PATCH 1704/3326] x86/cpufeatures: Add AMD feature bits for Speculation Control commit 5d10cbc91d9eb5537998b65608441b592eec65e7 AMD exposes the PRED_CMD/SPEC_CTRL MSRs slightly differently to Intel. 
See http://lkml.kernel.org/r/2b3e25cc-286d-8bd0-aeaf-9ac4aae39de8@amd.com Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Cc: Tom Lendacky Cc: gnomes@lxorguk.ukuu.org.uk Cc: ak@linux.intel.com Cc: ashok.raj@intel.com Cc: dave.hansen@intel.com Cc: karahmed@amazon.de Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: peterz@infradead.org Cc: bp@alien8.de Cc: pbonzini@redhat.com Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1516896855-7642-4-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 0a510700271622..ae3212f14dece4 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -269,6 +269,9 @@ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ +#define X86_FEATURE_AMD_PRED_CMD (13*32+12) /* Prediction Command MSR (AMD) */ +#define X86_FEATURE_AMD_SPEC_CTRL (13*32+14) /* Speculation Control MSR only (AMD) */ +#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors (AMD) */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ From a65710dc584c1c424eb358a1f6e8c01d706e9f81 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 25 Jan 2018 16:14:12 +0000 Subject: [PATCH 1705/3326] x86/msr: Add definitions for new speculation control MSRs commit 1e340c60d0dd3ae07b5bedc16a0469c14b9f3410 Add MSR and bit definitions for SPEC_CTRL, PRED_CMD and ARCH_CAPABILITIES. See Intel's 336996-Speculative-Execution-Side-Channel-Mitigations.pdf Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Cc: gnomes@lxorguk.ukuu.org.uk Cc: ak@linux.intel.com Cc: ashok.raj@intel.com Cc: dave.hansen@intel.com Cc: karahmed@amazon.de Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: peterz@infradead.org Cc: bp@alien8.de Cc: pbonzini@redhat.com Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1516896855-7642-5-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/msr-index.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index fa11fb1fa570e9..eb83ff1bae8f1a 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -39,6 +39,13 @@ /* Intel MSRs. 
Some also available on other CPUs */ +#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ +#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ +#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ + +#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ +#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ + #define MSR_PPIN_CTL 0x0000004e #define MSR_PPIN 0x0000004f @@ -57,6 +64,11 @@ #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) #define MSR_MTRRcap 0x000000fe + +#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a +#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ +#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ + #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e From d9b47a41160c71b217582b9876129e3fbf587225 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 25 Jan 2018 16:14:13 +0000 Subject: [PATCH 1706/3326] x86/pti: Do not enable PTI on CPUs which are not vulnerable to Meltdown commit fec9434a12f38d3aeafeb75711b71d8a1fdef621 Also, for CPUs which don't speculate at all, don't report that they're vulnerable to the Spectre variants either. Leave the cpu_no_meltdown[] match table with just X86_VENDOR_AMD in it for now, even though that could be done with a simple comparison, on the assumption that we'll have more to add. Based on suggestions from Dave Hansen and Alan Cox. Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Reviewed-by: Borislav Petkov Acked-by: Dave Hansen Cc: gnomes@lxorguk.ukuu.org.uk Cc: ak@linux.intel.com Cc: ashok.raj@intel.com Cc: karahmed@amazon.de Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: peterz@infradead.org Cc: bp@alien8.de Cc: pbonzini@redhat.com Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1516896855-7642-6-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/common.c | 48 ++++++++++++++++++++++++++++++++---- 1 file changed, 43 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index e5d66e93ed8115..970ee06dc8aad6 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -47,6 +47,8 @@ #include #include #include +#include +#include #ifdef CONFIG_X86_LOCAL_APIC #include @@ -853,6 +855,41 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) #endif } +static const __initdata struct x86_cpu_id cpu_no_speculation[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, + { X86_VENDOR_CENTAUR, 5 }, + { X86_VENDOR_INTEL, 5 }, + { X86_VENDOR_NSC, 5 }, + { X86_VENDOR_ANY, 4 }, + {} +}; + +static const __initdata struct x86_cpu_id cpu_no_meltdown[] = { + { X86_VENDOR_AMD }, + {} +}; + +static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c) +{ + u64 ia32_cap = 0; + + if (x86_match_cpu(cpu_no_meltdown)) + return false; + + if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); + + /* Rogue Data Cache Load? No! */ + if (ia32_cap & ARCH_CAP_RDCL_NO) + return false; + + return true; +} + /* * Do minimum CPU detection early. 
* Fields really needed: vendor, cpuid_level, family, model, mask, @@ -900,11 +937,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) setup_force_cpu_cap(X86_FEATURE_ALWAYS); - if (c->x86_vendor != X86_VENDOR_AMD) - setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); - - setup_force_cpu_bug(X86_BUG_SPECTRE_V1); - setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + if (!x86_match_cpu(cpu_no_speculation)) { + if (cpu_vulnerable_to_meltdown(c)) + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + } fpu__init_system(c); From 0fd222b19766ba28cf6e45b6ad461f1a303831bd Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 25 Jan 2018 16:14:14 +0000 Subject: [PATCH 1707/3326] x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes commit a5b2966364538a0e68c9fa29bc0a3a1651799035 This doesn't refuse to load the affected microcodes; it just refuses to use the Spectre v2 mitigation features if they're detected, by clearing the appropriate feature bits. The AMD CPUID bits are handled here too, because hypervisors *may* have been exposing those bits even on Intel chips, for fine-grained control of what's available. It is non-trivial to use x86_match_cpu() for this table because that doesn't handle steppings. And the approach taken in commit bd9240a18 almost made me lose my lunch. Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Cc: gnomes@lxorguk.ukuu.org.uk Cc: ak@linux.intel.com Cc: ashok.raj@intel.com Cc: dave.hansen@intel.com Cc: karahmed@amazon.de Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: peterz@infradead.org Cc: bp@alien8.de Cc: pbonzini@redhat.com Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1516896855-7642-7-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/intel.c | 66 +++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b720dacac05195..5faa487d047771 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -102,6 +102,59 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c) ELF_HWCAP2 |= HWCAP2_RING3MWAIT; } +/* + * Early microcode releases for the Spectre v2 mitigation were broken. 
+ * Information taken from; + * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf + * - https://kb.vmware.com/s/article/52345 + * - Microcode revisions observed in the wild + * - Release note from 20180108 microcode release + */ +struct sku_microcode { + u8 model; + u8 stepping; + u32 microcode; +}; +static const struct sku_microcode spectre_bad_microcodes[] = { + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 }, + { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 }, + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 }, + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, + { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 }, + { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, + { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, + { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, + { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, + { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 }, + { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 }, + { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 }, + { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, + { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, + { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, + /* Updated in the 20180108 release; blacklist until we know otherwise */ + { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 }, + /* Observed in the wild */ + { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, + { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, +}; + +static bool bad_spectre_microcode(struct cpuinfo_x86 *c) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { + if (c->x86_model == spectre_bad_microcodes[i].model && + c->x86_mask == spectre_bad_microcodes[i].stepping) + return (c->microcode <= spectre_bad_microcodes[i].microcode); + } + return false; +} + static void early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable; @@ -122,6 +175,19 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) c->microcode = intel_get_microcode_revision(); + if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) || + cpu_has(c, X86_FEATURE_STIBP) || + cpu_has(c, X86_FEATURE_AMD_SPEC_CTRL) || + cpu_has(c, X86_FEATURE_AMD_PRED_CMD) || + cpu_has(c, X86_FEATURE_AMD_STIBP)) && bad_spectre_microcode(c)) { + pr_warn("Intel Spectre v2 broken microcode detected; disabling SPEC_CTRL\n"); + clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL); + clear_cpu_cap(c, X86_FEATURE_STIBP); + clear_cpu_cap(c, X86_FEATURE_AMD_SPEC_CTRL); + clear_cpu_cap(c, X86_FEATURE_AMD_PRED_CMD); + clear_cpu_cap(c, X86_FEATURE_AMD_STIBP); + } + /* * Atom erratum AAE44/AAF40/AAG38/AAH41: * From dbbbafce5380f2df32e142e5f08f17cf1b55b59c Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 25 Jan 2018 16:14:15 +0000 Subject: [PATCH 1708/3326] x86/speculation: Add basic IBPB (Indirect Branch Prediction Barrier) support commit 20ffa1caecca4db8f79fe665acdeaa5af815a24d Expose indirect_branch_prediction_barrier() for use in subsequent patches. 
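A purely illustrative call site (an assumption here, not part of this patch — the real users arrive in later patches of the series) shows how the helper is meant to be used: issue the barrier at a point where predictions learned in one domain must not influence the next, e.g. when switching to a different process.

    /*
     * Hypothetical caller: discard indirect branch predictions learned by
     * the outgoing task so they cannot steer speculation in the incoming one.
     */
    static void example_switch_to_new_mm(struct mm_struct *prev, struct mm_struct *next)
    {
    	if (prev != next)
    		indirect_branch_prediction_barrier();	/* WRMSR IA32_PRED_CMD <- IBPB */
    }
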
[ tglx: Add IBPB status to spectre_v2 sysfs file ] Co-developed-by: KarimAllah Ahmed Signed-off-by: KarimAllah Ahmed Signed-off-by: David Woodhouse Cc: gnomes@lxorguk.ukuu.org.uk Cc: ak@linux.intel.com Cc: ashok.raj@intel.com Cc: dave.hansen@intel.com Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: peterz@infradead.org Cc: bp@alien8.de Cc: pbonzini@redhat.com Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1516896855-7642-8-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 2 ++ arch/x86/include/asm/nospec-branch.h | 13 +++++++++++++ arch/x86/kernel/cpu/bugs.c | 10 +++++++++- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index ae3212f14dece4..07934b2f8df28a 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -210,6 +210,8 @@ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ +#define X86_FEATURE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled*/ + /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ #define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 4ad41087ce0e77..34e384c7208f43 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -218,5 +218,18 @@ static inline void vmexit_fill_RSB(void) #endif } +static inline void indirect_branch_prediction_barrier(void) +{ + asm volatile(ALTERNATIVE("", + "movl %[msr], %%ecx\n\t" + "movl %[val], %%eax\n\t" + "movl $0, %%edx\n\t" + "wrmsr", + X86_FEATURE_IBPB) + : : [msr] "i" (MSR_IA32_PRED_CMD), + [val] "i" (PRED_CMD_IBPB) + : "eax", "ecx", "edx", "memory"); +} + #endif /* __ASSEMBLY__ */ #endif /* __NOSPEC_BRANCH_H__ */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4a39d7bb4bd87b..bac7a3558db2ee 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -263,6 +263,13 @@ static void __init spectre_v2_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); pr_info("Filling RSB on context switch\n"); } + + /* Initialize Indirect Branch Prediction Barrier if supported */ + if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) || + boot_cpu_has(X86_FEATURE_AMD_PRED_CMD)) { + setup_force_cpu_cap(X86_FEATURE_IBPB); + pr_info("Enabling Indirect Branch Prediction Barrier\n"); + } } #undef pr_fmt @@ -292,7 +299,8 @@ ssize_t cpu_show_spectre_v2(struct device *dev, if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) return sprintf(buf, "Not affected\n"); - return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled], + return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], + boot_cpu_has(X86_FEATURE_IBPB) ? ", IPBP" : "", spectre_v2_bad_module ? " - vulnerable module loaded" : ""); } #endif From 67eb59b8ecfb319438706cee2cb67a3045b54494 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 26 Jan 2018 13:11:36 +0100 Subject: [PATCH 1709/3326] x86/alternative: Print unadorned pointers commit 0e6c16c652cadaffd25a6bb326ec10da5bcec6b4 After commit ad67b74d2469 ("printk: hash addresses printed with %p") pointers are being hashed when printed. However, this makes the alternative debug output completely useless. 
Switch to %px in order to see the unadorned kernel pointers. Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: riel@redhat.com Cc: ak@linux.intel.com Cc: peterz@infradead.org Cc: David Woodhouse Cc: jikos@kernel.org Cc: luto@amacapital.net Cc: dave.hansen@intel.com Cc: torvalds@linux-foundation.org Cc: keescook@google.com Cc: Josh Poimboeuf Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Cc: pjt@google.com Link: https://lkml.kernel.org/r/20180126121139.31959-2-bp@alien8.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/alternative.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index e0b97e4d1db556..14a52c7d23d468 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -298,7 +298,7 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf) tgt_rip = next_rip + o_dspl; n_dspl = tgt_rip - orig_insn; - DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl); + DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl); if (tgt_rip - orig_insn >= 0) { if (n_dspl - 2 <= 127) @@ -355,7 +355,7 @@ static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *ins add_nops(instr + (a->instrlen - a->padlen), a->padlen); local_irq_restore(flags); - DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ", + DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ", instr, a->instrlen - a->padlen, a->padlen); } @@ -376,7 +376,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, u8 *instr, *replacement; u8 insnbuf[MAX_PATCH_LEN]; - DPRINTK("alt table %p -> %p", start, end); + DPRINTK("alt table %px, -> %px", start, end); /* * The scan order should be from start to end. A later scanned * alternative code can overwrite previously scanned alternative code. @@ -400,14 +400,14 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, continue; } - DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d", + DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d", a->cpuid >> 5, a->cpuid & 0x1f, instr, a->instrlen, replacement, a->replacementlen, a->padlen); - DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr); - DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement); + DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr); + DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement); memcpy(insnbuf, replacement, a->replacementlen); insnbuf_sz = a->replacementlen; @@ -433,7 +433,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, a->instrlen - a->replacementlen); insnbuf_sz += a->instrlen - a->replacementlen; } - DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr); + DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr); text_poke_early(instr, insnbuf, insnbuf_sz); } From b955239cf4ea953eb5fca5409a5524daf20f642a Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 26 Jan 2018 13:11:37 +0100 Subject: [PATCH 1710/3326] x86/nospec: Fix header guards names commit 7a32fc51ca938e67974cbb9db31e1a43f98345a9 ... to adhere to the _ASM_X86_ naming scheme. No functional change. 
Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: riel@redhat.com Cc: ak@linux.intel.com Cc: peterz@infradead.org Cc: David Woodhouse Cc: jikos@kernel.org Cc: luto@amacapital.net Cc: dave.hansen@intel.com Cc: torvalds@linux-foundation.org Cc: keescook@google.com Cc: Josh Poimboeuf Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Cc: pjt@google.com Link: https://lkml.kernel.org/r/20180126121139.31959-3-bp@alien8.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 34e384c7208f43..865192a2cc319d 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __NOSPEC_BRANCH_H__ -#define __NOSPEC_BRANCH_H__ +#ifndef _ASM_X86_NOSPEC_BRANCH_H_ +#define _ASM_X86_NOSPEC_BRANCH_H_ #include #include @@ -232,4 +232,4 @@ static inline void indirect_branch_prediction_barrier(void) } #endif /* __ASSEMBLY__ */ -#endif /* __NOSPEC_BRANCH_H__ */ +#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ From 91ff9a75f360eb89e913fcc4b3a7fbb602c67cbb Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 26 Jan 2018 13:11:39 +0100 Subject: [PATCH 1711/3326] x86/bugs: Drop one "mitigation" from dmesg commit 55fa19d3e51f33d9cd4056d25836d93abf9438db Make [ 0.031118] Spectre V2 mitigation: Mitigation: Full generic retpoline into [ 0.031118] Spectre V2: Mitigation: Full generic retpoline to reduce the mitigation mitigations strings. Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Cc: riel@redhat.com Cc: ak@linux.intel.com Cc: peterz@infradead.org Cc: David Woodhouse Cc: jikos@kernel.org Cc: luto@amacapital.net Cc: dave.hansen@intel.com Cc: torvalds@linux-foundation.org Cc: keescook@google.com Cc: Josh Poimboeuf Cc: tim.c.chen@linux.intel.com Cc: pjt@google.com Link: https://lkml.kernel.org/r/20180126121139.31959-5-bp@alien8.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index bac7a3558db2ee..c988a8acb0d5e9 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -91,7 +91,7 @@ static const char *spectre_v2_strings[] = { }; #undef pr_fmt -#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt +#define pr_fmt(fmt) "Spectre V2 : " fmt static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; static bool spectre_v2_bad_module; From 249b1f7a7f09c9c5243f8c592ea7fb13871968bc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 27 Jan 2018 15:45:14 +0100 Subject: [PATCH 1712/3326] x86/cpu/bugs: Make retpoline module warning conditional MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit e383095c7fe8d218e00ec0f83e4b95ed4e627b02 If sysfs is disabled and RETPOLINE not defined: arch/x86/kernel/cpu/bugs.c:97:13: warning: ‘spectre_v2_bad_module’ defined but not used [-Wunused-variable] static bool spectre_v2_bad_module; Hide it. 
Fixes: caf7501a1b4e ("module/retpoline: Warn about missing retpoline in module") Reported-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Andi Kleen Cc: David Woodhouse Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c988a8acb0d5e9..b0b7157df89ee5 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -94,9 +94,10 @@ static const char *spectre_v2_strings[] = { #define pr_fmt(fmt) "Spectre V2 : " fmt static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; -static bool spectre_v2_bad_module; #ifdef RETPOLINE +static bool spectre_v2_bad_module; + bool retpoline_module_ok(bool has_retpoline) { if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) @@ -106,6 +107,13 @@ bool retpoline_module_ok(bool has_retpoline) spectre_v2_bad_module = true; return false; } + +static inline const char *spectre_v2_module_string(void) +{ + return spectre_v2_bad_module ? " - vulnerable module loaded" : ""; +} +#else +static inline const char *spectre_v2_module_string(void) { return ""; } #endif static void __init spec2_print_if_insecure(const char *reason) @@ -300,7 +308,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, return sprintf(buf, "Not affected\n"); return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], - boot_cpu_has(X86_FEATURE_IBPB) ? ", IPBP" : "", - spectre_v2_bad_module ? " - vulnerable module loaded" : ""); + boot_cpu_has(X86_FEATURE_IBPB) ? ", IBPB" : "", + spectre_v2_module_string()); } #endif From 7f3e0daa9e125d3d4d7720136370303009edbdeb Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 27 Jan 2018 16:24:32 +0000 Subject: [PATCH 1713/3326] x86/cpufeatures: Clean up Spectre v2 related CPUID flags commit 2961298efe1ea1b6fc0d7ee8b76018fa6c0bcef2 We want to expose the hardware features simply in /proc/cpuinfo as "ibrs", "ibpb" and "stibp". Since AMD has separate CPUID bits for those, use them as the user-visible bits. When the Intel SPEC_CTRL bit is set which indicates both IBRS and IBPB capability, set those (AMD) bits accordingly. Likewise if the Intel STIBP bit is set, set the AMD STIBP that's used for the generic hardware capability. Hide the rest from /proc/cpuinfo by putting "" in the comments. Including RETPOLINE and RETPOLINE_AMD which shouldn't be visible there. There are patches to make the sysfs vulnerabilities information non-readable by non-root, and the same should apply to all information about which mitigations are actually in use. Those *shouldn't* appear in /proc/cpuinfo. The feature bit for whether IBPB is actually used, which is needed for ALTERNATIVEs, is renamed to X86_FEATURE_USE_IBPB. 
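The hiding mentioned above relies on the x86 capflags generator's comment convention: a quoted string at the start of the comment overrides the flag name shown in /proc/cpuinfo, and an empty "" suppresses it entirely. Annotated sketch of the two forms (the definitions themselves appear in the diff below):

    #define X86_FEATURE_USE_IBPB	( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */	/* hidden from /proc/cpuinfo */
    #define X86_FEATURE_IBPB	(13*32+12) /* Indirect Branch Prediction Barrier */		/* shown as "ibpb" */
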
Originally-by: Borislav Petkov Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: ak@linux.intel.com Cc: dave.hansen@intel.com Cc: karahmed@amazon.de Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: peterz@infradead.org Cc: bp@alien8.de Cc: pbonzini@redhat.com Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1517070274-12128-2-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 18 ++++++++-------- arch/x86/include/asm/nospec-branch.h | 2 +- arch/x86/kernel/cpu/bugs.c | 7 +++---- arch/x86/kernel/cpu/intel.c | 31 +++++++++++++++++++--------- 4 files changed, 34 insertions(+), 24 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 07934b2f8df28a..73b5fff159a434 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -203,14 +203,14 @@ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ -#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */ -#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ -#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ +#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ -#define X86_FEATURE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled*/ +#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ @@ -271,9 +271,9 @@ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ -#define X86_FEATURE_AMD_PRED_CMD (13*32+12) /* Prediction Command MSR (AMD) */ -#define X86_FEATURE_AMD_SPEC_CTRL (13*32+14) /* Speculation Control MSR only (AMD) */ -#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors (AMD) */ +#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ +#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ +#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ @@ -325,8 +325,8 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ -#define X86_FEATURE_SPEC_CTRL (18*32+26) /* Speculation Control (IBRS + IBPB) */ -#define X86_FEATURE_STIBP (18*32+27) /* Single Thread Indirect Branch Predictors */ +#define 
X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ +#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ /* diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 865192a2cc319d..19ecb5446b3022 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -225,7 +225,7 @@ static inline void indirect_branch_prediction_barrier(void) "movl %[val], %%eax\n\t" "movl $0, %%edx\n\t" "wrmsr", - X86_FEATURE_IBPB) + X86_FEATURE_USE_IBPB) : : [msr] "i" (MSR_IA32_PRED_CMD), [val] "i" (PRED_CMD_IBPB) : "eax", "ecx", "edx", "memory"); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index b0b7157df89ee5..32d8e6cdc09eee 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -273,9 +273,8 @@ static void __init spectre_v2_select_mitigation(void) } /* Initialize Indirect Branch Prediction Barrier if supported */ - if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) || - boot_cpu_has(X86_FEATURE_AMD_PRED_CMD)) { - setup_force_cpu_cap(X86_FEATURE_IBPB); + if (boot_cpu_has(X86_FEATURE_IBPB)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); pr_info("Enabling Indirect Branch Prediction Barrier\n"); } } @@ -308,7 +307,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, return sprintf(buf, "Not affected\n"); return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], - boot_cpu_has(X86_FEATURE_IBPB) ? ", IBPB" : "", + boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", spectre_v2_module_string()); } #endif diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 5faa487d047771..0c8b916abced5c 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -175,17 +175,28 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) c->microcode = intel_get_microcode_revision(); - if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) || - cpu_has(c, X86_FEATURE_STIBP) || - cpu_has(c, X86_FEATURE_AMD_SPEC_CTRL) || - cpu_has(c, X86_FEATURE_AMD_PRED_CMD) || - cpu_has(c, X86_FEATURE_AMD_STIBP)) && bad_spectre_microcode(c)) { - pr_warn("Intel Spectre v2 broken microcode detected; disabling SPEC_CTRL\n"); - clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL); + /* + * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support, + * and they also have a different bit for STIBP support. Also, + * a hypervisor might have set the individual AMD bits even on + * Intel CPUs, for finer-grained selection of what's available. 
+ */ + if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { + set_cpu_cap(c, X86_FEATURE_IBRS); + set_cpu_cap(c, X86_FEATURE_IBPB); + } + if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) + set_cpu_cap(c, X86_FEATURE_STIBP); + + /* Now if any of them are set, check the blacklist and clear the lot */ + if ((cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) || + cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) { + pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n"); + clear_cpu_cap(c, X86_FEATURE_IBRS); + clear_cpu_cap(c, X86_FEATURE_IBPB); clear_cpu_cap(c, X86_FEATURE_STIBP); - clear_cpu_cap(c, X86_FEATURE_AMD_SPEC_CTRL); - clear_cpu_cap(c, X86_FEATURE_AMD_PRED_CMD); - clear_cpu_cap(c, X86_FEATURE_AMD_STIBP); + clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL); + clear_cpu_cap(c, X86_FEATURE_INTEL_STIBP); } /* From 52d78bce4965e685eea60763076f688272f963ea Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 27 Jan 2018 16:24:33 +0000 Subject: [PATCH 1714/3326] x86/retpoline: Simplify vmexit_fill_RSB() commit 1dde7415e99933bb7293d6b2843752cbdb43ec11 Simplify it to call an asm-function instead of pasting 41 insn bytes at every call site. Also, add alignment to the macro as suggested here: https://support.google.com/faqs/answer/7625886 [dwmw2: Clean up comments, let it clobber %ebx and just tell the compiler] Signed-off-by: Borislav Petkov Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: ak@linux.intel.com Cc: dave.hansen@intel.com Cc: karahmed@amazon.de Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: peterz@infradead.org Cc: bp@alien8.de Cc: pbonzini@redhat.com Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1517070274-12128-3-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_32.S | 3 +- arch/x86/entry/entry_64.S | 3 +- arch/x86/include/asm/asm-prototypes.h | 3 ++ arch/x86/include/asm/nospec-branch.h | 70 +++------------------------ arch/x86/lib/Makefile | 1 + arch/x86/lib/retpoline.S | 56 +++++++++++++++++++++ 6 files changed, 71 insertions(+), 65 deletions(-) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 60c4c342316cdf..2a35b1e0fb902a 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -252,7 +252,8 @@ ENTRY(__switch_to_asm) * exist, overwrite the RSB with entries which capture * speculative execution to prevent attack. */ - FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW + /* Clobbers %ebx */ + FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW #endif /* restore callee-saved registers */ diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index be6b66464f6a7d..4527630cc21e59 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -495,7 +495,8 @@ ENTRY(__switch_to_asm) * exist, overwrite the RSB with entries which capture * speculative execution to prevent attack. 
*/ - FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW + /* Clobbers %rbx */ + FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW #endif /* restore callee-saved registers */ diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index 1908214b91257f..4d111616524b2e 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -38,4 +38,7 @@ INDIRECT_THUNK(dx) INDIRECT_THUNK(si) INDIRECT_THUNK(di) INDIRECT_THUNK(bp) +asmlinkage void __fill_rsb(void); +asmlinkage void __clear_rsb(void); + #endif /* CONFIG_RETPOLINE */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 19ecb5446b3022..df4ececa6ebfb3 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -7,50 +7,6 @@ #include #include -/* - * Fill the CPU return stack buffer. - * - * Each entry in the RSB, if used for a speculative 'ret', contains an - * infinite 'pause; lfence; jmp' loop to capture speculative execution. - * - * This is required in various cases for retpoline and IBRS-based - * mitigations for the Spectre variant 2 vulnerability. Sometimes to - * eliminate potentially bogus entries from the RSB, and sometimes - * purely to ensure that it doesn't get empty, which on some CPUs would - * allow predictions from other (unwanted!) sources to be used. - * - * We define a CPP macro such that it can be used from both .S files and - * inline assembly. It's possible to do a .macro and then include that - * from C via asm(".include ") but let's not go there. - */ - -#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ -#define RSB_FILL_LOOPS 16 /* To avoid underflow */ - -/* - * Google experimented with loop-unrolling and this turned out to be - * the optimal version — two calls, each with their own speculation - * trap should their return address end up getting used, in a loop. - */ -#define __FILL_RETURN_BUFFER(reg, nr, sp) \ - mov $(nr/2), reg; \ -771: \ - call 772f; \ -773: /* speculation trap */ \ - pause; \ - lfence; \ - jmp 773b; \ -772: \ - call 774f; \ -775: /* speculation trap */ \ - pause; \ - lfence; \ - jmp 775b; \ -774: \ - dec reg; \ - jnz 771b; \ - add $(BITS_PER_LONG/8) * nr, sp; - #ifdef __ASSEMBLY__ /* @@ -121,17 +77,10 @@ #endif .endm - /* - * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP - * monstrosity above, manually. 
- */ -.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req +/* This clobbers the BX register */ +.macro FILL_RETURN_BUFFER nr:req ftr:req #ifdef CONFIG_RETPOLINE - ANNOTATE_NOSPEC_ALTERNATIVE - ALTERNATIVE "jmp .Lskip_rsb_\@", \ - __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ - \ftr -.Lskip_rsb_\@: + ALTERNATIVE "", "call __clear_rsb", \ftr #endif .endm @@ -206,15 +155,10 @@ extern char __indirect_thunk_end[]; static inline void vmexit_fill_RSB(void) { #ifdef CONFIG_RETPOLINE - unsigned long loops; - - asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE - ALTERNATIVE("jmp 910f", - __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), - X86_FEATURE_RETPOLINE) - "910:" - : "=r" (loops), ASM_CALL_CONSTRAINT - : : "memory" ); + alternative_input("", + "call __fill_rsb", + X86_FEATURE_RETPOLINE, + ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory")); #endif } diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index d435c89875c148..d0a3170e6804a3 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -27,6 +27,7 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o lib-$(CONFIG_RETPOLINE) += retpoline.o +OBJECT_FILES_NON_STANDARD_retpoline.o :=y obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index c909961e678a59..480edc3a5e0300 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -7,6 +7,7 @@ #include #include #include +#include .macro THUNK reg .section .text.__x86.indirect_thunk @@ -46,3 +47,58 @@ GENERATE_THUNK(r13) GENERATE_THUNK(r14) GENERATE_THUNK(r15) #endif + +/* + * Fill the CPU return stack buffer. + * + * Each entry in the RSB, if used for a speculative 'ret', contains an + * infinite 'pause; lfence; jmp' loop to capture speculative execution. + * + * This is required in various cases for retpoline and IBRS-based + * mitigations for the Spectre variant 2 vulnerability. Sometimes to + * eliminate potentially bogus entries from the RSB, and sometimes + * purely to ensure that it doesn't get empty, which on some CPUs would + * allow predictions from other (unwanted!) sources to be used. + * + * Google experimented with loop-unrolling and this turned out to be + * the optimal version - two calls, each with their own speculation + * trap should their return address end up getting used, in a loop. + */ +.macro STUFF_RSB nr:req sp:req + mov $(\nr / 2), %_ASM_BX + .align 16 +771: + call 772f +773: /* speculation trap */ + pause + lfence + jmp 773b + .align 16 +772: + call 774f +775: /* speculation trap */ + pause + lfence + jmp 775b + .align 16 +774: + dec %_ASM_BX + jnz 771b + add $((BITS_PER_LONG/8) * \nr), \sp +.endm + +#define RSB_FILL_LOOPS 16 /* To avoid underflow */ + +ENTRY(__fill_rsb) + STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP + ret +END(__fill_rsb) +EXPORT_SYMBOL_GPL(__fill_rsb) + +#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ + +ENTRY(__clear_rsb) + STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP + ret +END(__clear_rsb) +EXPORT_SYMBOL_GPL(__clear_rsb) From 9f8955f96bd520efc7def658ae61b67db20b1b46 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 27 Jan 2018 16:24:34 +0000 Subject: [PATCH 1715/3326] x86/speculation: Simplify indirect_branch_prediction_barrier() commit 64e16720ea0879f8ab4547e3b9758936d483909b Make it all a function which does the WRMSR instead of having a hairy inline asm. 
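The simplification works because the kernel only ever writes IA32_PRED_CMD: issuing the barrier is a single command-MSR write with no state to read back or preserve, so a plain C function suffices. A minimal sketch of the equivalent operation (the real helper, __ibp_barrier(), is in the diff below):

    	/* An IBPB is one command-MSR write; no read-modify-write is needed. */
    	wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
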
[dwmw2: export it, fix CONFIG_RETPOLINE issues] Signed-off-by: Borislav Petkov Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: ak@linux.intel.com Cc: dave.hansen@intel.com Cc: karahmed@amazon.de Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: peterz@infradead.org Cc: bp@alien8.de Cc: pbonzini@redhat.com Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1517070274-12128-4-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 13 ++++--------- arch/x86/include/asm/processor.h | 3 +++ arch/x86/kernel/cpu/bugs.c | 6 ++++++ 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index df4ececa6ebfb3..d15d471348b815 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -164,15 +164,10 @@ static inline void vmexit_fill_RSB(void) static inline void indirect_branch_prediction_barrier(void) { - asm volatile(ALTERNATIVE("", - "movl %[msr], %%ecx\n\t" - "movl %[val], %%eax\n\t" - "movl $0, %%edx\n\t" - "wrmsr", - X86_FEATURE_USE_IBPB) - : : [msr] "i" (MSR_IA32_PRED_CMD), - [val] "i" (PRED_CMD_IBPB) - : "eax", "ecx", "edx", "memory"); + alternative_input("", + "call __ibp_barrier", + X86_FEATURE_USE_IBPB, + ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory")); } #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 9c18da64daa920..881ca3b1d6d49f 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -970,4 +970,7 @@ bool xen_set_default_idle(void); void stop_this_cpu(void *dummy); void df_debug(struct pt_regs *regs, long error_code); + +void __ibp_barrier(void); + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 32d8e6cdc09eee..3bfb2b23d79c5a 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -311,3 +311,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, spectre_v2_module_string()); } #endif + +void __ibp_barrier(void) +{ + __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0); +} +EXPORT_SYMBOL_GPL(__ibp_barrier); From c8aa5cd906a583332f831b88a9d7177f0427b986 Mon Sep 17 00:00:00 2001 From: Jesse Chan Date: Wed, 10 Jan 2018 17:41:10 +0100 Subject: [PATCH 1716/3326] auxdisplay: img-ascii-lcd: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE commit 09c479f7f1fbfaf848e5813996793966cd50be81 upstream. This change resolves a new compile-time warning when built as a loadable module: WARNING: modpost: missing MODULE_LICENSE() in drivers/auxdisplay/img-ascii-lcd.o see include/linux/module.h for more information This adds the license as "GPL", which matches the header of the file. MODULE_DESCRIPTION and MODULE_AUTHOR are also added. 
Signed-off-by: Jesse Chan Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- drivers/auxdisplay/img-ascii-lcd.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index a9020f82eea713..58403052514f0e 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -443,3 +443,7 @@ static struct platform_driver img_ascii_lcd_driver = { .remove = img_ascii_lcd_remove, }; module_platform_driver(img_ascii_lcd_driver); + +MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display"); +MODULE_AUTHOR("Paul Burton "); +MODULE_LICENSE("GPL"); From ce094a80f019049a7bfb6860a91f3856bd53ec3b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 13 Nov 2017 11:29:55 +0100 Subject: [PATCH 1717/3326] iio: adc/accel: Fix up module licenses commit 9a0ebbc93547d88f422905c34dcceebe928f3e9e upstream. The module license checker complains about these two so just fix it up. They are both GPLv2, both written by me or using code I extracted while refactoring from the GPLv2 drivers. Cc: Randy Dunlap Reported-by: Randy Dunlap Signed-off-by: Linus Walleij Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/accel/kxsd9-i2c.c | 3 +++ drivers/iio/adc/qcom-vadc-common.c | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c index 98fbb628d5bdd1..38411e1c155bd3 100644 --- a/drivers/iio/accel/kxsd9-i2c.c +++ b/drivers/iio/accel/kxsd9-i2c.c @@ -63,3 +63,6 @@ static struct i2c_driver kxsd9_i2c_driver = { .id_table = kxsd9_i2c_id, }; module_i2c_driver(kxsd9_i2c_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("KXSD9 accelerometer I2C interface"); diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c index 47d24ae5462fe8..fe3d7826783c0f 100644 --- a/drivers/iio/adc/qcom-vadc-common.c +++ b/drivers/iio/adc/qcom-vadc-common.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "qcom-vadc-common.h" @@ -229,3 +230,6 @@ int qcom_vadc_decimation_from_dt(u32 value) return __ffs64(value / VADC_DECIMATION_MIN); } EXPORT_SYMBOL(qcom_vadc_decimation_from_dt); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm ADC common functionality"); From bc484da3e18c13b15badcc861e816514ccd86f8c Mon Sep 17 00:00:00 2001 From: Jesse Chan Date: Mon, 20 Nov 2017 12:58:03 -0800 Subject: [PATCH 1718/3326] pinctrl: pxa: pxa2xx: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE commit 0b9335cbd38e3bd2025bcc23b5758df4ac035f75 upstream. This change resolves a new compile-time warning when built as a loadable module: WARNING: modpost: missing MODULE_LICENSE() in drivers/pinctrl/pxa/pinctrl-pxa2xx.o see include/linux/module.h for more information This adds the license as "GPL v2", which matches the header of the file. MODULE_DESCRIPTION and MODULE_AUTHOR are also added. 
Signed-off-by: Jesse Chan Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/pxa/pinctrl-pxa2xx.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c index 866aa3ce1ac95a..6cf0006d4c8df4 100644 --- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c +++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c @@ -436,3 +436,7 @@ int pxa2xx_pinctrl_exit(struct platform_device *pdev) return 0; } EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_exit); + +MODULE_AUTHOR("Robert Jarzmik "); +MODULE_DESCRIPTION("Marvell PXA2xx pinctrl driver"); +MODULE_LICENSE("GPL v2"); From 76f06358ec400cd5f7c8260fd290e7a7ee20fbcf Mon Sep 17 00:00:00 2001 From: Jesse Chan Date: Sun, 19 Nov 2017 23:45:49 -0800 Subject: [PATCH 1719/3326] ASoC: pcm512x: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE commit 0cab20cec0b663b7be8e2be5998d5a4113647f86 upstream. This change resolves a new compile-time warning when built as a loadable module: WARNING: modpost: missing MODULE_LICENSE() in sound/soc/codecs/snd-soc-pcm512x-spi.o see include/linux/module.h for more information This adds the license as "GPL v2", which matches the header of the file. MODULE_DESCRIPTION and MODULE_AUTHOR are also added. Signed-off-by: Jesse Chan Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/pcm512x-spi.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c index 712ed6598c4827..ebdf9bd5a64c53 100644 --- a/sound/soc/codecs/pcm512x-spi.c +++ b/sound/soc/codecs/pcm512x-spi.c @@ -70,3 +70,7 @@ static struct spi_driver pcm512x_spi_driver = { }; module_spi_driver(pcm512x_spi_driver); + +MODULE_DESCRIPTION("ASoC PCM512x codec driver - SPI"); +MODULE_AUTHOR("Mark Brown "); +MODULE_LICENSE("GPL v2"); From 2e9521197f08d8cec826d3a5ff04131e40a79120 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Mon, 27 Nov 2017 17:22:25 -0600 Subject: [PATCH 1720/3326] KVM: nVMX: Eliminate vmcs02 pool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit de3a0021a60635de96aa92713c1a31a96747d72c The potential performance advantages of a vmcs02 pool have never been realized. To simplify the code, eliminate the pool. Instead, a single vmcs02 is allocated per VCPU when the VCPU enters VMX operation. Cc: stable@vger.kernel.org # prereq for Spectre mitigation Signed-off-by: Jim Mattson Signed-off-by: Mark Kanda Reviewed-by: Ameya More Reviewed-by: David Hildenbrand Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 146 +++++++-------------------------------------- 1 file changed, 23 insertions(+), 123 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 213c092f2a9fb9..95546c5926e144 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -182,7 +182,6 @@ module_param(ple_window_max, int, S_IRUGO); extern const ulong vmx_return; #define NR_AUTOLOAD_MSRS 8 -#define VMCS02_POOL_SIZE 1 struct vmcs { u32 revision_id; @@ -223,7 +222,7 @@ struct shared_msr_entry { * stored in guest memory specified by VMPTRLD, but is opaque to the guest, * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. * More than one of these structures may exist, if L1 runs multiple L2 guests. - * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the + * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the * underlying hardware which will be used to run L2. 
* This structure is packed to ensure that its layout is identical across * machines (necessary for live migration). @@ -406,13 +405,6 @@ struct __packed vmcs12 { */ #define VMCS12_SIZE 0x1000 -/* Used to remember the last vmcs02 used for some recently used vmcs12s */ -struct vmcs02_list { - struct list_head list; - gpa_t vmptr; - struct loaded_vmcs vmcs02; -}; - /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. @@ -437,15 +429,15 @@ struct nested_vmx { */ bool sync_shadow_vmcs; - /* vmcs02_list cache of VMCSs recently used to run L2 guests */ - struct list_head vmcs02_pool; - int vmcs02_num; bool change_vmcs01_virtual_x2apic_mode; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; + + struct loaded_vmcs vmcs02; + /* - * Guest pages referred to in vmcs02 with host-physical pointers, so - * we must keep them pinned while L2 runs. + * Guest pages referred to in the vmcs02 with host-physical + * pointers, so we must keep them pinned while L2 runs. */ struct page *apic_access_page; struct page *virtual_apic_page; @@ -6963,94 +6955,6 @@ static int handle_monitor(struct kvm_vcpu *vcpu) return handle_nop(vcpu); } -/* - * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12. - * We could reuse a single VMCS for all the L2 guests, but we also want the - * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this - * allows keeping them loaded on the processor, and in the future will allow - * optimizations where prepare_vmcs02 doesn't need to set all the fields on - * every entry if they never change. - * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE - * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first. - * - * The following functions allocate and free a vmcs02 in this pool. - */ - -/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */ -static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) -{ - struct vmcs02_list *item; - list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) - if (item->vmptr == vmx->nested.current_vmptr) { - list_move(&item->list, &vmx->nested.vmcs02_pool); - return &item->vmcs02; - } - - if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { - /* Recycle the least recently used VMCS. */ - item = list_last_entry(&vmx->nested.vmcs02_pool, - struct vmcs02_list, list); - item->vmptr = vmx->nested.current_vmptr; - list_move(&item->list, &vmx->nested.vmcs02_pool); - return &item->vmcs02; - } - - /* Create a new VMCS */ - item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL); - if (!item) - return NULL; - item->vmcs02.vmcs = alloc_vmcs(); - item->vmcs02.shadow_vmcs = NULL; - if (!item->vmcs02.vmcs) { - kfree(item); - return NULL; - } - loaded_vmcs_init(&item->vmcs02); - item->vmptr = vmx->nested.current_vmptr; - list_add(&(item->list), &(vmx->nested.vmcs02_pool)); - vmx->nested.vmcs02_num++; - return &item->vmcs02; -} - -/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */ -static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) -{ - struct vmcs02_list *item; - list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) - if (item->vmptr == vmptr) { - free_loaded_vmcs(&item->vmcs02); - list_del(&item->list); - kfree(item); - vmx->nested.vmcs02_num--; - return; - } -} - -/* - * Free all VMCSs saved for this vcpu, except the one pointed by - * vmx->loaded_vmcs. 
We must be running L1, so vmx->loaded_vmcs - * must be &vmx->vmcs01. - */ -static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) -{ - struct vmcs02_list *item, *n; - - WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); - list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { - /* - * Something will leak if the above WARN triggers. Better than - * a use-after-free. - */ - if (vmx->loaded_vmcs == &item->vmcs02) - continue; - - free_loaded_vmcs(&item->vmcs02); - list_del(&item->list); - kfree(item); - vmx->nested.vmcs02_num--; - } -} - /* * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), * set the success or error code of an emulated VMX instruction, as specified @@ -7232,6 +7136,12 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs *shadow_vmcs; + vmx->nested.vmcs02.vmcs = alloc_vmcs(); + vmx->nested.vmcs02.shadow_vmcs = NULL; + if (!vmx->nested.vmcs02.vmcs) + goto out_vmcs02; + loaded_vmcs_init(&vmx->nested.vmcs02); + if (cpu_has_vmx_msr_bitmap()) { vmx->nested.msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); @@ -7254,9 +7164,6 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) vmx->vmcs01.shadow_vmcs = shadow_vmcs; } - INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); - vmx->nested.vmcs02_num = 0; - hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; @@ -7271,6 +7178,9 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) free_page((unsigned long)vmx->nested.msr_bitmap); out_msr_bitmap: + free_loaded_vmcs(&vmx->nested.vmcs02); + +out_vmcs02: return -ENOMEM; } @@ -7423,7 +7333,7 @@ static void free_nested(struct vcpu_vmx *vmx) vmx->vmcs01.shadow_vmcs = NULL; } kfree(vmx->nested.cached_vmcs12); - /* Unpin physical memory we referred to in current vmcs02 */ + /* Unpin physical memory we referred to in the vmcs02 */ if (vmx->nested.apic_access_page) { kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; @@ -7439,7 +7349,7 @@ static void free_nested(struct vcpu_vmx *vmx) vmx->nested.pi_desc = NULL; } - nested_free_all_saved_vmcss(vmx); + free_loaded_vmcs(&vmx->nested.vmcs02); } /* Emulate the VMXOFF instruction */ @@ -7482,8 +7392,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) vmptr + offsetof(struct vmcs12, launch_state), &zero, sizeof(zero)); - nested_free_vmcs02(vmx, vmptr); - nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } @@ -8395,10 +8303,11 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) /* * The host physical addresses of some pages of guest memory - * are loaded into VMCS02 (e.g. L1's Virtual APIC Page). The CPU - * may write to these pages via their host physical address while - * L2 is running, bypassing any address-translation-based dirty - * tracking (e.g. EPT write protection). + * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC + * Page). The CPU may write to these pages via their host + * physical address while L2 is running, bypassing any + * address-translation-based dirty tracking (e.g. EPT write + * protection). * * Mark them dirty on every exit from L2 to prevent them from * getting out of sync with dirty tracking. 
@@ -10894,20 +10803,15 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - struct loaded_vmcs *vmcs02; u32 msr_entry_idx; u32 exit_qual; - vmcs02 = nested_get_current_vmcs02(vmx); - if (!vmcs02) - return -ENOMEM; - enter_guest_mode(vcpu); if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); - vmx_switch_vmcs(vcpu, vmcs02); + vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); vmx_segment_cache_clear(vmx); if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { @@ -11522,10 +11426,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vm_exit_controls_reset_shadow(vmx); vmx_segment_cache_clear(vmx); - /* if no vmcs02 cache requested, remove the one we used */ - if (VMCS02_POOL_SIZE == 0) - nested_free_vmcs02(vmx, vmx->nested.current_vmptr); - /* Update any VMCS fields that might have changed while L2 ran */ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); From 92f4b68ed14c7731cadd5bf7501a98da5302c093 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 11 Jan 2018 12:16:15 +0100 Subject: [PATCH 1721/3326] KVM: VMX: introduce alloc_loaded_vmcs commit f21f165ef922c2146cc5bdc620f542953c41714b Group together the calls to alloc_vmcs and loaded_vmcs_init. Soon we'll also allocate an MSR bitmap there. Cc: stable@vger.kernel.org # prereq for Spectre mitigation Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 95546c5926e144..bca85821b0f00c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3814,11 +3814,6 @@ static struct vmcs *alloc_vmcs_cpu(int cpu) return vmcs; } -static struct vmcs *alloc_vmcs(void) -{ - return alloc_vmcs_cpu(raw_smp_processor_id()); -} - static void free_vmcs(struct vmcs *vmcs) { free_pages((unsigned long)vmcs, vmcs_config.order); @@ -3837,6 +3832,22 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) WARN_ON(loaded_vmcs->shadow_vmcs != NULL); } +static struct vmcs *alloc_vmcs(void) +{ + return alloc_vmcs_cpu(raw_smp_processor_id()); +} + +static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) +{ + loaded_vmcs->vmcs = alloc_vmcs(); + if (!loaded_vmcs->vmcs) + return -ENOMEM; + + loaded_vmcs->shadow_vmcs = NULL; + loaded_vmcs_init(loaded_vmcs); + return 0; +} + static void free_kvm_area(void) { int cpu; @@ -7135,12 +7146,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs *shadow_vmcs; + int r; - vmx->nested.vmcs02.vmcs = alloc_vmcs(); - vmx->nested.vmcs02.shadow_vmcs = NULL; - if (!vmx->nested.vmcs02.vmcs) + r = alloc_loaded_vmcs(&vmx->nested.vmcs02); + if (r < 0) goto out_vmcs02; - loaded_vmcs_init(&vmx->nested.vmcs02); if (cpu_has_vmx_msr_bitmap()) { vmx->nested.msr_bitmap = @@ -9535,13 +9545,11 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) if (!vmx->guest_msrs) goto free_pml; - vmx->loaded_vmcs = &vmx->vmcs01; - vmx->loaded_vmcs->vmcs = alloc_vmcs(); - vmx->loaded_vmcs->shadow_vmcs = NULL; - if (!vmx->loaded_vmcs->vmcs) + err = alloc_loaded_vmcs(&vmx->vmcs01); + if (err < 0) goto free_msrs; - loaded_vmcs_init(vmx->loaded_vmcs); + vmx->loaded_vmcs = &vmx->vmcs01; cpu = get_cpu(); 
vmx_vcpu_load(&vmx->vcpu, cpu); vmx->vcpu.cpu = cpu; From a358df03279e806f8f4217eacf813c5b6c3578ee Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 29 Jan 2018 22:00:39 -0600 Subject: [PATCH 1722/3326] objtool: Improve retpoline alternative handling commit a845c7cf4b4cb5e9e3b2823867892b27646f3a98 Currently objtool requires all retpolines to be: a) patched in with alternatives; and b) annotated with ANNOTATE_NOSPEC_ALTERNATIVE. If you forget to do both of the above, objtool segfaults trying to dereference a NULL 'insn->call_dest' pointer. Avoid that situation and print a more helpful error message: quirks.o: warning: objtool: efi_delete_dummy_variable()+0x99: unsupported intra-function call quirks.o: warning: objtool: If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE. Future improvements can be made to make objtool smarter with respect to retpolines, but this is a good incremental improvement for now. Reported-and-tested-by: Guenter Roeck Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: H. Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/819e50b6d9c2e1a22e34c1a636c0b2057cc8c6e5.1517284349.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/check.c | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index f40d46e24bcce3..bc3490d929ffe6 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -543,18 +543,14 @@ static int add_call_destinations(struct objtool_file *file) dest_off = insn->offset + insn->len + insn->immediate; insn->call_dest = find_symbol_by_offset(insn->sec, dest_off); - /* - * FIXME: Thanks to retpolines, it's now considered - * normal for a function to call within itself. So - * disable this warning for now. - */ -#if 0 - if (!insn->call_dest) { - WARN_FUNC("can't find call dest symbol at offset 0x%lx", - insn->sec, insn->offset, dest_off); + + if (!insn->call_dest && !insn->ignore) { + WARN_FUNC("unsupported intra-function call", + insn->sec, insn->offset); + WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE."); return -1; } -#endif + } else if (rela->sym->type == STT_SECTION) { insn->call_dest = find_symbol_by_offset(rela->sym->sec, rela->addend+4); @@ -648,6 +644,8 @@ static int handle_group_alt(struct objtool_file *file, last_new_insn = insn; + insn->ignore = orig_insn->ignore_alts; + if (insn->type != INSN_JUMP_CONDITIONAL && insn->type != INSN_JUMP_UNCONDITIONAL) continue; @@ -729,10 +727,6 @@ static int add_special_section_alts(struct objtool_file *file) goto out; } - /* Ignore retpoline alternatives. 
*/ - if (orig_insn->ignore_alts) - continue; - new_insn = NULL; if (!special_alt->group || special_alt->new_len) { new_insn = find_insn(file, special_alt->new_sec, @@ -1089,11 +1083,11 @@ static int decode_sections(struct objtool_file *file) if (ret) return ret; - ret = add_call_destinations(file); + ret = add_special_section_alts(file); if (ret) return ret; - ret = add_special_section_alts(file); + ret = add_call_destinations(file); if (ret) return ret; @@ -1720,10 +1714,12 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, insn->visited = true; - list_for_each_entry(alt, &insn->alts, list) { - ret = validate_branch(file, alt->insn, state); - if (ret) - return 1; + if (!insn->ignore_alts) { + list_for_each_entry(alt, &insn->alts, list) { + ret = validate_branch(file, alt->insn, state); + if (ret) + return 1; + } } switch (insn->type) { From 3e04e09855c5cbc4f18c2d14bec326b1ff6f11f6 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 29 Jan 2018 22:00:40 -0600 Subject: [PATCH 1723/3326] objtool: Add support for alternatives at the end of a section commit 17bc33914bcc98ba3c6b426fd1c49587a25c0597 Now that the previous patch gave objtool the ability to read retpoline alternatives, it shows a new warning: arch/x86/entry/entry_64.o: warning: objtool: .entry_trampoline: don't know how to handle alternatives at end of section This is due to the JMP_NOSPEC in entry_SYSCALL_64_trampoline(). Previously, objtool ignored this situation because it wasn't needed, and it would have required a bit of extra code. Now that this case exists, add proper support for it. Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Guenter Roeck Cc: H. Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/2a30a3c2158af47d891a76e69bb1ef347e0443fd.1517284349.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/check.c | 53 +++++++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index bc3490d929ffe6..9cd028aa150980 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -594,7 +594,7 @@ static int handle_group_alt(struct objtool_file *file, struct instruction *orig_insn, struct instruction **new_insn) { - struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump; + struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump = NULL; unsigned long dest_off; last_orig_insn = NULL; @@ -610,28 +610,30 @@ static int handle_group_alt(struct objtool_file *file, last_orig_insn = insn; } - if (!next_insn_same_sec(file, last_orig_insn)) { - WARN("%s: don't know how to handle alternatives at end of section", - special_alt->orig_sec->name); - return -1; - } - - fake_jump = malloc(sizeof(*fake_jump)); - if (!fake_jump) { - WARN("malloc failed"); - return -1; + if (next_insn_same_sec(file, last_orig_insn)) { + fake_jump = malloc(sizeof(*fake_jump)); + if (!fake_jump) { + WARN("malloc failed"); + return -1; + } + memset(fake_jump, 0, sizeof(*fake_jump)); + INIT_LIST_HEAD(&fake_jump->alts); + clear_insn_state(&fake_jump->state); + + fake_jump->sec = special_alt->new_sec; + fake_jump->offset = -1; + fake_jump->type = INSN_JUMP_UNCONDITIONAL; + fake_jump->jump_dest = list_next_entry(last_orig_insn, list); + fake_jump->ignore = true; } - memset(fake_jump, 0, sizeof(*fake_jump)); - 
INIT_LIST_HEAD(&fake_jump->alts); - clear_insn_state(&fake_jump->state); - - fake_jump->sec = special_alt->new_sec; - fake_jump->offset = -1; - fake_jump->type = INSN_JUMP_UNCONDITIONAL; - fake_jump->jump_dest = list_next_entry(last_orig_insn, list); - fake_jump->ignore = true; if (!special_alt->new_len) { + if (!fake_jump) { + WARN("%s: empty alternative at end of section", + special_alt->orig_sec->name); + return -1; + } + *new_insn = fake_jump; return 0; } @@ -654,8 +656,14 @@ static int handle_group_alt(struct objtool_file *file, continue; dest_off = insn->offset + insn->len + insn->immediate; - if (dest_off == special_alt->new_off + special_alt->new_len) + if (dest_off == special_alt->new_off + special_alt->new_len) { + if (!fake_jump) { + WARN("%s: alternative jump to end of section", + special_alt->orig_sec->name); + return -1; + } insn->jump_dest = fake_jump; + } if (!insn->jump_dest) { WARN_FUNC("can't find alternative jump destination", @@ -670,7 +678,8 @@ static int handle_group_alt(struct objtool_file *file, return -1; } - list_add(&fake_jump->list, &last_new_insn->list); + if (fake_jump) + list_add(&fake_jump->list, &last_new_insn->list); return 0; } From 23a4ca4e749a300a29abff1e304c55b55b6e59a3 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 29 Jan 2018 22:00:41 -0600 Subject: [PATCH 1724/3326] objtool: Warn on stripped section symbol commit 830c1e3d16b2c1733cd1ec9c8f4d47a398ae31bc With the following fix: 2a0098d70640 ("objtool: Fix seg fault with gold linker") ... a seg fault was avoided, but the original seg fault condition in objtool wasn't fixed. Replace the seg fault with an error message. Suggested-by: Ingo Molnar Signed-off-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Guenter Roeck Cc: H. Peter Anvin Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/dc4585a70d6b975c99fc51d1957ccdde7bd52f3a.1517284349.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/orc_gen.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index e61fe703197baa..18384d9be4e170 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -98,6 +98,11 @@ static int create_orc_entry(struct section *u_sec, struct section *ip_relasec, struct orc_entry *orc; struct rela *rela; + if (!insn_sec->sym) { + WARN("missing symbol for section %s", insn_sec->name); + return -1; + } + /* populate ORC data */ orc = (struct orc_entry *)u_sec->data->d_buf + idx; memcpy(orc, o, sizeof(*orc)); From 04e073072d1494ce9f60cc2db68fb79aee4283dd Mon Sep 17 00:00:00 2001 From: William Grant Date: Tue, 30 Jan 2018 22:22:55 +1100 Subject: [PATCH 1725/3326] x86/mm: Fix overlap of i386 CPU_ENTRY_AREA with FIX_BTMAP commit 55f49fcb879fbeebf2a8c1ac7c9e6d90df55f798 Since commit 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap"), i386's CPU_ENTRY_AREA has been mapped to the memory area just below FIXADDR_START. But already immediately before FIXADDR_START is the FIX_BTMAP area, which means that early_ioremap can collide with the entry area. It's especially bad on PAE where FIX_BTMAP_BEGIN gets aligned to exactly match CPU_ENTRY_AREA_BASE, so the first early_ioremap slot clobbers the IDT and causes interrupts during early boot to reset the system. 
The overlap wasn't a problem before the CPU entry area was introduced, as the fixmap has classically been preceded by the pkmap or vmalloc areas, neither of which is used until early_ioremap is out of the picture. Relocate CPU_ENTRY_AREA to below FIX_BTMAP, not just below the permanent fixmap area. Fixes: commit 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap") Signed-off-by: William Grant Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/7041d181-a019-e8b9-4e4e-48215f841e2c@canonical.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/fixmap.h | 6 ++++-- arch/x86/include/asm/pgtable_32_types.h | 5 +++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 64c4a30e0d3962..e203169931c721 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -137,8 +137,10 @@ enum fixed_addresses { extern void reserve_top_address(unsigned long reserve); -#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXADDR_TOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_TOT_START (FIXADDR_TOP - FIXADDR_TOT_SIZE) extern int fixmaps_set; diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index ce245b0cdfcaa4..0777e18a1d2381 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -44,8 +44,9 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ */ #define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) -#define CPU_ENTRY_AREA_BASE \ - ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK) +#define CPU_ENTRY_AREA_BASE \ + ((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \ + & PMD_MASK) #define PKMAP_BASE \ ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) From 74ae346691dd34d49a5e733af40ca25a4dd42245 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Tue, 30 Jan 2018 14:13:50 +0800 Subject: [PATCH 1726/3326] x86/spectre: Check CONFIG_RETPOLINE in command line parser commit 9471eee9186a46893726e22ebb54cade3f9bc043 The spectre_v2 option 'auto' does not check whether CONFIG_RETPOLINE is enabled. As a consequence it fails to emit the appropriate warning and sets feature flags which have no effect at all. Add the missing IS_ENABLED() check. 
Fixes: da285121560e ("x86/spectre: Add boot time option to select Spectre v2 mitigation") Signed-off-by: Dou Liyang Signed-off-by: Thomas Gleixner Cc: ak@linux.intel.com Cc: peterz@infradead.org Cc: Tomohiro Cc: dave.hansen@intel.com Cc: bp@alien8.de Cc: arjan@linux.intel.com Cc: dwmw@amazon.co.uk Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/f5892721-7528-3647-08fb-f8d10e65ad87@cn.fujitsu.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 3bfb2b23d79c5a..400c34ec9179af 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -213,10 +213,10 @@ static void __init spectre_v2_select_mitigation(void) return; case SPECTRE_V2_CMD_FORCE: - /* FALLTRHU */ case SPECTRE_V2_CMD_AUTO: - goto retpoline_auto; - + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_auto; + break; case SPECTRE_V2_CMD_RETPOLINE_AMD: if (IS_ENABLED(CONFIG_RETPOLINE)) goto retpoline_amd; From 8459ebcbd6ecbbefd6455315985dc0da1a450b13 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sun, 28 Jan 2018 10:38:49 -0800 Subject: [PATCH 1727/3326] x86/entry/64: Remove the SYSCALL64 fast path commit 21d375b6b34ff511a507de27bf316b3dde6938d9 The SYCALLL64 fast path was a nice, if small, optimization back in the good old days when syscalls were actually reasonably fast. Now there is PTI to slow everything down, and indirect branches are verboten, making everything messier. The retpoline code in the fast path is particularly nasty. Just get rid of the fast path. The slow path is barely slower. [ tglx: Split out the 'push all extra regs' part ] Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Acked-by: Ingo Molnar Cc: Borislav Petkov Cc: Linus Torvalds Cc: Kernel Hardening Link: https://lkml.kernel.org/r/462dff8d4d64dfbfc851fbf3130641809d980ecd.1517164461.git.luto@kernel.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 117 ------------------------------------ arch/x86/entry/syscall_64.c | 7 +-- 2 files changed, 2 insertions(+), 122 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 4527630cc21e59..dd103ed7a622d5 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -237,86 +237,11 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) TRACE_IRQS_OFF - /* - * If we need to do entry work or if we guess we'll need to do - * exit work, go straight to the slow path. - */ - movq PER_CPU_VAR(current_task), %r11 - testl $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) - jnz entry_SYSCALL64_slow_path - -entry_SYSCALL_64_fastpath: - /* - * Easy case: enable interrupts and issue the syscall. If the syscall - * needs pt_regs, we'll call a stub that disables interrupts again - * and jumps to the slow path. - */ - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) -#if __SYSCALL_MASK == ~0 - cmpq $__NR_syscall_max, %rax -#else - andl $__SYSCALL_MASK, %eax - cmpl $__NR_syscall_max, %eax -#endif - ja 1f /* return -ENOSYS (already in pt_regs->ax) */ - movq %r10, %rcx - - /* - * This call instruction is handled specially in stub_ptregs_64. - * It might end up jumping to the slow path. If it jumps, RAX - * and all argument registers are clobbered. 
- */ -#ifdef CONFIG_RETPOLINE - movq sys_call_table(, %rax, 8), %rax - call __x86_indirect_thunk_rax -#else - call *sys_call_table(, %rax, 8) -#endif -.Lentry_SYSCALL_64_after_fastpath_call: - - movq %rax, RAX(%rsp) -1: - - /* - * If we get here, then we know that pt_regs is clean for SYSRET64. - * If we see that no exit work is required (which we are required - * to check with IRQs off), then we can go straight to SYSRET64. - */ - DISABLE_INTERRUPTS(CLBR_ANY) - TRACE_IRQS_OFF - movq PER_CPU_VAR(current_task), %r11 - testl $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) - jnz 1f - - LOCKDEP_SYS_EXIT - TRACE_IRQS_ON /* user mode is traced as IRQs on */ - movq RIP(%rsp), %rcx - movq EFLAGS(%rsp), %r11 - addq $6*8, %rsp /* skip extra regs -- they were preserved */ - UNWIND_HINT_EMPTY - jmp .Lpop_c_regs_except_rcx_r11_and_sysret - -1: - /* - * The fast path looked good when we started, but something changed - * along the way and we need to switch to the slow path. Calling - * raise(3) will trigger this, for example. IRQs are off. - */ - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_ANY) - SAVE_EXTRA_REGS - movq %rsp, %rdi - call syscall_return_slowpath /* returns with IRQs disabled */ - jmp return_from_SYSCALL_64 - -entry_SYSCALL64_slow_path: /* IRQs are off. */ SAVE_EXTRA_REGS movq %rsp, %rdi call do_syscall_64 /* returns with IRQs disabled */ -return_from_SYSCALL_64: TRACE_IRQS_IRETQ /* we're about to change IF */ /* @@ -389,7 +314,6 @@ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ UNWIND_HINT_EMPTY POP_EXTRA_REGS -.Lpop_c_regs_except_rcx_r11_and_sysret: popq %rsi /* skip r11 */ popq %r10 popq %r9 @@ -420,47 +344,6 @@ syscall_return_via_sysret: USERGS_SYSRET64 END(entry_SYSCALL_64) -ENTRY(stub_ptregs_64) - /* - * Syscalls marked as needing ptregs land here. - * If we are on the fast path, we need to save the extra regs, - * which we achieve by trying again on the slow path. If we are on - * the slow path, the extra regs are already saved. - * - * RAX stores a pointer to the C function implementing the syscall. - * IRQs are on. 
- */ - cmpq $.Lentry_SYSCALL_64_after_fastpath_call, (%rsp) - jne 1f - - /* - * Called from fast path -- disable IRQs again, pop return address - * and jump to slow path - */ - DISABLE_INTERRUPTS(CLBR_ANY) - TRACE_IRQS_OFF - popq %rax - UNWIND_HINT_REGS extra=0 - jmp entry_SYSCALL64_slow_path - -1: - JMP_NOSPEC %rax /* Called from C */ -END(stub_ptregs_64) - -.macro ptregs_stub func -ENTRY(ptregs_\func) - UNWIND_HINT_FUNC - leaq \func(%rip), %rax - jmp stub_ptregs_64 -END(ptregs_\func) -.endm - -/* Instantiate ptregs_stub for each ptregs-using syscall */ -#define __SYSCALL_64_QUAL_(sym) -#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym -#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym) -#include - /* * %rdi: prev task * %rsi: next task diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c index 9c09775e589d67..c176d2fab1da98 100644 --- a/arch/x86/entry/syscall_64.c +++ b/arch/x86/entry/syscall_64.c @@ -7,14 +7,11 @@ #include #include -#define __SYSCALL_64_QUAL_(sym) sym -#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym - -#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); #include #undef __SYSCALL_64 -#define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym), +#define __SYSCALL_64(nr, sym, qual) [nr] = sym, extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); From 90522d30b223fe35957fc8c8ee3b7a63dc9844b1 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sun, 28 Jan 2018 10:38:49 -0800 Subject: [PATCH 1728/3326] x86/entry/64: Push extra regs right away commit d1f7732009e0549eedf8ea1db948dc37be77fd46 With the fast path removed there is no point in splitting the push of the normal and the extra register set. Just push the extra regs right away. [ tglx: Split out from 'x86/entry/64: Remove the SYSCALL64 fast path' ] Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Acked-by: Ingo Molnar Cc: Borislav Petkov Cc: Linus Torvalds Cc: Kernel Hardening Link: https://lkml.kernel.org/r/462dff8d4d64dfbfc851fbf3130641809d980ecd.1517164461.git.luto@kernel.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index dd103ed7a622d5..16e2d72e79a097 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -232,13 +232,17 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) pushq %r9 /* pt_regs->r9 */ pushq %r10 /* pt_regs->r10 */ pushq %r11 /* pt_regs->r11 */ - sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */ - UNWIND_HINT_REGS extra=0 + pushq %rbx /* pt_regs->rbx */ + pushq %rbp /* pt_regs->rbp */ + pushq %r12 /* pt_regs->r12 */ + pushq %r13 /* pt_regs->r13 */ + pushq %r14 /* pt_regs->r14 */ + pushq %r15 /* pt_regs->r15 */ + UNWIND_HINT_REGS TRACE_IRQS_OFF /* IRQs are off. 
*/ - SAVE_EXTRA_REGS movq %rsp, %rdi call do_syscall_64 /* returns with IRQs disabled */ From fb9f2d9ab83d9ab1d5286db9432266b353c201c1 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sun, 28 Jan 2018 10:38:50 -0800 Subject: [PATCH 1729/3326] x86/asm: Move 'status' from thread_struct to thread_info commit 37a8f7c38339b22b69876d6f5a0ab851565284e3 The TS_COMPAT bit is very hot and is accessed from code paths that mostly also touch thread_info::flags. Move it into struct thread_info to improve cache locality. The only reason it was in thread_struct is that there was a brief period during which arch-specific fields were not allowed in struct thread_info. Linus suggested further changing: ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED); to: if (unlikely(ti->status & (TS_COMPAT|TS_I386_REGS_POKED))) ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED); on the theory that frequently dirtying the cacheline even in pure 64-bit code that never needs to modify status hurts performance. That could be a reasonable followup patch, but I suspect it matters less on top of this patch. Suggested-by: Linus Torvalds Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar Acked-by: Linus Torvalds Cc: Borislav Petkov Cc: Kernel Hardening Link: https://lkml.kernel.org/r/03148bcc1b217100e6e8ecf6a5468c45cf4304b6.1517164461.git.luto@kernel.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/common.c | 4 ++-- arch/x86/include/asm/processor.h | 2 -- arch/x86/include/asm/syscall.h | 6 +++--- arch/x86/include/asm/thread_info.h | 3 ++- arch/x86/kernel/process_64.c | 4 ++-- arch/x86/kernel/ptrace.c | 2 +- arch/x86/kernel/signal.c | 2 +- 7 files changed, 11 insertions(+), 12 deletions(-) diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 03505ffbe1b68d..4858911eea9d13 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -208,7 +208,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs) * special case only applies after poking regs and before the * very next return to user mode. */ - current->thread.status &= ~(TS_COMPAT|TS_I386_REGS_POKED); + ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED); #endif user_enter_irqoff(); @@ -306,7 +306,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs) unsigned int nr = (unsigned int)regs->orig_ax; #ifdef CONFIG_IA32_EMULATION - current->thread.status |= TS_COMPAT; + ti->status |= TS_COMPAT; #endif if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) { diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 881ca3b1d6d49f..c57c6e77c29ffb 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -459,8 +459,6 @@ struct thread_struct { unsigned short gsindex; #endif - u32 status; /* thread synchronous flags */ - #ifdef CONFIG_X86_64 unsigned long fsbase; unsigned long gsbase; diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index e3c95e8e61c5fc..03eedc21246d5b 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -60,7 +60,7 @@ static inline long syscall_get_error(struct task_struct *task, * TS_COMPAT is set for 32-bit syscall entries and then * remains set until we return to user mode. */ - if (task->thread.status & (TS_COMPAT|TS_I386_REGS_POKED)) + if (task->thread_info.status & (TS_COMPAT|TS_I386_REGS_POKED)) /* * Sign-extend the value so (int)-EFOO becomes (long)-EFOO * and will match correctly in comparisons. 
@@ -116,7 +116,7 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned long *args) { # ifdef CONFIG_IA32_EMULATION - if (task->thread.status & TS_COMPAT) + if (task->thread_info.status & TS_COMPAT) switch (i) { case 0: if (!n--) break; @@ -177,7 +177,7 @@ static inline void syscall_set_arguments(struct task_struct *task, const unsigned long *args) { # ifdef CONFIG_IA32_EMULATION - if (task->thread.status & TS_COMPAT) + if (task->thread_info.status & TS_COMPAT) switch (i) { case 0: if (!n--) break; diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 00223333821a96..eda3b6823ca432 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -55,6 +55,7 @@ struct task_struct; struct thread_info { unsigned long flags; /* low level flags */ + u32 status; /* thread synchronous flags */ }; #define INIT_THREAD_INFO(tsk) \ @@ -221,7 +222,7 @@ static inline int arch_within_stack_frames(const void * const stack, #define in_ia32_syscall() true #else #define in_ia32_syscall() (IS_ENABLED(CONFIG_IA32_EMULATION) && \ - current->thread.status & TS_COMPAT) + current_thread_info()->status & TS_COMPAT) #endif /* diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index c7546623201631..9eb448c7859d24 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -557,7 +557,7 @@ static void __set_personality_x32(void) * Pretend to come from a x32 execve. */ task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT; - current->thread.status &= ~TS_COMPAT; + current_thread_info()->status &= ~TS_COMPAT; #endif } @@ -571,7 +571,7 @@ static void __set_personality_ia32(void) current->personality |= force_personality32; /* Prepare the first "return" to user space */ task_pt_regs(current)->orig_ax = __NR_ia32_execve; - current->thread.status |= TS_COMPAT; + current_thread_info()->status |= TS_COMPAT; #endif } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index f37d18124648fb..ed5c4cdf0a348d 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -935,7 +935,7 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value) */ regs->orig_ax = value; if (syscall_get_nr(child, regs) >= 0) - child->thread.status |= TS_I386_REGS_POKED; + child->thread_info.status |= TS_I386_REGS_POKED; break; case offsetof(struct user32, regs.eflags): diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index b9e00e8f1c9b63..4cdc0b27ec82fd 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -787,7 +787,7 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) * than the tracee. */ #ifdef CONFIG_IA32_EMULATION - if (current->thread.status & (TS_COMPAT|TS_I386_REGS_POKED)) + if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED)) return __NR_ia32_restart_syscall; #endif #ifdef CONFIG_X86_X32_ABI From e72041f70c3c7d149a8c444262173b7e70e3d4f8 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 29 Jan 2018 17:02:16 -0800 Subject: [PATCH 1730/3326] Documentation: Document array_index_nospec commit f84a56f73dddaeac1dba8045b007f742f61cd2da Document the rationale and usage of the new array_index_nospec() helper. 
Signed-off-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Reviewed-by: Kees Cook Cc: linux-arch@vger.kernel.org Cc: Jonathan Corbet Cc: Peter Zijlstra Cc: gregkh@linuxfoundation.org Cc: kernel-hardening@lists.openwall.com Cc: torvalds@linux-foundation.org Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727413645.33451.15878817161436755393.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- Documentation/speculation.txt | 90 +++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 Documentation/speculation.txt diff --git a/Documentation/speculation.txt b/Documentation/speculation.txt new file mode 100644 index 00000000000000..e9e6cbae284164 --- /dev/null +++ b/Documentation/speculation.txt @@ -0,0 +1,90 @@ +This document explains potential effects of speculation, and how undesirable +effects can be mitigated portably using common APIs. + +=========== +Speculation +=========== + +To improve performance and minimize average latencies, many contemporary CPUs +employ speculative execution techniques such as branch prediction, performing +work which may be discarded at a later stage. + +Typically speculative execution cannot be observed from architectural state, +such as the contents of registers. However, in some cases it is possible to +observe its impact on microarchitectural state, such as the presence or +absence of data in caches. Such state may form side-channels which can be +observed to extract secret information. + +For example, in the presence of branch prediction, it is possible for bounds +checks to be ignored by code which is speculatively executed. Consider the +following code: + + int load_array(int *array, unsigned int index) + { + if (index >= MAX_ARRAY_ELEMS) + return 0; + else + return array[index]; + } + +Which, on arm64, may be compiled to an assembly sequence such as: + + CMP , #MAX_ARRAY_ELEMS + B.LT less + MOV , #0 + RET + less: + LDR , [, ] + RET + +It is possible that a CPU mis-predicts the conditional branch, and +speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This +value will subsequently be discarded, but the speculated load may affect +microarchitectural state which can be subsequently measured. + +More complex sequences involving multiple dependent memory accesses may +result in sensitive information being leaked. Consider the following +code, building on the prior example: + + int load_dependent_arrays(int *arr1, int *arr2, int index) + { + int val1, val2, + + val1 = load_array(arr1, index); + val2 = load_array(arr2, val1); + + return val2; + } + +Under speculation, the first call to load_array() may return the value +of an out-of-bounds address, while the second call will influence +microarchitectural state dependent on this value. This may provide an +arbitrary read primitive. + +==================================== +Mitigating speculation side-channels +==================================== + +The kernel provides a generic API to ensure that bounds checks are +respected even under speculation. Architectures which are affected by +speculation-based side-channels are expected to implement these +primitives. + +The array_index_nospec() helper in can be used to +prevent information from being leaked via side-channels. + +A call to array_index_nospec(index, size) returns a sanitized index +value that is bounded to [0, size) even under cpu speculation +conditions. 
+ +This can be used to protect the earlier load_array() example: + + int load_array(int *array, unsigned int index) + { + if (index >= MAX_ARRAY_ELEMS) + return 0; + else { + index = array_index_nospec(index, MAX_ARRAY_ELEMS); + return array[index]; + } + } From 478742cf80b732f6c3227c572184e18b6e8bcb05 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:02:22 -0800 Subject: [PATCH 1731/3326] array_index_nospec: Sanitize speculative array de-references commit f3804203306e098dae9ca51540fcd5eb700d7f40 array_index_nospec() is proposed as a generic mechanism to mitigate against Spectre-variant-1 attacks, i.e. an attack that bypasses boundary checks via speculative execution. The array_index_nospec() implementation is expected to be safe for current generation CPUs across multiple architectures (ARM, x86). Based on an original implementation by Linus Torvalds, tweaked to remove speculative flows by Alexei Starovoitov, and tweaked again by Linus to introduce an x86 assembly implementation for the mask generation. Co-developed-by: Linus Torvalds Co-developed-by: Alexei Starovoitov Suggested-by: Cyril Novikov Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: kernel-hardening@lists.openwall.com Cc: Peter Zijlstra Cc: Catalin Marinas Cc: Will Deacon Cc: Russell King Cc: gregkh@linuxfoundation.org Cc: torvalds@linux-foundation.org Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727414229.33451.18411580953862676575.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- include/linux/nospec.h | 72 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 include/linux/nospec.h diff --git a/include/linux/nospec.h b/include/linux/nospec.h new file mode 100644 index 00000000000000..b99bced39ac2fa --- /dev/null +++ b/include/linux/nospec.h @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright(c) 2018 Linus Torvalds. All rights reserved. +// Copyright(c) 2018 Alexei Starovoitov. All rights reserved. +// Copyright(c) 2018 Intel Corporation. All rights reserved. + +#ifndef _LINUX_NOSPEC_H +#define _LINUX_NOSPEC_H + +/** + * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise + * @index: array element index + * @size: number of elements in array + * + * When @index is out of bounds (@index >= @size), the sign bit will be + * set. Extend the sign bit to all bits and invert, giving a result of + * zero for an out of bounds index, or ~0 if within bounds [0, @size). + */ +#ifndef array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Warn developers about inappropriate array_index_nospec() usage. + * + * Even if the CPU speculates past the WARN_ONCE branch, the + * sign bit of @index is taken into account when generating the + * mask. + * + * This warning is compiled out when the compiler can infer that + * @index and @size are less than LONG_MAX. + */ + if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, + "array_index_nospec() limited to range of [0, LONG_MAX]\n")) + return 0; + + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. The compiler does not take + * into account the value of @index under speculation. 
+ */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} +#endif + +/* + * array_index_nospec - sanitize an array index after a bounds check + * + * For a code sequence like: + * + * if (index < size) { + * index = array_index_nospec(index, size); + * val = array[index]; + * } + * + * ...if the CPU speculates past the bounds check then + * array_index_nospec() will clamp the index within the range of [0, + * size). + */ +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + _i &= _mask; \ + _i; \ +}) +#endif /* _LINUX_NOSPEC_H */ From 4820d42835b2741fc18b8f6135c90dc1128974bc Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:02:28 -0800 Subject: [PATCH 1732/3326] x86: Implement array_index_mask_nospec commit babdde2698d482b6c0de1eab4f697cf5856c5859 array_index_nospec() uses a mask to sanitize user controllable array indexes, i.e. generate a 0 mask if 'index' >= 'size', and a ~0 mask otherwise. While the default array_index_mask_nospec() handles the carry-bit from the (index - size) result in software. The x86 array_index_mask_nospec() does the same, but the carry-bit is handled in the processor CF flag without conditional instructions in the control flow. Suggested-by: Linus Torvalds Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727414808.33451.1873237130672785331.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/barrier.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 01727dbc294a32..551ccbe07c0ff9 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -24,6 +24,30 @@ #define wmb() asm volatile("sfence" ::: "memory") #endif +/** + * array_index_mask_nospec() - generate a mask that is ~0UL when the + * bounds check succeeds and 0 otherwise + * @index: array element index + * @size: number of elements in array + * + * Returns: + * 0 - (index < size) + */ +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + unsigned long mask; + + asm ("cmp %1,%2; sbb %0,%0;" + :"=r" (mask) + :"r"(size),"r" (index) + :"cc"); + return mask; +} + +/* Override the default implementation from linux/nospec.h. */ +#define array_index_mask_nospec array_index_mask_nospec + #ifdef CONFIG_X86_PPRO_FENCE #define dma_rmb() rmb() #else From 437ac7b6868da05620764c143c0e4f7aa91e8fc7 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:02:33 -0800 Subject: [PATCH 1733/3326] x86: Introduce barrier_nospec commit b3d7ad85b80bbc404635dca80f5b129f6242bc7a Rename the open coded form of this instruction sequence from rdtsc_ordered() into a generic barrier primitive, barrier_nospec(). One of the mitigations for Spectre variant1 vulnerabilities is to fence speculative execution after successfully validating a bounds check. I.e. force the result of a bounds check to resolve in the instruction pipeline to ensure speculative execution honors that result before potentially operating on out-of-bounds data. No functional changes. 
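[ Editorial sketch, not part of the patch: how such a barrier is meant to be placed. The open-coded LFENCE below is only an illustrative stand-in; the kernel's barrier_nospec() is patched in via alternatives as MFENCE or LFENCE depending on the CPU. The fence sits between the bounds check and the dependent load so the check resolves before any speculative access. ]

    #include <stddef.h>

    static int table[16];

    /* Bounds-checked load with a fence after the check; the inline
     * LFENCE stands in for barrier_nospec(). */
    int load_checked(size_t index)
    {
        if (index >= sizeof(table) / sizeof(table[0]))
            return 0;

        __asm__ __volatile__("lfence" ::: "memory");  /* speculation barrier */
        return table[index];
    }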
Suggested-by: Linus Torvalds Suggested-by: Andi Kleen Suggested-by: Ingo Molnar Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: Tom Lendacky Cc: Kees Cook Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: Al Viro Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727415361.33451.9049453007262764675.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/barrier.h | 4 ++++ arch/x86/include/asm/msr.h | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 551ccbe07c0ff9..1e7c955b630358 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -48,6 +48,10 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, /* Override the default implementation from linux/nospec.h. */ #define array_index_mask_nospec array_index_mask_nospec +/* Prevent speculative execution past this barrier. */ +#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \ + "lfence", X86_FEATURE_LFENCE_RDTSC) + #ifdef CONFIG_X86_PPRO_FENCE #define dma_rmb() rmb() #else diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 07962f5f6fba8d..30df295f6d94c8 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -214,8 +214,7 @@ static __always_inline unsigned long long rdtsc_ordered(void) * that some other imaginary CPU is updating continuously with a * time stamp. */ - alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, - "lfence", X86_FEATURE_LFENCE_RDTSC); + barrier_nospec(); return rdtsc(); } From 31c5b332189ec6692ddd92269261db93c835a4d9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:02:39 -0800 Subject: [PATCH 1734/3326] x86: Introduce __uaccess_begin_nospec() and uaccess_try_nospec commit b3bbfb3fb5d25776b8e3f361d2eedaabb0b496cd For __get_user() paths, do not allow the kernel to speculate on the value of a user controlled pointer. In addition to the 'stac' instruction for Supervisor Mode Access Protection (SMAP), a barrier_nospec() causes the access_ok() result to resolve in the pipeline before the CPU might take any speculative action on the pointer value. Given the cost of 'stac' the speculation barrier is placed after 'stac' to hopefully overlap the cost of disabling SMAP with the cost of flushing the instruction pipeline. Since __get_user is a major kernel interface that deals with user controlled pointers, the __uaccess_begin_nospec() mechanism will prevent speculative execution past an access_ok() permission check. While speculative execution past access_ok() is not enough to lead to a kernel memory leak, it is a necessary precondition. To be clear, __uaccess_begin_nospec() is addressing a class of potential problems near __get_user() usages. Note, that while the barrier_nospec() in __uaccess_begin_nospec() is used to protect __get_user(), pointer masking similar to array_index_nospec() will be used for get_user() since it incorporates a bounds check near the usage. uaccess_try_nospec provides the same mechanism for get_user_try. No functional changes. 
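[ Editorial sketch, not part of the patch: the "check far from the dereference" shape that motivates __uaccess_begin_nospec(). access_ok_stub(), the fake limit and the error value are made up for illustration; this is not the kernel's uaccess code. The fence closes the window between validating the user pointer and dereferencing it. ]

    #include <stdint.h>

    #define FAKE_USER_LIMIT 0x7fffffffffffUL        /* illustrative address limit */

    static int access_ok_stub(const void *uptr, unsigned long len)
    {
        uintptr_t p = (uintptr_t)uptr;
        return p + len >= p && p + len <= FAKE_USER_LIMIT;
    }

    /* Shape of a __get_user()-style path: validate, fence, then dereference.
     * The check may sit far from the load, so speculation could otherwise
     * run ahead with an unchecked user pointer. */
    int fetch_user_int(const int *uptr, int *out)
    {
        if (!access_ok_stub(uptr, sizeof(*uptr)))
            return -14;                             /* -EFAULT */

        __asm__ __volatile__("lfence" ::: "memory"); /* barrier_nospec() stand-in */
        *out = *uptr;                                /* user pointer dereference */
        return 0;
    }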
Suggested-by: Linus Torvalds Suggested-by: Andi Kleen Suggested-by: Ingo Molnar Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: Tom Lendacky Cc: Kees Cook Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: Al Viro Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727415922.33451.5796614273104346583.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/uaccess.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 574dff4d2913d1..663e9bde9fc969 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -124,6 +124,11 @@ extern int __get_user_bad(void); #define __uaccess_begin() stac() #define __uaccess_end() clac() +#define __uaccess_begin_nospec() \ +({ \ + stac(); \ + barrier_nospec(); \ +}) /* * This is a type: either unsigned long, if the argument fits into @@ -487,6 +492,10 @@ struct __large_struct { unsigned long buf[100]; }; __uaccess_begin(); \ barrier(); +#define uaccess_try_nospec do { \ + current->thread.uaccess_err = 0; \ + __uaccess_begin_nospec(); \ + #define uaccess_catch(err) \ __uaccess_end(); \ (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \ From 2406eb9f4568098f2353f1c9267dbe7e7f4a546b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:02:44 -0800 Subject: [PATCH 1735/3326] x86/usercopy: Replace open coded stac/clac with __uaccess_{begin, end} commit b5c4ae4f35325d520b230bab6eb3310613b72ac1 In preparation for converting some __uaccess_begin() instances to __uacess_begin_nospec(), make sure all 'from user' uaccess paths are using the _begin(), _end() helpers rather than open-coded stac() and clac(). No functional changes. 
Suggested-by: Ingo Molnar Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: Tom Lendacky Cc: Kees Cook Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: Al Viro Cc: torvalds@linux-foundation.org Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727416438.33451.17309465232057176966.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/usercopy_32.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 1b377f734e64a8..de3436719e26df 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -331,12 +331,12 @@ do { \ unsigned long __copy_user_ll(void *to, const void *from, unsigned long n) { - stac(); + __uaccess_begin(); if (movsl_is_ok(to, from, n)) __copy_user(to, from, n); else n = __copy_user_intel(to, from, n); - clac(); + __uaccess_end(); return n; } EXPORT_SYMBOL(__copy_user_ll); @@ -344,7 +344,7 @@ EXPORT_SYMBOL(__copy_user_ll); unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long n) { - stac(); + __uaccess_begin(); #ifdef CONFIG_X86_INTEL_USERCOPY if (n > 64 && static_cpu_has(X86_FEATURE_XMM2)) n = __copy_user_intel_nocache(to, from, n); @@ -353,7 +353,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr #else __copy_user(to, from, n); #endif - clac(); + __uaccess_end(); return n; } EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); From 5f40de41ccae0c17105d8336e44d704dc5500a50 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:02:49 -0800 Subject: [PATCH 1736/3326] x86/uaccess: Use __uaccess_begin_nospec() and uaccess_try_nospec commit 304ec1b050310548db33063e567123fae8fd0301 Quoting Linus: I do think that it would be a good idea to very expressly document the fact that it's not that the user access itself is unsafe. I do agree that things like "get_user()" want to be protected, but not because of any direct bugs or problems with get_user() and friends, but simply because get_user() is an excellent source of a pointer that is obviously controlled from a potentially attacking user space. So it's a prime candidate for then finding _subsequent_ accesses that can then be used to perturb the cache. __uaccess_begin_nospec() covers __get_user() and copy_from_iter() where the limit check is far away from the user pointer de-reference. In those cases a barrier_nospec() prevents speculation with a potential pointer to privileged memory. uaccess_try_nospec covers get_user_try. 
Suggested-by: Linus Torvalds Suggested-by: Andi Kleen Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: Kees Cook Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: Al Viro Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727416953.33451.10508284228526170604.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/uaccess.h | 6 +++--- arch/x86/include/asm/uaccess_32.h | 6 +++--- arch/x86/include/asm/uaccess_64.h | 12 ++++++------ arch/x86/lib/usercopy_32.c | 4 ++-- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 663e9bde9fc969..aae77eb8491c0d 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -450,7 +450,7 @@ do { \ ({ \ int __gu_err; \ __inttype(*(ptr)) __gu_val; \ - __uaccess_begin(); \ + __uaccess_begin_nospec(); \ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ __uaccess_end(); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ @@ -557,7 +557,7 @@ struct __large_struct { unsigned long buf[100]; }; * get_user_ex(...); * } get_user_catch(err) */ -#define get_user_try uaccess_try +#define get_user_try uaccess_try_nospec #define get_user_catch(err) uaccess_catch(err) #define get_user_ex(x, ptr) do { \ @@ -591,7 +591,7 @@ extern void __cmpxchg_wrong_size(void) __typeof__(ptr) __uval = (uval); \ __typeof__(*(ptr)) __old = (old); \ __typeof__(*(ptr)) __new = (new); \ - __uaccess_begin(); \ + __uaccess_begin_nospec(); \ switch (size) { \ case 1: \ { \ diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 72950401b22374..ba2dc193063037 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -29,21 +29,21 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) switch (n) { case 1: ret = 0; - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u8 *)to, from, ret, "b", "b", "=q", 1); __uaccess_end(); return ret; case 2: ret = 0; - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u16 *)to, from, ret, "w", "w", "=r", 2); __uaccess_end(); return ret; case 4: ret = 0; - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u32 *)to, from, ret, "l", "k", "=r", 4); __uaccess_end(); diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index f07ef3c575db0b..62546b3a398eb9 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -55,31 +55,31 @@ raw_copy_from_user(void *dst, const void __user *src, unsigned long size) return copy_user_generic(dst, (__force void *)src, size); switch (size) { case 1: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src, ret, "b", "b", "=q", 1); __uaccess_end(); return ret; case 2: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src, ret, "w", "w", "=r", 2); __uaccess_end(); return ret; case 4: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src, ret, "l", "k", "=r", 4); __uaccess_end(); return ret; case 8: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 8); __uaccess_end(); return ret; case 10: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", 
"", "=r", 10); if (likely(!ret)) @@ -89,7 +89,7 @@ raw_copy_from_user(void *dst, const void __user *src, unsigned long size) __uaccess_end(); return ret; case 16: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 16); if (likely(!ret)) diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index de3436719e26df..7add8ba06887ef 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -331,7 +331,7 @@ do { \ unsigned long __copy_user_ll(void *to, const void *from, unsigned long n) { - __uaccess_begin(); + __uaccess_begin_nospec(); if (movsl_is_ok(to, from, n)) __copy_user(to, from, n); else @@ -344,7 +344,7 @@ EXPORT_SYMBOL(__copy_user_ll); unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long n) { - __uaccess_begin(); + __uaccess_begin_nospec(); #ifdef CONFIG_X86_INTEL_USERCOPY if (n > 64 && static_cpu_has(X86_FEATURE_XMM2)) n = __copy_user_intel_nocache(to, from, n); From edaf1538d3a5620862544a20aab14cb601787e1a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:02:54 -0800 Subject: [PATCH 1737/3326] x86/get_user: Use pointer masking to limit speculation commit c7f631cb07e7da06ac1d231ca178452339e32a94 Quoting Linus: I do think that it would be a good idea to very expressly document the fact that it's not that the user access itself is unsafe. I do agree that things like "get_user()" want to be protected, but not because of any direct bugs or problems with get_user() and friends, but simply because get_user() is an excellent source of a pointer that is obviously controlled from a potentially attacking user space. So it's a prime candidate for then finding _subsequent_ accesses that can then be used to perturb the cache. Unlike the __get_user() case get_user() includes the address limit check near the pointer de-reference. With that locality the speculation can be mitigated with pointer narrowing rather than a barrier, i.e. array_index_nospec(). Where the narrowing is performed by: cmp %limit, %ptr sbb %mask, %mask and %mask, %ptr With respect to speculation the value of %ptr is either less than %limit or NULL. 
Co-developed-by: Linus Torvalds Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: Kees Cook Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: Al Viro Cc: Andy Lutomirski Cc: torvalds@linux-foundation.org Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727417469.33451.11804043010080838495.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/getuser.S | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index c97d935a29e802..49b167f732151e 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -40,6 +40,8 @@ ENTRY(__get_user_1) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 1: movzbl (%_ASM_AX),%edx xor %eax,%eax @@ -54,6 +56,8 @@ ENTRY(__get_user_2) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 2: movzwl -1(%_ASM_AX),%edx xor %eax,%eax @@ -68,6 +72,8 @@ ENTRY(__get_user_4) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 3: movl -3(%_ASM_AX),%edx xor %eax,%eax @@ -83,6 +89,8 @@ ENTRY(__get_user_8) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 4: movq -7(%_ASM_AX),%rdx xor %eax,%eax @@ -94,6 +102,8 @@ ENTRY(__get_user_8) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user_8 + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 4: movl -7(%_ASM_AX),%edx 5: movl -3(%_ASM_AX),%ecx From 0035134041207f990e0756e2a6f63b7dc3bfe95b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:02:59 -0800 Subject: [PATCH 1738/3326] x86/syscall: Sanitize syscall table de-references under speculation commit 2fbd7af5af8665d18bcefae3e9700be07e22b681 The syscall table base is a user controlled function pointer in kernel space. Use array_index_nospec() to prevent any out of bounds speculation. While retpoline prevents speculating into a userspace directed target it does not stop the pointer de-reference, the concern is leaking memory relative to the syscall table base, by observing instruction cache behavior. Reported-by: Linus Torvalds Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: Andy Lutomirski Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727417984.33451.1216731042505722161.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/common.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 4858911eea9d13..60e21ccfb6d6c0 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -284,7 +285,8 @@ __visible void do_syscall_64(struct pt_regs *regs) * regs->orig_ax, which changes the behavior of some syscalls. 
*/ if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) { - regs->ax = sys_call_table[nr & __SYSCALL_MASK]( + nr = array_index_nospec(nr & __SYSCALL_MASK, NR_syscalls); + regs->ax = sys_call_table[nr]( regs->di, regs->si, regs->dx, regs->r10, regs->r8, regs->r9); } @@ -320,6 +322,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs) } if (likely(nr < IA32_NR_syscalls)) { + nr = array_index_nospec(nr, IA32_NR_syscalls); /* * It's possible that a 32-bit syscall implementation * takes a 64-bit parameter but nonetheless assumes that From 98116c32d3b4b60bc1add46a81ed4f991ef02a7d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:03:05 -0800 Subject: [PATCH 1739/3326] vfs, fdtable: Prevent bounds-check bypass via speculative execution commit 56c30ba7b348b90484969054d561f711ba196507 'fd' is a user controlled value that is used as a data dependency to read from the 'fdt->fd' array. In order to avoid potential leaks of kernel memory values, block speculative execution of the instruction stream that could issue reads based on an invalid 'file *' returned from __fcheck_files. Co-developed-by: Elena Reshetova Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: Al Viro Cc: torvalds@linux-foundation.org Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727418500.33451.17392199002892248656.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- include/linux/fdtable.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 1c65817673db17..41615f38bcff3e 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -82,8 +83,10 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i { struct fdtable *fdt = rcu_dereference_raw(files->fdt); - if (fd < fdt->max_fds) + if (fd < fdt->max_fds) { + fd = array_index_nospec(fd, fdt->max_fds); return rcu_dereference_raw(fdt->fd[fd]); + } return NULL; } From c9daf8144642b9daad69ab50e9cba5ecea2c8e25 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:03:15 -0800 Subject: [PATCH 1740/3326] nl80211: Sanitize array index in parse_txq_params commit 259d8c1e984318497c84eef547bbb6b1d9f4eb05 Wireless drivers rely on parse_txq_params to validate that txq_params->ac is less than NL80211_NUM_ACS by the time the low-level driver's ->conf_tx() handler is called. Use a new helper, array_index_nospec(), to sanitize txq_params->ac with respect to speculation. I.e. ensure that any speculation into ->conf_tx() handlers is done with a value of txq_params->ac that is within the bounds of [0, NL80211_NUM_ACS). Reported-by: Christian Lamparter Reported-by: Elena Reshetova Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Acked-by: Johannes Berg Cc: linux-arch@vger.kernel.org Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: linux-wireless@vger.kernel.org Cc: torvalds@linux-foundation.org Cc: "David S. 
Miller" Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727419584.33451.7700736761686184303.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- net/wireless/nl80211.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d396cb61a280d2..81bef0676e1db2 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -2056,20 +2057,22 @@ static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = { static int parse_txq_params(struct nlattr *tb[], struct ieee80211_txq_params *txq_params) { + u8 ac; + if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] || !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || !tb[NL80211_TXQ_ATTR_AIFS]) return -EINVAL; - txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]); + ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]); txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); - if (txq_params->ac >= NL80211_NUM_ACS) + if (ac >= NL80211_NUM_ACS) return -EINVAL; - + txq_params->ac = array_index_nospec(ac, NL80211_NUM_ACS); return 0; } From 863b308dbb1945949529a0e523c34636430884a7 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 Jan 2018 17:03:21 -0800 Subject: [PATCH 1741/3326] x86/spectre: Report get_user mitigation for spectre_v1 commit edfbae53dab8348fca778531be9f4855d2ca0360 Reflect the presence of get_user(), __get_user(), and 'syscall' protections in sysfs. The expectation is that new and better tooling will allow the kernel to grow more usages of array_index_nospec(), for now, only claim mitigation for __user pointer de-references. Reported-by: Jiri Slaby Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: torvalds@linux-foundation.org Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727420158.33451.11658324346540434635.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 400c34ec9179af..fdb6068a23b51e 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -297,7 +297,7 @@ ssize_t cpu_show_spectre_v1(struct device *dev, { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) return sprintf(buf, "Not affected\n"); - return sprintf(buf, "Vulnerable\n"); + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); } ssize_t cpu_show_spectre_v2(struct device *dev, From c962dfa4aca010e05730ff458210b04c4eb9af14 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 30 Jan 2018 19:32:18 +0000 Subject: [PATCH 1742/3326] x86/spectre: Fix spelling mistake: "vunerable"-> "vulnerable" commit e698dcdfcda41efd0984de539767b4cddd235f1e Trivial fix to spelling mistake in pr_err error message text. 
Signed-off-by: Colin Ian King Signed-off-by: Thomas Gleixner Cc: Andi Kleen Cc: Greg Kroah-Hartman Cc: kernel-janitors@vger.kernel.org Cc: Andy Lutomirski Cc: Borislav Petkov Cc: David Woodhouse Link: https://lkml.kernel.org/r/20180130193218.9271-1-colin.king@canonical.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index fdb6068a23b51e..dd3a3cc84591ff 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -103,7 +103,7 @@ bool retpoline_module_ok(bool has_retpoline) if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) return true; - pr_err("System may be vunerable to spectre v2\n"); + pr_err("System may be vulnerable to spectre v2\n"); spectre_v2_bad_module = true; return false; } From 85543d7613c481392d564a4956850591b2025f5d Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 30 Jan 2018 14:30:23 +0000 Subject: [PATCH 1743/3326] x86/cpuid: Fix up "virtual" IBRS/IBPB/STIBP feature bits on Intel commit 7fcae1118f5fd44a862aa5c3525248e35ee67c3b Despite the fact that all the other code there seems to be doing it, just using set_cpu_cap() in early_intel_init() doesn't actually work. For CPUs with PKU support, setup_pku() calls get_cpu_cap() after c->c_init() has set those feature bits. That resets those bits back to what was queried from the hardware. Turning the bits off for bad microcode is easy to fix. That can just use setup_clear_cpu_cap() to force them off for all CPUs. I was less keen on forcing the feature bits *on* that way, just in case of inconsistencies. I appreciate that the kernel is going to get this utterly wrong if CPU features are not consistent, because it has already applied alternatives by the time secondary CPUs are brought up. But at least if setup_force_cpu_cap() isn't being used, we might have a chance of *detecting* the lack of the corresponding bit and either panicking or refusing to bring the offending CPU online. So ensure that the appropriate feature bits are set within get_cpu_cap() regardless of how many extra times it's called. Fixes: 2961298e ("x86/cpufeatures: Clean up Spectre v2 related CPUID flags") Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: karahmed@amazon.de Cc: peterz@infradead.org Cc: bp@alien8.de Link: https://lkml.kernel.org/r/1517322623-15261-1-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/common.c | 21 +++++++++++++++++++++ arch/x86/kernel/cpu/intel.c | 27 ++++++++------------------- 2 files changed, 29 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 970ee06dc8aad6..f9d8460d34a113 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -726,6 +726,26 @@ static void apply_forced_caps(struct cpuinfo_x86 *c) } } +static void init_speculation_control(struct cpuinfo_x86 *c) +{ + /* + * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support, + * and they also have a different bit for STIBP support. Also, + * a hypervisor might have set the individual AMD bits even on + * Intel CPUs, for finer-grained selection of what's available. + * + * We use the AMD bits in 0x8000_0008 EBX as the generic hardware + * features, which are visible in /proc/cpuinfo and used by the + * kernel. So set those accordingly from the Intel bits. 
+ */ + if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { + set_cpu_cap(c, X86_FEATURE_IBRS); + set_cpu_cap(c, X86_FEATURE_IBPB); + } + if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) + set_cpu_cap(c, X86_FEATURE_STIBP); +} + void get_cpu_cap(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; @@ -820,6 +840,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); init_scattered_cpuid_features(c); + init_speculation_control(c); /* * Clear/Set all flags overridden by options, after probe. diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 0c8b916abced5c..4cf4f8cbc69da1 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -175,28 +175,17 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) c->microcode = intel_get_microcode_revision(); - /* - * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support, - * and they also have a different bit for STIBP support. Also, - * a hypervisor might have set the individual AMD bits even on - * Intel CPUs, for finer-grained selection of what's available. - */ - if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { - set_cpu_cap(c, X86_FEATURE_IBRS); - set_cpu_cap(c, X86_FEATURE_IBPB); - } - if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) - set_cpu_cap(c, X86_FEATURE_STIBP); - /* Now if any of them are set, check the blacklist and clear the lot */ - if ((cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) || + if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) || + cpu_has(c, X86_FEATURE_INTEL_STIBP) || + cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) || cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) { pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n"); - clear_cpu_cap(c, X86_FEATURE_IBRS); - clear_cpu_cap(c, X86_FEATURE_IBPB); - clear_cpu_cap(c, X86_FEATURE_STIBP); - clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL); - clear_cpu_cap(c, X86_FEATURE_INTEL_STIBP); + setup_clear_cpu_cap(X86_FEATURE_IBRS); + setup_clear_cpu_cap(X86_FEATURE_IBPB); + setup_clear_cpu_cap(X86_FEATURE_STIBP); + setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); + setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); } /* From 7a3f12294da45e779b313b33077f04b87782985b Mon Sep 17 00:00:00 2001 From: Tim Chen Date: Mon, 29 Jan 2018 22:04:47 +0000 Subject: [PATCH 1744/3326] x86/speculation: Use Indirect Branch Prediction Barrier in context switch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 18bf3c3ea8ece8f03b6fc58508f2dfd23c7711c7 Flush indirect branches when switching into a process that marked itself non dumpable. This protects high value processes like gpg better, without having too high performance overhead. If done naïvely, we could switch to a kernel idle thread and then back to the original process, such as: process A -> idle -> process A In such scenario, we do not have to do IBPB here even though the process is non-dumpable, as we are switching back to the same process after a hiatus. To avoid the redundant IBPB, which is expensive, we track the last mm user context ID. The cost is to have an extra u64 mm context id to track the last mm we were using before switching to the init_mm used by idle. Avoiding the extra IBPB is probably worth the extra memory for this common scenario. For those cases where tlb_defer_switch_to_init_mm() returns true (non PCID), lazy tlb will defer switch to init_mm, so we will not be changing the mm for the process A -> idle -> process A switch. 
So IBPB will be skipped for this case. Thanks to the reviewers and Andy Lutomirski for the suggestion of using ctx_id which got rid of the problem of mm pointer recycling. Signed-off-by: Tim Chen Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: ak@linux.intel.com Cc: karahmed@amazon.de Cc: arjan@linux.intel.com Cc: torvalds@linux-foundation.org Cc: linux@dominikbrodowski.net Cc: peterz@infradead.org Cc: bp@alien8.de Cc: luto@kernel.org Cc: pbonzini@redhat.com Cc: gregkh@linux-foundation.org Link: https://lkml.kernel.org/r/1517263487-3708-1-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/tlbflush.h | 2 ++ arch/x86/mm/tlb.c | 33 ++++++++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 3effd3c994afe7..4405c4b308e899 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -174,6 +174,8 @@ struct tlb_state { struct mm_struct *loaded_mm; u16 loaded_mm_asid; u16 next_asid; + /* last user mm's ctx id */ + u64 last_ctx_id; /* * We can be in one of several states: diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 5bfe61a5e8e3c6..012d0262484894 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -6,13 +6,14 @@ #include #include #include +#include #include #include +#include #include #include #include -#include /* * TLB flushing, formerly SMP-only @@ -247,6 +248,27 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, } else { u16 new_asid; bool need_flush; + u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id); + + /* + * Avoid user/user BTB poisoning by flushing the branch + * predictor when switching between processes. This stops + * one process from doing Spectre-v2 attacks on another. + * + * As an optimization, flush indirect branches only when + * switching into processes that disable dumping. This + * protects high value processes like gpg, without having + * too high performance overhead. IBPB is *expensive*! + * + * This will not flush branches when switching into kernel + * threads. It will also not flush if we switch to idle + * thread and back to the same process. It will flush if we + * switch to a different non-dumpable process. + */ + if (tsk && tsk->mm && + tsk->mm->context.ctx_id != last_ctx_id && + get_dumpable(tsk->mm) != SUID_DUMP_USER) + indirect_branch_prediction_barrier(); if (IS_ENABLED(CONFIG_VMAP_STACK)) { /* @@ -292,6 +314,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); } + /* + * Record last user mm's context id, so we can avoid + * flushing branch buffer with IBPB if we switch back + * to the same user. + */ + if (next != &init_mm) + this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); + this_cpu_write(cpu_tlbstate.loaded_mm, next); this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); } @@ -369,6 +399,7 @@ void initialize_tlbstate_and_flush(void) write_cr3(build_cr3(mm->pgd, 0)); /* Reinitialize tlbstate. 
*/ + this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id); this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); this_cpu_write(cpu_tlbstate.next_asid, 1); this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); From ad368e5b2d56ed059ebc8343cd7e713f3a7717d4 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Tue, 30 Jan 2018 22:13:33 -0600 Subject: [PATCH 1745/3326] x86/paravirt: Remove 'noreplace-paravirt' cmdline option commit 12c69f1e94c89d40696e83804dd2f0965b5250cd The 'noreplace-paravirt' option disables paravirt patching, leaving the original pv indirect calls in place. That's highly incompatible with retpolines, unless we want to uglify paravirt even further and convert the paravirt calls to retpolines. As far as I can tell, the option doesn't seem to be useful for much other than introducing surprising corner cases and making the kernel vulnerable to Spectre v2. It was probably a debug option from the early paravirt days. So just remove it. Signed-off-by: Josh Poimboeuf Signed-off-by: Thomas Gleixner Reviewed-by: Juergen Gross Cc: Andrea Arcangeli Cc: Peter Zijlstra Cc: Andi Kleen Cc: Ashok Raj Cc: Greg KH Cc: Jun Nakajima Cc: Tim Chen Cc: Rusty Russell Cc: Dave Hansen Cc: Asit Mallick Cc: Andy Lutomirski Cc: Linus Torvalds Cc: Jason Baron Cc: Paolo Bonzini Cc: Alok Kataria Cc: Arjan Van De Ven Cc: David Woodhouse Cc: Dan Williams Link: https://lkml.kernel.org/r/20180131041333.2x6blhxirc2kclrq@treble Signed-off-by: Greg Kroah-Hartman --- Documentation/admin-guide/kernel-parameters.txt | 2 -- arch/x86/kernel/alternative.c | 14 -------------- 2 files changed, 16 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 8122b5f98ea1eb..c76afdcafbef0d 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2718,8 +2718,6 @@ norandmaps Don't use address space randomization. Equivalent to echo 0 > /proc/sys/kernel/randomize_va_space - noreplace-paravirt [X86,IA-64,PV_OPS] Don't patch paravirt_ops - noreplace-smp [X86-32,SMP] Don't replace SMP instructions with UP alternatives diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 14a52c7d23d468..21be0193d9dc3e 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -46,17 +46,6 @@ static int __init setup_noreplace_smp(char *str) } __setup("noreplace-smp", setup_noreplace_smp); -#ifdef CONFIG_PARAVIRT -static int __initdata_or_module noreplace_paravirt = 0; - -static int __init setup_noreplace_paravirt(char *str) -{ - noreplace_paravirt = 1; - return 1; -} -__setup("noreplace-paravirt", setup_noreplace_paravirt); -#endif - #define DPRINTK(fmt, args...) \ do { \ if (debug_alternative) \ @@ -599,9 +588,6 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, struct paravirt_patch_site *p; char insnbuf[MAX_PATCH_LEN]; - if (noreplace_paravirt) - return; - for (p = start; p < end; p++) { unsigned int used; From 7f8da2c8a191f274600bafcf6e58f54e16d5c8de Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 16 Jan 2018 16:51:18 +0100 Subject: [PATCH 1746/3326] KVM: VMX: make MSR bitmaps per-VCPU commit 904e14fb7cb96401a7dc803ca2863fd5ba32ffe6 Place the MSR bitmap in struct loaded_vmcs, and update it in place every time the x2apic or APICv state can change. This is rare and the loop can handle 64 MSRs per iteration, in a similar fashion as nested_vmx_prepare_msr_bitmap. 
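[Editor's note: "64 MSRs per iteration" means the update loop writes one unsigned long of the bitmap per pass instead of toggling individual bits. A simplified sketch of that idea, assuming 64-bit longs; the function name here is invented, and the real loop is vmx_update_msr_bitmap_x2apic() in the diff below:]

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* Set or clear read intercepts for the whole x2APIC MSR range
     * 0x800-0x8ff, covering BITS_PER_LONG (64) MSRs per loop iteration
     * by writing full bitmap words rather than single bits. */
    static void set_x2apic_read_intercepts(unsigned long *msr_bitmap, bool intercept)
    {
            unsigned int msr;

            for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
                    unsigned int word = msr / BITS_PER_LONG;

                    /* the read-low region of the VMX MSR bitmap starts at offset 0 */
                    msr_bitmap[word] = intercept ? ~0UL : 0UL;
            }
    }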
This prepares for choosing, on a per-VM basis, whether to intercept the SPEC_CTRL and PRED_CMD MSRs. Cc: stable@vger.kernel.org # prereq for Spectre mitigation Suggested-by: Jim Mattson Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 270 ++++++++++++++++++++++++--------------------- 1 file changed, 147 insertions(+), 123 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index bca85821b0f00c..ab66374e8c826a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -108,6 +108,14 @@ static u64 __read_mostly host_xss; static bool __read_mostly enable_pml = 1; module_param_named(pml, enable_pml, bool, S_IRUGO); +#define MSR_TYPE_R 1 +#define MSR_TYPE_W 2 +#define MSR_TYPE_RW 3 + +#define MSR_BITMAP_MODE_X2APIC 1 +#define MSR_BITMAP_MODE_X2APIC_APICV 2 +#define MSR_BITMAP_MODE_LM 4 + #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL /* Guest_tsc -> host_tsc conversion requires 64-bit division. */ @@ -206,6 +214,7 @@ struct loaded_vmcs { int soft_vnmi_blocked; ktime_t entry_time; s64 vnmi_blocked_time; + unsigned long *msr_bitmap; struct list_head loaded_vmcss_on_cpu_link; }; @@ -446,8 +455,6 @@ struct nested_vmx { bool pi_pending; u16 posted_intr_nv; - unsigned long *msr_bitmap; - struct hrtimer preemption_timer; bool preemption_timer_expired; @@ -562,6 +569,7 @@ struct vcpu_vmx { struct kvm_vcpu vcpu; unsigned long host_rsp; u8 fail; + u8 msr_bitmap_mode; u32 exit_intr_info; u32 idt_vectoring_info; ulong rflags; @@ -919,6 +927,7 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu); static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code); +static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); @@ -938,12 +947,6 @@ static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); enum { VMX_IO_BITMAP_A, VMX_IO_BITMAP_B, - VMX_MSR_BITMAP_LEGACY, - VMX_MSR_BITMAP_LONGMODE, - VMX_MSR_BITMAP_LEGACY_X2APIC_APICV, - VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV, - VMX_MSR_BITMAP_LEGACY_X2APIC, - VMX_MSR_BITMAP_LONGMODE_X2APIC, VMX_VMREAD_BITMAP, VMX_VMWRITE_BITMAP, VMX_BITMAP_NR @@ -953,12 +956,6 @@ static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; #define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A]) #define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B]) -#define vmx_msr_bitmap_legacy (vmx_bitmap[VMX_MSR_BITMAP_LEGACY]) -#define vmx_msr_bitmap_longmode (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE]) -#define vmx_msr_bitmap_legacy_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV]) -#define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV]) -#define vmx_msr_bitmap_legacy_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC]) -#define vmx_msr_bitmap_longmode_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC]) #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) @@ -2559,36 +2556,6 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) vmx->guest_msrs[from] = tmp; } -static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) -{ - unsigned long *msr_bitmap; - - if (is_guest_mode(vcpu)) - msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap; - else if (cpu_has_secondary_exec_ctrls() && - (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { - if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) { - if (is_long_mode(vcpu)) - msr_bitmap = 
vmx_msr_bitmap_longmode_x2apic_apicv; - else - msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv; - } else { - if (is_long_mode(vcpu)) - msr_bitmap = vmx_msr_bitmap_longmode_x2apic; - else - msr_bitmap = vmx_msr_bitmap_legacy_x2apic; - } - } else { - if (is_long_mode(vcpu)) - msr_bitmap = vmx_msr_bitmap_longmode; - else - msr_bitmap = vmx_msr_bitmap_legacy; - } - - vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); -} - /* * Set up the vmcs to automatically save and restore system * msrs. Don't touch the 64-bit msrs if the guest is in legacy @@ -2629,7 +2596,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) vmx->save_nmsrs = save_nmsrs; if (cpu_has_vmx_msr_bitmap()) - vmx_set_msr_bitmap(&vmx->vcpu); + vmx_update_msr_bitmap(&vmx->vcpu); } /* @@ -3829,6 +3796,8 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) loaded_vmcs_clear(loaded_vmcs); free_vmcs(loaded_vmcs->vmcs); loaded_vmcs->vmcs = NULL; + if (loaded_vmcs->msr_bitmap) + free_page((unsigned long)loaded_vmcs->msr_bitmap); WARN_ON(loaded_vmcs->shadow_vmcs != NULL); } @@ -3845,7 +3814,18 @@ static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) loaded_vmcs->shadow_vmcs = NULL; loaded_vmcs_init(loaded_vmcs); + + if (cpu_has_vmx_msr_bitmap()) { + loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); + if (!loaded_vmcs->msr_bitmap) + goto out_vmcs; + memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); + } return 0; + +out_vmcs: + free_loaded_vmcs(loaded_vmcs); + return -ENOMEM; } static void free_kvm_area(void) @@ -4920,10 +4900,8 @@ static void free_vpid(int vpid) spin_unlock(&vmx_vpid_lock); } -#define MSR_TYPE_R 1 -#define MSR_TYPE_W 2 -static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, - u32 msr, int type) +static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type) { int f = sizeof(unsigned long); @@ -4957,6 +4935,50 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, } } +static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type) +{ + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return; + + /* + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals + * have the write-low and read-high bitmap offsets the wrong way round. + * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. + */ + if (msr <= 0x1fff) { + if (type & MSR_TYPE_R) + /* read-low */ + __set_bit(msr, msr_bitmap + 0x000 / f); + + if (type & MSR_TYPE_W) + /* write-low */ + __set_bit(msr, msr_bitmap + 0x800 / f); + + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + if (type & MSR_TYPE_R) + /* read-high */ + __set_bit(msr, msr_bitmap + 0x400 / f); + + if (type & MSR_TYPE_W) + /* write-high */ + __set_bit(msr, msr_bitmap + 0xc00 / f); + + } +} + +static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type, bool value) +{ + if (value) + vmx_enable_intercept_for_msr(msr_bitmap, msr, type); + else + vmx_disable_intercept_for_msr(msr_bitmap, msr, type); +} + /* * If a msr is allowed by L0, we should check whether it is allowed by L1. * The corresponding bit will be cleared unless both of L0 and L1 allow it. 
@@ -5003,30 +5025,70 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, } } -static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) +static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu) { - if (!longmode_only) - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, - msr, MSR_TYPE_R | MSR_TYPE_W); - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, - msr, MSR_TYPE_R | MSR_TYPE_W); + u8 mode = 0; + + if (cpu_has_secondary_exec_ctrls() && + (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { + mode |= MSR_BITMAP_MODE_X2APIC; + if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) + mode |= MSR_BITMAP_MODE_X2APIC_APICV; + } + + if (is_long_mode(vcpu)) + mode |= MSR_BITMAP_MODE_LM; + + return mode; } -static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active) +#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4)) + +static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap, + u8 mode) { - if (apicv_active) { - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv, - msr, type); - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv, - msr, type); - } else { - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, - msr, type); - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, - msr, type); + int msr; + + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0; + msr_bitmap[word + (0x800 / sizeof(long))] = ~0; + } + + if (mode & MSR_BITMAP_MODE_X2APIC) { + /* + * TPR reads and writes can be virtualized even if virtual interrupt + * delivery is not in use. + */ + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW); + if (mode & MSR_BITMAP_MODE_X2APIC_APICV) { + vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R); + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W); + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W); + } } } +static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; + u8 mode = vmx_msr_bitmap_mode(vcpu); + u8 changed = mode ^ vmx->msr_bitmap_mode; + + if (!changed) + return; + + vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW, + !(mode & MSR_BITMAP_MODE_LM)); + + if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV)) + vmx_update_msr_bitmap_x2apic(msr_bitmap, mode); + + vmx->msr_bitmap_mode = mode; +} + static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) { return enable_apicv; @@ -5272,7 +5334,7 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) } if (cpu_has_vmx_msr_bitmap()) - vmx_set_msr_bitmap(vcpu); + vmx_update_msr_bitmap(vcpu); } static u32 vmx_exec_control(struct vcpu_vmx *vmx) @@ -5459,7 +5521,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); } if (cpu_has_vmx_msr_bitmap()) - vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy)); + vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ @@ -6742,7 +6804,7 @@ void vmx_enable_tdp(void) static __init int hardware_setup(void) { - int r = -ENOMEM, i, msr; + int r = -ENOMEM, i; rdmsrl_safe(MSR_EFER, &host_efer); @@ -6763,9 +6825,6 @@ static __init int hardware_setup(void) 
memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); - memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); - memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); - if (setup_vmcs_config(&vmcs_config) < 0) { r = -EIO; goto out; @@ -6828,42 +6887,8 @@ static __init int hardware_setup(void) kvm_tsc_scaling_ratio_frac_bits = 48; } - vmx_disable_intercept_for_msr(MSR_FS_BASE, false); - vmx_disable_intercept_for_msr(MSR_GS_BASE, false); - vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); - vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); - vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); - vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); - - memcpy(vmx_msr_bitmap_legacy_x2apic_apicv, - vmx_msr_bitmap_legacy, PAGE_SIZE); - memcpy(vmx_msr_bitmap_longmode_x2apic_apicv, - vmx_msr_bitmap_longmode, PAGE_SIZE); - memcpy(vmx_msr_bitmap_legacy_x2apic, - vmx_msr_bitmap_legacy, PAGE_SIZE); - memcpy(vmx_msr_bitmap_longmode_x2apic, - vmx_msr_bitmap_longmode, PAGE_SIZE); - set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ - for (msr = 0x800; msr <= 0x8ff; msr++) { - if (msr == 0x839 /* TMCCT */) - continue; - vmx_disable_intercept_msr_x2apic(msr, MSR_TYPE_R, true); - } - - /* - * TPR reads and writes can be virtualized even if virtual interrupt - * delivery is not in use. - */ - vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_W, true); - vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_R | MSR_TYPE_W, false); - - /* EOI */ - vmx_disable_intercept_msr_x2apic(0x80b, MSR_TYPE_W, true); - /* SELF-IPI */ - vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true); - if (enable_ept) vmx_enable_tdp(); else @@ -7152,13 +7177,6 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) if (r < 0) goto out_vmcs02; - if (cpu_has_vmx_msr_bitmap()) { - vmx->nested.msr_bitmap = - (unsigned long *)__get_free_page(GFP_KERNEL); - if (!vmx->nested.msr_bitmap) - goto out_msr_bitmap; - } - vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); if (!vmx->nested.cached_vmcs12) goto out_cached_vmcs12; @@ -7185,9 +7203,6 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) kfree(vmx->nested.cached_vmcs12); out_cached_vmcs12: - free_page((unsigned long)vmx->nested.msr_bitmap); - -out_msr_bitmap: free_loaded_vmcs(&vmx->nested.vmcs02); out_vmcs02: @@ -7332,10 +7347,6 @@ static void free_nested(struct vcpu_vmx *vmx) free_vpid(vmx->nested.vpid02); vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; - if (vmx->nested.msr_bitmap) { - free_page((unsigned long)vmx->nested.msr_bitmap); - vmx->nested.msr_bitmap = NULL; - } if (enable_shadow_vmcs) { vmx_disable_shadow_vmcs(vmx); vmcs_clear(vmx->vmcs01.shadow_vmcs); @@ -8851,7 +8862,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); - vmx_set_msr_bitmap(vcpu); + vmx_update_msr_bitmap(vcpu); } static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) @@ -9513,6 +9524,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) { int err; struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); + unsigned long *msr_bitmap; int cpu; if (!vmx) @@ -9549,6 +9561,15 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) if (err < 0) goto free_msrs; + msr_bitmap = vmx->vmcs01.msr_bitmap; + vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, 
MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); + vmx->msr_bitmap_mode = 0; + vmx->loaded_vmcs = &vmx->vmcs01; cpu = get_cpu(); vmx_vcpu_load(&vmx->vcpu, cpu); @@ -10018,7 +10039,7 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, int msr; struct page *page; unsigned long *msr_bitmap_l1; - unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap; + unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; /* This shortcut is ok because we support only x2APIC MSRs so far. */ if (!nested_cpu_has_virt_x2apic_mode(vmcs12)) @@ -10595,6 +10616,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); + if (cpu_has_vmx_msr_bitmap()) + vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); + if (enable_vpid) { /* * There is no direct mapping between vpid02 and vpid12, the @@ -11388,7 +11412,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vmcs_write64(GUEST_IA32_DEBUGCTL, 0); if (cpu_has_vmx_msr_bitmap()) - vmx_set_msr_bitmap(vcpu); + vmx_update_msr_bitmap(vcpu); if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, vmcs12->vm_exit_msr_load_count)) From 6f6eb84b14aca572ebe50984be94e036075e6e2b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 31 Jan 2018 17:47:03 -0800 Subject: [PATCH 1747/3326] x86/kvm: Update spectre-v1 mitigation commit 085331dfc6bbe3501fb936e657331ca943827600 Commit 75f139aaf896 "KVM: x86: Add memory barrier on vmcs field lookup" added a raw 'asm("lfence");' to prevent a bounds check bypass of 'vmcs_field_to_offset_table'. The lfence can be avoided in this path by using the array_index_nospec() helper designed for these types of fixes. Signed-off-by: Dan Williams Signed-off-by: Thomas Gleixner Acked-by: Paolo Bonzini Cc: Andrew Honig Cc: kvm@vger.kernel.org Cc: Jim Mattson Link: https://lkml.kernel.org/r/151744959670.6342.3001723920950249067.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ab66374e8c826a..93e25e6768008d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "kvm_cache_regs.h" #include "x86.h" @@ -887,21 +888,18 @@ static const unsigned short vmcs_field_to_offset_table[] = { static inline short vmcs_field_to_offset(unsigned long field) { - BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); + const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table); + unsigned short offset; - if (field >= ARRAY_SIZE(vmcs_field_to_offset_table)) + BUILD_BUG_ON(size > SHRT_MAX); + if (field >= size) return -ENOENT; - /* - * FIXME: Mitigation for CVE-2017-5753. To be replaced with a - * generic mechanism. 
- */ - asm("lfence"); - - if (vmcs_field_to_offset_table[field] == 0) + field = array_index_nospec(field, size); + offset = vmcs_field_to_offset_table[field]; + if (offset == 0) return -ENOENT; - - return vmcs_field_to_offset_table[field]; + return offset; } static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) From 838dbae0acd06da5fb0d0173dfddc72ca5650eb0 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 1 Feb 2018 11:27:20 +0000 Subject: [PATCH 1748/3326] x86/retpoline: Avoid retpolines for built-in __init functions commit 66f793099a636862a71c59d4a6ba91387b155e0c There's no point in building init code with retpolines, since it runs before any potentially hostile userspace does. And before the retpoline is actually ALTERNATIVEd into place, for much of it. Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: karahmed@amazon.de Cc: peterz@infradead.org Cc: bp@alien8.de Link: https://lkml.kernel.org/r/1517484441-1420-2-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- include/linux/init.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/init.h b/include/linux/init.h index f38b993edacb7c..943139a563e39a 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -5,6 +5,13 @@ #include #include +/* Built-in __init functions needn't be compiled with retpoline */ +#if defined(RETPOLINE) && !defined(MODULE) +#define __noretpoline __attribute__((indirect_branch("keep"))) +#else +#define __noretpoline +#endif + /* These macros are used to mark some functions or * initialized data (doesn't apply to uninitialized data) * as `initialization' functions. The kernel can take this @@ -40,7 +47,7 @@ /* These are for everybody (although not all archs will actually discard it in modules) */ -#define __init __section(.init.text) __cold __inittrace __latent_entropy +#define __init __section(.init.text) __cold __inittrace __latent_entropy __noretpoline #define __initdata __section(.init.data) #define __initconst __section(.init.rodata) #define __exitdata __section(.exit.data) From ebaf2271a024a7000dc83e061349f7bbf2b44281 Mon Sep 17 00:00:00 2001 From: KarimAllah Ahmed Date: Thu, 1 Feb 2018 11:27:21 +0000 Subject: [PATCH 1749/3326] x86/spectre: Simplify spectre_v2 command line parsing commit 9005c6834c0ffdfe46afa76656bd9276cca864f6 [dwmw2: Use ARRAY_SIZE] Signed-off-by: KarimAllah Ahmed Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Cc: peterz@infradead.org Cc: bp@alien8.de Link: https://lkml.kernel.org/r/1517484441-1420-3-git-send-email-dwmw@amazon.co.uk Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 86 +++++++++++++++++++++++++------------- 1 file changed, 56 insertions(+), 30 deletions(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index dd3a3cc84591ff..71949bf2de5ad3 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -119,13 +119,13 @@ static inline const char *spectre_v2_module_string(void) { return ""; } static void __init spec2_print_if_insecure(const char *reason) { if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) - pr_info("%s\n", reason); + pr_info("%s selected on command line.\n", reason); } static void __init spec2_print_if_secure(const char *reason) { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) - pr_info("%s\n", reason); + pr_info("%s selected on command line.\n", reason); } static inline bool retp_compiler(void) @@ -140,42 +140,68 @@ static inline bool match_option(const char *arg, int arglen, const char *opt) return len == arglen 
&& !strncmp(arg, opt, len); } +static const struct { + const char *option; + enum spectre_v2_mitigation_cmd cmd; + bool secure; +} mitigation_options[] = { + { "off", SPECTRE_V2_CMD_NONE, false }, + { "on", SPECTRE_V2_CMD_FORCE, true }, + { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, + { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, + { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, + { "auto", SPECTRE_V2_CMD_AUTO, false }, +}; + static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) { char arg[20]; - int ret; - - ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, - sizeof(arg)); - if (ret > 0) { - if (match_option(arg, ret, "off")) { - goto disable; - } else if (match_option(arg, ret, "on")) { - spec2_print_if_secure("force enabled on command line."); - return SPECTRE_V2_CMD_FORCE; - } else if (match_option(arg, ret, "retpoline")) { - spec2_print_if_insecure("retpoline selected on command line."); - return SPECTRE_V2_CMD_RETPOLINE; - } else if (match_option(arg, ret, "retpoline,amd")) { - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { - pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); - return SPECTRE_V2_CMD_AUTO; - } - spec2_print_if_insecure("AMD retpoline selected on command line."); - return SPECTRE_V2_CMD_RETPOLINE_AMD; - } else if (match_option(arg, ret, "retpoline,generic")) { - spec2_print_if_insecure("generic retpoline selected on command line."); - return SPECTRE_V2_CMD_RETPOLINE_GENERIC; - } else if (match_option(arg, ret, "auto")) { + int ret, i; + enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; + + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) + return SPECTRE_V2_CMD_NONE; + else { + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, + sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_CMD_AUTO; + + for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { + if (!match_option(arg, ret, mitigation_options[i].option)) + continue; + cmd = mitigation_options[i].cmd; + break; + } + + if (i >= ARRAY_SIZE(mitigation_options)) { + pr_err("unknown option (%s). Switching to AUTO select\n", + mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } } - if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2")) + if ((cmd == SPECTRE_V2_CMD_RETPOLINE || + cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || + cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && + !IS_ENABLED(CONFIG_RETPOLINE)) { + pr_err("%s selected but not compiled in. Switching to AUTO select\n", + mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; -disable: - spec2_print_if_insecure("disabled on command line."); - return SPECTRE_V2_CMD_NONE; + } + + if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { + pr_err("retpoline,amd selected but CPU is not AMD. 
Switching to AUTO select\n"); + return SPECTRE_V2_CMD_AUTO; + } + + if (mitigation_options[i].secure) + spec2_print_if_secure(mitigation_options[i].option); + else + spec2_print_if_insecure(mitigation_options[i].option); + + return cmd; } /* Check for Skylake-like CPUs (for RSB handling) */ From 6dd1f6989a545446661d0cbecf8748c8d2093549 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 2 Feb 2018 22:39:23 +0100 Subject: [PATCH 1750/3326] x86/pti: Mark constant arrays as __initconst commit 4bf5d56d429cbc96c23d809a08f63cd29e1a702e I'm seeing build failures from the two newly introduced arrays that are marked 'const' and '__initdata', which are mutually exclusive: arch/x86/kernel/cpu/common.c:882:43: error: 'cpu_no_speculation' causes a section type conflict with 'e820_table_firmware_init' arch/x86/kernel/cpu/common.c:895:43: error: 'cpu_no_meltdown' causes a section type conflict with 'e820_table_firmware_init' The correct annotation is __initconst. Fixes: fec9434a12f3 ("x86/pti: Do not enable PTI on CPUs which are not vulnerable to Meltdown") Signed-off-by: Arnd Bergmann Signed-off-by: Thomas Gleixner Cc: Ricardo Neri Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Thomas Garnier Cc: David Woodhouse Link: https://lkml.kernel.org/r/20180202213959.611210-1-arnd@arndb.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f9d8460d34a113..92b66e21bae5b2 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -876,7 +876,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) #endif } -static const __initdata struct x86_cpu_id cpu_no_speculation[] = { +static const __initconst struct x86_cpu_id cpu_no_speculation[] = { { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, @@ -889,7 +889,7 @@ static const __initdata struct x86_cpu_id cpu_no_speculation[] = { {} }; -static const __initdata struct x86_cpu_id cpu_no_meltdown[] = { +static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { { X86_VENDOR_AMD }, {} }; From 4c8298c1fdd3317658a3dcd055b19a4c60df947d Mon Sep 17 00:00:00 2001 From: Darren Kenny Date: Fri, 2 Feb 2018 19:12:20 +0000 Subject: [PATCH 1751/3326] x86/speculation: Fix typo IBRS_ATT, which should be IBRS_ALL commit af189c95a371b59f493dbe0f50c0a09724868881 Fixes: 117cc7a908c83 ("x86/retpoline: Fill return stack buffer on vmexit") Signed-off-by: Darren Kenny Signed-off-by: Thomas Gleixner Reviewed-by: Konrad Rzeszutek Wilk Cc: Tom Lendacky Cc: Andi Kleen Cc: Borislav Petkov Cc: Masami Hiramatsu Cc: Arjan van de Ven Cc: David Woodhouse Link: https://lkml.kernel.org/r/20180202191220.blvgkgutojecxr3b@starbug-vm.ie.oracle.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index d15d471348b815..4d57894635f242 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -150,7 +150,7 @@ extern char __indirect_thunk_end[]; * On VMEXIT we must ensure that no RSB predictions learned in the guest * can be followed in the host, by overwriting the RSB completely. 
Both * retpoline and IBRS mitigations for Spectre v2 need this; only on future - * CPUs with IBRS_ATT *might* it be avoided. + * CPUs with IBRS_ALL *might* it be avoided. */ static inline void vmexit_fill_RSB(void) { From 8f7c4d52a0e82854e5ccc15a2b53d3ab3efe0452 Mon Sep 17 00:00:00 2001 From: KarimAllah Ahmed Date: Thu, 1 Feb 2018 22:59:42 +0100 Subject: [PATCH 1752/3326] KVM/x86: Update the reverse_cpuid list to include CPUID_7_EDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit b7b27aa011a1df42728d1768fc181d9ce69e6911 [dwmw2: Stop using KF() for bits in it, too] Signed-off-by: KarimAllah Ahmed Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Reviewed-by: Paolo Bonzini Reviewed-by: Konrad Rzeszutek Wilk Reviewed-by: Jim Mattson Cc: kvm@vger.kernel.org Cc: Radim Krčmář Link: https://lkml.kernel.org/r/1517522386-18410-2-git-send-email-karahmed@amazon.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/cpuid.c | 8 +++----- arch/x86/kvm/cpuid.h | 1 + 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 0099e10eb04525..c0eb337e7878e3 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -67,9 +67,7 @@ u64 kvm_supported_xcr0(void) #define F(x) bit(X86_FEATURE_##x) -/* These are scattered features in cpufeatures.h. */ -#define KVM_CPUID_BIT_AVX512_4VNNIW 2 -#define KVM_CPUID_BIT_AVX512_4FMAPS 3 +/* For scattered features from cpufeatures.h; we currently expose none */ #define KF(x) bit(KVM_CPUID_BIT_##x) int kvm_update_cpuid(struct kvm_vcpu *vcpu) @@ -392,7 +390,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 7.0.edx*/ const u32 kvm_cpuid_7_0_edx_x86_features = - KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS); + F(AVX512_4VNNIW) | F(AVX512_4FMAPS); /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); @@ -477,7 +475,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) entry->ecx &= ~F(PKU); entry->edx &= kvm_cpuid_7_0_edx_x86_features; - entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX); + cpuid_mask(&entry->edx, CPUID_7_EDX); } else { entry->ebx = 0; entry->ecx = 0; diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index c2cea6651279f7..9a327d5b6d1f5b 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -54,6 +54,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX}, [CPUID_7_ECX] = { 7, 0, CPUID_ECX}, [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX}, + [CPUID_7_EDX] = { 7, 0, CPUID_EDX}, }; static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature) From d395d69de67ea95760e1f207eb0f6fdfbcb6e069 Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Thu, 1 Feb 2018 22:59:43 +0100 Subject: [PATCH 1753/3326] KVM/x86: Add IBPB support commit 15d45071523d89b3fb7372e2135fbd72f6af9506 The Indirect Branch Predictor Barrier (IBPB) is an indirect branch control mechanism. It keeps earlier branches from influencing later ones. Unlike IBRS and STIBP, IBPB does not define a new mode of operation. It's a command that ensures predicted branch targets aren't used after the barrier. Although IBRS and IBPB are enumerated by the same CPUID enumeration, IBPB is very different. IBPB helps mitigate against three potential attacks: * Mitigate guests from being attacked by other guests. - This is addressed by issuing IBPB when we do a guest switch.
* Mitigate attacks from guest/ring3->host/ring3. These would require an IBPB during context switch in host, or after VMEXIT. The host process has two ways to mitigate - Either it can be compiled with retpoline - If it's going through a context switch, and has set !dumpable, then there is an IBPB in that path. (Tim's patch: https://patchwork.kernel.org/patch/10192871) - The case where after a VMEXIT you return back to Qemu might make Qemu attackable from guest when Qemu isn't compiled with retpoline. There are issues reported when doing IBPB on every VMEXIT that resulted in some tsc calibration woes in guest. * Mitigate guest/ring0->host/ring0 attacks. When host kernel is using retpoline it is safe against these attacks. If host kernel isn't using retpoline we might need to do an IBPB flush on every VMEXIT. Even when using retpoline for indirect calls, in certain conditions 'ret' can use the BTB on Skylake-era CPUs. There are other mitigations available like RSB stuffing/clearing. * IBPB is issued only for SVM during svm_free_vcpu(). VMX has a vmclear and SVM doesn't. Follow discussion here: https://lkml.org/lkml/2018/1/15/146 Please refer to the following spec for more details on the enumeration and control. Refer here to get documentation about mitigations. https://software.intel.com/en-us/side-channel-security-support [peterz: rebase and changelog rewrite] [karahmed: - rebase - vmx: expose PRED_CMD if guest has it in CPUID - svm: only pass through IBPB if guest has it in CPUID - vmx: support !cpu_has_vmx_msr_bitmap()] - vmx: support nested] [dwmw2: Expose CPUID bit too (AMD IBPB only for now as we lack IBRS) PRED_CMD is a write-only MSR] Signed-off-by: Ashok Raj Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: David Woodhouse Signed-off-by: KarimAllah Ahmed Signed-off-by: Thomas Gleixner Reviewed-by: Konrad Rzeszutek Wilk Cc: Andrea Arcangeli Cc: Andi Kleen Cc: kvm@vger.kernel.org Cc: Asit Mallick Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Dave Hansen Cc: Arjan Van De Ven Cc: Greg KH Cc: Jun Nakajima Cc: Paolo Bonzini Cc: Dan Williams Cc: Tim Chen Link: http://lkml.kernel.org/r/1515720739-43819-6-git-send-email-ashok.raj@intel.com Link: https://lkml.kernel.org/r/1517522386-18410-3-git-send-email-karahmed@amazon.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/cpuid.c | 11 +++++- arch/x86/kvm/svm.c | 28 ++++++++++++++++ arch/x86/kvm/vmx.c | 80 ++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 116 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index c0eb337e7878e3..033004de01e6f7 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -365,6 +365,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM); + /* cpuid 0x80000008.ebx */ + const u32 kvm_cpuid_8000_0008_ebx_x86_features = + F(IBPB); + /* cpuid 0xC0000001.edx */ const u32 kvm_cpuid_C000_0001_edx_x86_features = F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) | @@ -625,7 +629,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, if (!g_phys_as) g_phys_as = phys_as; entry->eax = g_phys_as | (virt_as << 8); - entry->ebx = entry->edx = 0; + entry->edx = 0; + /* IBPB isn't necessarily present in hardware cpuid */ + if (boot_cpu_has(X86_FEATURE_IBPB)) + entry->ebx |= F(IBPB); + entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; + cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); break; } case 0x80000019: diff --git
a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 6a8284f7232830..bc310335baa66e 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -249,6 +249,7 @@ static const struct svm_direct_access_msrs { { .index = MSR_CSTAR, .always = true }, { .index = MSR_SYSCALL_MASK, .always = true }, #endif + { .index = MSR_IA32_PRED_CMD, .always = false }, { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false }, { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, { .index = MSR_IA32_LASTINTFROMIP, .always = false }, @@ -529,6 +530,7 @@ struct svm_cpu_data { struct kvm_ldttss_desc *tss_desc; struct page *save_area; + struct vmcb *current_vmcb; }; static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data); @@ -1706,11 +1708,17 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); + /* + * The vmcb page can be recycled, causing a false negative in + * svm_vcpu_load(). So do a full IBPB now. + */ + indirect_branch_prediction_barrier(); } static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_svm *svm = to_svm(vcpu); + struct svm_cpu_data *sd = per_cpu(svm_data, cpu); int i; if (unlikely(cpu != vcpu->cpu)) { @@ -1739,6 +1747,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (static_cpu_has(X86_FEATURE_RDTSCP)) wrmsrl(MSR_TSC_AUX, svm->tsc_aux); + if (sd->current_vmcb != svm->vmcb) { + sd->current_vmcb = svm->vmcb; + indirect_branch_prediction_barrier(); + } avic_vcpu_load(vcpu, cpu); } @@ -3670,6 +3682,22 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr); break; + case MSR_IA32_PRED_CMD: + if (!msr->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBPB)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; + + if (!data) + break; + + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); + if (is_guest_mode(vcpu)) + break; + set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); + break; case MSR_STAR: svm->vmcb->save.star = data; break; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 93e25e6768008d..196f324eb23aba 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -582,6 +582,7 @@ struct vcpu_vmx { u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif + u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; u32 secondary_exec_control; @@ -926,6 +927,8 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code); static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); +static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); @@ -1900,6 +1903,29 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) vmcs_write32(EXCEPTION_BITMAP, eb); } +/* + * Check if MSR is intercepted for L01 MSR bitmap. 
+ */ +static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr) +{ + unsigned long *msr_bitmap; + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return true; + + msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; + + if (msr <= 0x1fff) { + return !!test_bit(msr, msr_bitmap + 0x800 / f); + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + return !!test_bit(msr, msr_bitmap + 0xc00 / f); + } + + return true; +} + static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit) { @@ -2278,6 +2304,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; vmcs_load(vmx->loaded_vmcs->vmcs); + indirect_branch_prediction_barrier(); } if (!already_loaded) { @@ -3337,6 +3364,34 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr_info); break; + case MSR_IA32_PRED_CMD: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; + + if (!data) + break; + + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_vmx_merge_msr_bitmap. We should not touch the + * vmcs02.msr_bitmap here since it gets completely overwritten + * in the merging. + */ + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, + MSR_TYPE_W); + break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) @@ -10038,9 +10093,23 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, struct page *page; unsigned long *msr_bitmap_l1; unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; + /* + * pred_cmd is trying to verify two things: + * + * 1. L0 gave a permission to L1 to actually passthrough the MSR. This + * ensures that we do not accidentally generate an L02 MSR bitmap + * from the L12 MSR bitmap that is too permissive. + * 2. That L1 or L2s have actually used the MSR. This avoids + * unnecessarily merging of the bitmap if the MSR is unused. This + * works properly because we only update the L01 MSR bitmap lazily. + * So even if L0 should pass L1 these MSRs, the L01 bitmap is only + * updated to reflect this when L1 (or its L2s) actually write to + * the MSR. + */ + bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); - /* This shortcut is ok because we support only x2APIC MSRs so far. 
*/ - if (!nested_cpu_has_virt_x2apic_mode(vmcs12)) + if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && + !pred_cmd) return false; page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap); @@ -10073,6 +10142,13 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, MSR_TYPE_W); } } + + if (pred_cmd) + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + MSR_IA32_PRED_CMD, + MSR_TYPE_W); + kunmap(page); kvm_release_page_clean(page); From 0716f551d5a29dc3abb128996aa02269b527fa45 Mon Sep 17 00:00:00 2001 From: KarimAllah Ahmed Date: Thu, 1 Feb 2018 22:59:44 +0100 Subject: [PATCH 1754/3326] KVM/VMX: Emulate MSR_IA32_ARCH_CAPABILITIES commit 28c1c9fabf48d6ad596273a11c46e0d0da3e14cd Intel processors use MSR_IA32_ARCH_CAPABILITIES MSR to indicate RDCL_NO (bit 0) and IBRS_ALL (bit 1). This is a read-only MSR. By default the contents will come directly from the hardware, but user-space can still override it. [dwmw2: The bit in kvm_cpuid_7_0_edx_x86_features can be unconditional] Signed-off-by: KarimAllah Ahmed Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Reviewed-by: Paolo Bonzini Reviewed-by: Darren Kenny Reviewed-by: Jim Mattson Reviewed-by: Konrad Rzeszutek Wilk Cc: Andrea Arcangeli Cc: Andi Kleen Cc: Jun Nakajima Cc: kvm@vger.kernel.org Cc: Dave Hansen Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Asit Mallick Cc: Arjan Van De Ven Cc: Greg KH Cc: Dan Williams Cc: Tim Chen Cc: Ashok Raj Link: https://lkml.kernel.org/r/1517522386-18410-4-git-send-email-karahmed@amazon.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/vmx.c | 15 +++++++++++++++ arch/x86/kvm/x86.c | 1 + 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 033004de01e6f7..1909635fe09c9f 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -394,7 +394,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 7.0.edx*/ const u32 kvm_cpuid_7_0_edx_x86_features = - F(AVX512_4VNNIW) | F(AVX512_4FMAPS); + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(ARCH_CAPABILITIES); /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 196f324eb23aba..a66682ac2d1635 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -583,6 +583,8 @@ struct vcpu_vmx { u64 msr_guest_kernel_gs_base; #endif + u64 arch_capabilities; + u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; u32 secondary_exec_control; @@ -3257,6 +3259,12 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSC: msr_info->data = guest_read_tsc(vcpu); break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) + return 1; + msr_info->data = to_vmx(vcpu)->arch_capabilities; + break; case MSR_IA32_SYSENTER_CS: msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); break; @@ -3392,6 +3400,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, MSR_TYPE_W); break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated) + return 1; + vmx->arch_capabilities = data; + break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) @@ -5652,6 +5665,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) ++vmx->nmsrs; } + if 
(boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities); vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8c28023a43b132..6fb112071f7e17 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1006,6 +1006,7 @@ static u32 msrs_to_save[] = { #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, + MSR_IA32_ARCH_CAPABILITIES }; static unsigned num_msrs_to_save; From 4a82531c96a20ab008e7f5a903e85d46a81fd3a3 Mon Sep 17 00:00:00 2001 From: KarimAllah Ahmed Date: Thu, 1 Feb 2018 22:59:45 +0100 Subject: [PATCH 1755/3326] KVM/VMX: Allow direct access to MSR_IA32_SPEC_CTRL commit d28b387fb74da95d69d2615732f50cceb38e9a4d [ Based on a patch from Ashok Raj ] Add direct access to MSR_IA32_SPEC_CTRL for guests. This is needed for guests that will only mitigate Spectre V2 through IBRS+IBPB and will not be using a retpoline+IBPB based approach. To avoid the overhead of saving and restoring the MSR_IA32_SPEC_CTRL for guests that do not actually use the MSR, only start saving and restoring when a non-zero is written to it. No attempt is made to handle STIBP here, intentionally. Filtering STIBP may be added in a future patch, which may require trapping all writes if we don't want to pass it through directly to the guest. [dwmw2: Clean up CPUID bits, save/restore manually, handle reset] Signed-off-by: KarimAllah Ahmed Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Reviewed-by: Darren Kenny Reviewed-by: Konrad Rzeszutek Wilk Reviewed-by: Jim Mattson Cc: Andrea Arcangeli Cc: Andi Kleen Cc: Jun Nakajima Cc: kvm@vger.kernel.org Cc: Dave Hansen Cc: Tim Chen Cc: Andy Lutomirski Cc: Asit Mallick Cc: Arjan Van De Ven Cc: Greg KH Cc: Paolo Bonzini Cc: Dan Williams Cc: Linus Torvalds Cc: Ashok Raj Link: https://lkml.kernel.org/r/1517522386-18410-5-git-send-email-karahmed@amazon.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/cpuid.c | 9 ++-- arch/x86/kvm/vmx.c | 105 ++++++++++++++++++++++++++++++++++++++++++- arch/x86/kvm/x86.c | 2 +- 3 files changed, 110 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 1909635fe09c9f..13f5d4217e4f1c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -367,7 +367,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 0x80000008.ebx */ const u32 kvm_cpuid_8000_0008_ebx_x86_features = - F(IBPB); + F(IBPB) | F(IBRS); /* cpuid 0xC0000001.edx */ const u32 kvm_cpuid_C000_0001_edx_x86_features = @@ -394,7 +394,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 7.0.edx*/ const u32 kvm_cpuid_7_0_edx_x86_features = - F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(ARCH_CAPABILITIES); + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | + F(ARCH_CAPABILITIES); /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); @@ -630,9 +631,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, g_phys_as = phys_as; entry->eax = g_phys_as | (virt_as << 8); entry->edx = 0; - /* IBPB isn't necessarily present in hardware cpuid */ + /* IBRS and IBPB aren't necessarily present in hardware cpuid */ if (boot_cpu_has(X86_FEATURE_IBPB)) entry->ebx |= F(IBPB); + if (boot_cpu_has(X86_FEATURE_IBRS)) + entry->ebx |= F(IBRS); entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); break; diff --git 
a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a66682ac2d1635..0ae4b1a861683c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -584,6 +584,7 @@ struct vcpu_vmx { #endif u64 arch_capabilities; + u64 spec_ctrl; u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; @@ -1905,6 +1906,29 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) vmcs_write32(EXCEPTION_BITMAP, eb); } +/* + * Check if MSR is intercepted for currently loaded MSR bitmap. + */ +static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) +{ + unsigned long *msr_bitmap; + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return true; + + msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; + + if (msr <= 0x1fff) { + return !!test_bit(msr, msr_bitmap + 0x800 / f); + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + return !!test_bit(msr, msr_bitmap + 0xc00 / f); + } + + return true; +} + /* * Check if MSR is intercepted for L01 MSR bitmap. */ @@ -3259,6 +3283,14 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSC: msr_info->data = guest_read_tsc(vcpu); break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + msr_info->data = to_vmx(vcpu)->spec_ctrl; + break; case MSR_IA32_ARCH_CAPABILITIES: if (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) @@ -3372,6 +3404,37 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr_info); break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + /* The STIBP bit doesn't fault even if it's not advertised */ + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) + return 1; + + vmx->spec_ctrl = data; + + if (!data) + break; + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_vmx_merge_msr_bitmap. We should not touch the + * vmcs02.msr_bitmap here since it gets completely overwritten + * in the merging. We update the vmcs01 here for L1 as well + * since it will end up touching the MSR anyway now. + */ + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, + MSR_IA32_SPEC_CTRL, + MSR_TYPE_RW); + break; case MSR_IA32_PRED_CMD: if (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) && @@ -5697,6 +5760,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) u64 cr0; vmx->rmode.vm86_active = 0; + vmx->spec_ctrl = 0; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(vcpu, 0); @@ -9360,6 +9424,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx_arm_hv_timer(vcpu); + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero. Since vmentry is serialising on affected CPUs, there + * is no need to worry about the conditional branch over the wrmsr + * being speculatively taken. + */ + if (vmx->spec_ctrl) + wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); + vmx->__launched = vmx->loaded_vmcs->launched; asm( /* Store host registers */ @@ -9478,6 +9551,27 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + /* + * We do not use IBRS in the kernel. 
If this vCPU has used the + * SPEC_CTRL MSR it may have left it on; save the value and + * turn it off. This is much more efficient than blindly adding + * it to the atomic save/restore list. Especially as the former + * (Saving guest MSRs on vmexit) doesn't even exist in KVM. + * + * For non-nested case: + * If the L01 MSR bitmap does not intercept the MSR, then we need to + * save it. + * + * For nested case: + * If the L02 MSR bitmap does not intercept the MSR, then we need to + * save it. + */ + if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)) + rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); + + if (vmx->spec_ctrl) + wrmsrl(MSR_IA32_SPEC_CTRL, 0); + /* Eliminate branch target predictions from guest mode */ vmexit_fill_RSB(); @@ -10109,7 +10203,7 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, unsigned long *msr_bitmap_l1; unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; /* - * pred_cmd is trying to verify two things: + * pred_cmd & spec_ctrl are trying to verify two things: * * 1. L0 gave a permission to L1 to actually passthrough the MSR. This * ensures that we do not accidentally generate an L02 MSR bitmap @@ -10122,9 +10216,10 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, * the MSR. */ bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); + bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && - !pred_cmd) + !pred_cmd && !spec_ctrl) return false; page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap); @@ -10158,6 +10253,12 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, } } + if (spec_ctrl) + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + MSR_IA32_SPEC_CTRL, + MSR_TYPE_R | MSR_TYPE_W); + if (pred_cmd) nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6fb112071f7e17..f97358423f9c48 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1006,7 +1006,7 @@ static u32 msrs_to_save[] = { #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, - MSR_IA32_ARCH_CAPABILITIES + MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES }; static unsigned num_msrs_to_save; From f6d90612c96c99d98f204c7641becfd7295a6fc2 Mon Sep 17 00:00:00 2001 From: KarimAllah Ahmed Date: Sat, 3 Feb 2018 15:56:23 +0100 Subject: [PATCH 1756/3326] KVM/SVM: Allow direct access to MSR_IA32_SPEC_CTRL commit b2ac58f90540e39324e7a29a7ad471407ae0bf48 [ Based on a patch from Paolo Bonzini ] ... basically doing exactly what we do for VMX: - Passthrough SPEC_CTRL to guests (if enabled in guest CPUID) - Save and restore SPEC_CTRL around VMExit and VMEntry only if the guest actually used it. 
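In other words, the guest value is loaded just before VMRUN and the host value (0) is restored right after it, and only for vCPUs that were actually allowed to write the MSR. A minimal sketch of that pattern, reusing the svm/vcpu names from the hunks below (illustration only, not part of the patch):

    /* before VMRUN: install the guest's SPEC_CTRL if it ever wrote a non-zero value */
    if (svm->spec_ctrl)
            wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

    /* ... VMRUN ... */

    /* after VMRUN: re-read only if the guest can write the MSR without a VM exit */
    if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
            rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
    if (svm->spec_ctrl)
            wrmsrl(MSR_IA32_SPEC_CTRL, 0);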
Signed-off-by: KarimAllah Ahmed Signed-off-by: David Woodhouse Signed-off-by: Thomas Gleixner Reviewed-by: Darren Kenny Reviewed-by: Konrad Rzeszutek Wilk Cc: Andrea Arcangeli Cc: Andi Kleen Cc: Jun Nakajima Cc: kvm@vger.kernel.org Cc: Dave Hansen Cc: Tim Chen Cc: Andy Lutomirski Cc: Asit Mallick Cc: Arjan Van De Ven Cc: Greg KH Cc: Paolo Bonzini Cc: Dan Williams Cc: Linus Torvalds Cc: Ashok Raj Link: https://lkml.kernel.org/r/1517669783-20732-1-git-send-email-karahmed@amazon.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/svm.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index bc310335baa66e..e0bc3ad0f6cd8f 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -184,6 +184,8 @@ struct vcpu_svm { u64 gs_base; } host; + u64 spec_ctrl; + u32 *msrpm; ulong nmi_iret_rip; @@ -249,6 +251,7 @@ static const struct svm_direct_access_msrs { { .index = MSR_CSTAR, .always = true }, { .index = MSR_SYSCALL_MASK, .always = true }, #endif + { .index = MSR_IA32_SPEC_CTRL, .always = false }, { .index = MSR_IA32_PRED_CMD, .always = false }, { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false }, { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, @@ -882,6 +885,25 @@ static bool valid_msr_intercept(u32 index) return false; } +static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr) +{ + u8 bit_write; + unsigned long tmp; + u32 offset; + u32 *msrpm; + + msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm: + to_svm(vcpu)->msrpm; + + offset = svm_msrpm_offset(msr); + bit_write = 2 * (msr & 0x0f) + 1; + tmp = msrpm[offset]; + + BUG_ON(offset == MSR_INVALID); + + return !!test_bit(bit_write, &tmp); +} + static void set_msr_interception(u32 *msrpm, unsigned msr, int read, int write) { @@ -1587,6 +1609,8 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) u32 dummy; u32 eax = 1; + svm->spec_ctrl = 0; + if (!init_event) { svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; @@ -3591,6 +3615,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_VM_CR: msr_info->data = svm->nested.vm_cr_msr; break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) + return 1; + + msr_info->data = svm->spec_ctrl; + break; case MSR_IA32_UCODE_REV: msr_info->data = 0x01000065; break; @@ -3682,6 +3713,33 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr); break; + case MSR_IA32_SPEC_CTRL: + if (!msr->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) + return 1; + + /* The STIBP bit doesn't fault even if it's not advertised */ + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) + return 1; + + svm->spec_ctrl = data; + + if (!data) + break; + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_svm_vmrun_msrpm. + * We update the L1 MSR bit as well since it will end up + * touching the MSR anyway now. + */ + set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); + break; case MSR_IA32_PRED_CMD: if (!msr->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_IBPB)) @@ -4950,6 +5008,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) local_irq_enable(); + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero. 
Since vmentry is serialising on affected CPUs, there + * is no need to worry about the conditional branch over the wrmsr + * being speculatively taken. + */ + if (svm->spec_ctrl) + wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); + asm volatile ( "push %%" _ASM_BP "; \n\t" "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" @@ -5042,6 +5109,27 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + /* + * We do not use IBRS in the kernel. If this vCPU has used the + * SPEC_CTRL MSR it may have left it on; save the value and + * turn it off. This is much more efficient than blindly adding + * it to the atomic save/restore list. Especially as the former + * (Saving guest MSRs on vmexit) doesn't even exist in KVM. + * + * For non-nested case: + * If the L01 MSR bitmap does not intercept the MSR, then we need to + * save it. + * + * For nested case: + * If the L02 MSR bitmap does not intercept the MSR, then we need to + * save it. + */ + if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)) + rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); + + if (svm->spec_ctrl) + wrmsrl(MSR_IA32_SPEC_CTRL, 0); + /* Eliminate branch target predictions from guest mode */ vmexit_fill_RSB(); From a252f37c5f8b998ccd419d1562a3c069890eb1d4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 11 Jan 2018 18:57:26 +0100 Subject: [PATCH 1757/3326] serial: core: mark port as initialized after successful IRQ change commit 44117a1d1732c513875d5a163f10d9adbe866c08 upstream. setserial changes the IRQ via uart_set_info(). It invokes uart_shutdown() which free the current used IRQ and clear TTY_PORT_INITIALIZED. It will then update the IRQ number and invoke uart_startup() before returning to the caller leaving TTY_PORT_INITIALIZED cleared. The next open will crash with | list_add double add: new=ffffffff839fcc98, prev=ffffffff839fcc98, next=ffffffff839fcc98. since the close from the IOCTL won't free the IRQ (and clean the list) due to the TTY_PORT_INITIALIZED check in uart_shutdown(). There is same pattern in uart_do_autoconfig() and I *think* it also needs to set TTY_PORT_INITIALIZED there. Is there a reason why uart_startup() does not set the flag by itself after the IRQ has been acquired (since it is cleared in uart_shutdown)? Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/serial_core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 3a14cccbd7fffd..7948acf146018a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -987,6 +987,8 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, } } else { retval = uart_startup(tty, state, 1); + if (retval == 0) + tty_port_set_initialized(port, true); if (retval > 0) retval = 0; } From 8d1ed7d4e1ce6d4d08676f5b2bb1a4e83b3ad5f7 Mon Sep 17 00:00:00 2001 From: Ian Abbott Date: Wed, 15 Nov 2017 16:33:12 -0600 Subject: [PATCH 1758/3326] fpga: region: release of_parse_phandle nodes after use commit 0f5eb1545907edeea7672a9c1652c4231150ff22 upstream. Both fpga_region_get_manager() and fpga_region_get_bridges() call of_parse_phandle(), but nothing calls of_node_put() on the returned struct device_node pointers. Make sure to do that to stop their reference counters getting out of whack. 
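of_parse_phandle() returns the device_node with its reference count raised, so every successful lookup needs a matching of_node_put() once the node is no longer used. The pattern applied below, sketched on the fpga_region_get_manager() case (illustration only; mgr and np as in that function):

    struct device_node *mgr_node;

    mgr_node = of_parse_phandle(np, "fpga-mgr", 0);   /* takes a reference */
    if (mgr_node) {
            mgr = of_fpga_mgr_get(mgr_node);
            of_node_put(mgr_node);                    /* drop it after use */
    }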
Fixes: 0fa20cdfcc1f ("fpga: fpga-region: device tree control for FPGA") Signed-off-by: Ian Abbott Signed-off-by: Alan Tull Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/fpga-region.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c index d9ab7c75b14f49..e0c73ceba2ed16 100644 --- a/drivers/fpga/fpga-region.c +++ b/drivers/fpga/fpga-region.c @@ -147,6 +147,7 @@ static struct fpga_manager *fpga_region_get_manager(struct fpga_region *region) mgr_node = of_parse_phandle(np, "fpga-mgr", 0); if (mgr_node) { mgr = of_fpga_mgr_get(mgr_node); + of_node_put(mgr_node); of_node_put(np); return mgr; } @@ -192,10 +193,13 @@ static int fpga_region_get_bridges(struct fpga_region *region, parent_br = region_np->parent; /* If overlay has a list of bridges, use it. */ - if (of_parse_phandle(overlay, "fpga-bridges", 0)) + br = of_parse_phandle(overlay, "fpga-bridges", 0); + if (br) { + of_node_put(br); np = overlay; - else + } else { np = region_np; + } for (i = 0; ; i++) { br = of_parse_phandle(np, "fpga-bridges", i); @@ -203,12 +207,15 @@ static int fpga_region_get_bridges(struct fpga_region *region, break; /* If parent bridge is in list, skip it. */ - if (br == parent_br) + if (br == parent_br) { + of_node_put(br); continue; + } /* If node is a bridge, get it and add to list */ ret = fpga_bridge_get_to_list(br, region->info, ®ion->bridge_list); + of_node_put(br); /* If any of the bridges are in use, give up */ if (ret == -EBUSY) { From 81d0cc85caabe062991ea45ddada814835d47fb0 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 7 Feb 2018 11:12:26 -0800 Subject: [PATCH 1759/3326] Linux 4.14.18 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7ed993896dd54c..a69e5da9ed86a0 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 17 +SUBLEVEL = 18 EXTRAVERSION = NAME = Petit Gorille From bafda5d375270277a140d3b70f0ea3c5fe6aacf8 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 31 Oct 2017 00:33:45 +0900 Subject: [PATCH 1760/3326] .gitignore: sort normal pattern rules alphabetically commit 1377dd3e29878b8f5d9f5c9000975f50a428a0cd upstream. We are having more and more ignore patterns. Sort the list alphabetically. We will easily catch duplicated patterns if any. Signed-off-by: Masahiro Yamada Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- .gitignore | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/.gitignore b/.gitignore index 0c39aa20b6ba8d..435eeeb81efa3d 100644 --- a/.gitignore +++ b/.gitignore @@ -7,38 +7,38 @@ # command after changing this file, to see if there are # any tracked files which get ignored after the change. 
# -# Normal rules +# Normal rules (sorted alphabetically) # .* +*.a +*.bin +*.bz2 +*.c.[012]*.* +*.dwo +*.elf +*.gcno +*.gz +*.i +*.ko +*.ll +*.lst +*.lz4 +*.lzma +*.lzo +*.mod.c *.o *.o.* -*.a +*.order +*.patch *.s -*.ko *.so *.so.dbg -*.mod.c -*.i -*.lst +*.su *.symtypes -*.order -*.elf -*.bin *.tar -*.gz -*.bz2 -*.lzma *.xz -*.lz4 -*.lzo -*.patch -*.gcno -*.ll -modules.builtin Module.symvers -*.dwo -*.su -*.c.[012]*.* +modules.builtin # # Top-level generic files From abc5896b778581fb2396f018a82334c701a1c261 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 31 Oct 2017 00:33:46 +0900 Subject: [PATCH 1761/3326] .gitignore: move *.dtb and *.dtb.S patterns to the top-level .gitignore commit 10b62a2f785ab55857380f0c63d9fa468fd8c676 upstream. Most of DT files are compiled under arch/*/boot/dts/, but we have some other directories, like drivers/of/unittest-data/. We often miss to add gitignore patterns per directory. Since there are no source files that end with .dtb or .dtb.S, we can ignore the patterns globally. Signed-off-by: Masahiro Yamada Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- .gitignore | 2 ++ arch/arc/boot/.gitignore | 1 - arch/arm/boot/.gitignore | 1 - arch/arm64/boot/dts/.gitignore | 1 - arch/metag/boot/.gitignore | 1 - arch/microblaze/boot/.gitignore | 1 - arch/mips/boot/.gitignore | 1 - arch/nios2/boot/.gitignore | 1 - arch/powerpc/boot/.gitignore | 1 - arch/xtensa/boot/.gitignore | 1 - drivers/of/unittest-data/.gitignore | 2 -- 11 files changed, 2 insertions(+), 11 deletions(-) delete mode 100644 arch/arm64/boot/dts/.gitignore delete mode 100644 drivers/of/unittest-data/.gitignore diff --git a/.gitignore b/.gitignore index 435eeeb81efa3d..6c119eab5d4692 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,8 @@ *.bin *.bz2 *.c.[012]*.* +*.dtb +*.dtb.S *.dwo *.elf *.gcno diff --git a/arch/arc/boot/.gitignore b/arch/arc/boot/.gitignore index 5246969a20c5fb..c4c5fd529c251d 100644 --- a/arch/arc/boot/.gitignore +++ b/arch/arc/boot/.gitignore @@ -1,2 +1 @@ -*.dtb* uImage diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore index 3c79f85975aaa2..ce1c5ff746e7d2 100644 --- a/arch/arm/boot/.gitignore +++ b/arch/arm/boot/.gitignore @@ -3,4 +3,3 @@ zImage xipImage bootpImage uImage -*.dtb diff --git a/arch/arm64/boot/dts/.gitignore b/arch/arm64/boot/dts/.gitignore deleted file mode 100644 index b60ed208c779e0..00000000000000 --- a/arch/arm64/boot/dts/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.dtb diff --git a/arch/metag/boot/.gitignore b/arch/metag/boot/.gitignore index 2d6c0c16088439..6c662ddb909aae 100644 --- a/arch/metag/boot/.gitignore +++ b/arch/metag/boot/.gitignore @@ -1,4 +1,3 @@ vmlinux* uImage* ramdisk.* -*.dtb* diff --git a/arch/microblaze/boot/.gitignore b/arch/microblaze/boot/.gitignore index bf045918602772..679502d64a9768 100644 --- a/arch/microblaze/boot/.gitignore +++ b/arch/microblaze/boot/.gitignore @@ -1,3 +1,2 @@ -*.dtb linux.bin* simpleImage.* diff --git a/arch/mips/boot/.gitignore b/arch/mips/boot/.gitignore index d3962cd5ce0c12..a73d6e2c4f64fe 100644 --- a/arch/mips/boot/.gitignore +++ b/arch/mips/boot/.gitignore @@ -5,4 +5,3 @@ zImage zImage.tmp calc_vmlinuz_load_addr uImage -*.dtb diff --git a/arch/nios2/boot/.gitignore b/arch/nios2/boot/.gitignore index 109279ca5a4de5..64386a8dedd8b7 100644 --- a/arch/nios2/boot/.gitignore +++ b/arch/nios2/boot/.gitignore @@ -1,2 +1 @@ -*.dtb vmImage diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore index 84774ccba1c27b..f92d0530ceb119 100644 --- 
a/arch/powerpc/boot/.gitignore +++ b/arch/powerpc/boot/.gitignore @@ -18,7 +18,6 @@ otheros.bld uImage cuImage.* dtbImage.* -*.dtb treeImage.* vmlinux.strip zImage diff --git a/arch/xtensa/boot/.gitignore b/arch/xtensa/boot/.gitignore index be7655998b26ca..38177c7ebcabc9 100644 --- a/arch/xtensa/boot/.gitignore +++ b/arch/xtensa/boot/.gitignore @@ -1,3 +1,2 @@ uImage zImage.redboot -*.dtb diff --git a/drivers/of/unittest-data/.gitignore b/drivers/of/unittest-data/.gitignore deleted file mode 100644 index 4b3cf8b16de298..00000000000000 --- a/drivers/of/unittest-data/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -testcases.dtb -testcases.dtb.S From 806d61d66900b999c908823471ea9ff64cf7344e Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 30 Sep 2017 10:10:10 +0900 Subject: [PATCH 1762/3326] kbuild: rpm-pkg: keep spec file until make mrproper commit af60e207087975d069858741c44ed4f450330ac4 upstream. If build fails during (bin)rpm-pkg, the spec file is not cleaned by anyone until the next successful build of the package. We do not have to immediately delete the spec file in case somebody may want to take a look at it. Instead, make them ignored by git, and cleaned up by make mrproper. Signed-off-by: Masahiro Yamada Signed-off-by: Greg Kroah-Hartman --- .gitignore | 5 +++++ scripts/package/Makefile | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 6c119eab5d4692..f6050b88e95b5b 100644 --- a/.gitignore +++ b/.gitignore @@ -55,6 +55,11 @@ modules.builtin /System.map /Module.markers +# +# RPM spec file (make rpm-pkg) +# +/*.spec + # # Debian directory (make deb-pkg) # diff --git a/scripts/package/Makefile b/scripts/package/Makefile index 34de8b953ecfc4..348af5b20618ea 100644 --- a/scripts/package/Makefile +++ b/scripts/package/Makefile @@ -50,7 +50,6 @@ rpm-pkg rpm: FORCE $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec $(call cmd,src_tar,$(KERNELPATH),kernel.spec) +rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz - rm $(KERNELPATH).tar.gz kernel.spec # binrpm-pkg # --------------------------------------------------------------------------- @@ -59,7 +58,8 @@ binrpm-pkg: FORCE $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec +rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \ $(UTS_MACHINE) -bb $(objtree)/binkernel.spec - rm binkernel.spec + +clean-files += $(objtree)/*.spec # Deb target # --------------------------------------------------------------------------- From 2726946dfcd2b2f1863dfae198687c6ec6d976dd Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Wed, 31 Jan 2018 16:29:30 +0200 Subject: [PATCH 1763/3326] ip6mr: fix stale iterator [ Upstream commit 4adfa79fc254efb7b0eb3cd58f62c2c3f805f1ba ] When we dump the ip6mr mfc entries via proc, we initialize an iterator with the table to dump but we don't clear the cache pointer which might be initialized from a prior read on the same descriptor that ended. This can result in lock imbalance (an unnecessary unlock) leading to other crashes and hangs. Clear the cache pointer like ipmr does to fix the issue. Thanks for the reliable reproducer. Here's syzbot's trace: WARNING: bad unlock balance detected! 4.15.0-rc3+ #128 Not tainted syzkaller971460/3195 is trying to release lock (mrt_lock) at: [<000000006898068d>] ipmr_mfc_seq_stop+0xe1/0x130 net/ipv6/ip6mr.c:553 but there are no more locks to release! 
other info that might help us debug this: 1 lock held by syzkaller971460/3195: #0: (&p->lock){+.+.}, at: [<00000000744a6565>] seq_read+0xd5/0x13d0 fs/seq_file.c:165 stack backtrace: CPU: 1 PID: 3195 Comm: syzkaller971460 Not tainted 4.15.0-rc3+ #128 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x257 lib/dump_stack.c:53 print_unlock_imbalance_bug+0x12f/0x140 kernel/locking/lockdep.c:3561 __lock_release kernel/locking/lockdep.c:3775 [inline] lock_release+0x5f9/0xda0 kernel/locking/lockdep.c:4023 __raw_read_unlock include/linux/rwlock_api_smp.h:225 [inline] _raw_read_unlock+0x1a/0x30 kernel/locking/spinlock.c:255 ipmr_mfc_seq_stop+0xe1/0x130 net/ipv6/ip6mr.c:553 traverse+0x3bc/0xa00 fs/seq_file.c:135 seq_read+0x96a/0x13d0 fs/seq_file.c:189 proc_reg_read+0xef/0x170 fs/proc/inode.c:217 do_loop_readv_writev fs/read_write.c:673 [inline] do_iter_read+0x3db/0x5b0 fs/read_write.c:897 compat_readv+0x1bf/0x270 fs/read_write.c:1140 do_compat_preadv64+0xdc/0x100 fs/read_write.c:1189 C_SYSC_preadv fs/read_write.c:1209 [inline] compat_SyS_preadv+0x3b/0x50 fs/read_write.c:1203 do_syscall_32_irqs_on arch/x86/entry/common.c:327 [inline] do_fast_syscall_32+0x3ee/0xf9d arch/x86/entry/common.c:389 entry_SYSENTER_compat+0x51/0x60 arch/x86/entry/entry_64_compat.S:125 RIP: 0023:0xf7f73c79 RSP: 002b:00000000e574a15c EFLAGS: 00000292 ORIG_RAX: 000000000000014d RAX: ffffffffffffffda RBX: 000000000000000f RCX: 0000000020a3afb0 RDX: 0000000000000001 RSI: 0000000000000067 RDI: 0000000000000000 RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 BUG: sleeping function called from invalid context at lib/usercopy.c:25 in_atomic(): 1, irqs_disabled(): 0, pid: 3195, name: syzkaller971460 INFO: lockdep is turned off. 
CPU: 1 PID: 3195 Comm: syzkaller971460 Not tainted 4.15.0-rc3+ #128 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x257 lib/dump_stack.c:53 ___might_sleep+0x2b2/0x470 kernel/sched/core.c:6060 __might_sleep+0x95/0x190 kernel/sched/core.c:6013 __might_fault+0xab/0x1d0 mm/memory.c:4525 _copy_to_user+0x2c/0xc0 lib/usercopy.c:25 copy_to_user include/linux/uaccess.h:155 [inline] seq_read+0xcb4/0x13d0 fs/seq_file.c:279 proc_reg_read+0xef/0x170 fs/proc/inode.c:217 do_loop_readv_writev fs/read_write.c:673 [inline] do_iter_read+0x3db/0x5b0 fs/read_write.c:897 compat_readv+0x1bf/0x270 fs/read_write.c:1140 do_compat_preadv64+0xdc/0x100 fs/read_write.c:1189 C_SYSC_preadv fs/read_write.c:1209 [inline] compat_SyS_preadv+0x3b/0x50 fs/read_write.c:1203 do_syscall_32_irqs_on arch/x86/entry/common.c:327 [inline] do_fast_syscall_32+0x3ee/0xf9d arch/x86/entry/common.c:389 entry_SYSENTER_compat+0x51/0x60 arch/x86/entry/entry_64_compat.S:125 RIP: 0023:0xf7f73c79 RSP: 002b:00000000e574a15c EFLAGS: 00000292 ORIG_RAX: 000000000000014d RAX: ffffffffffffffda RBX: 000000000000000f RCX: 0000000020a3afb0 RDX: 0000000000000001 RSI: 0000000000000067 RDI: 0000000000000000 RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 WARNING: CPU: 1 PID: 3195 at lib/usercopy.c:26 _copy_to_user+0xb5/0xc0 lib/usercopy.c:26 Reported-by: syzbot Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6mr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index f5500f5444e984..e1060f28410deb 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -496,6 +496,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) return ERR_PTR(-ENOENT); it->mrt = mrt; + it->cache = NULL; return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) : SEQ_START_TOKEN; } From 166f27322f8c89a3580a30c339ff34a66544500c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 1 Feb 2018 10:26:57 -0800 Subject: [PATCH 1764/3326] net: igmp: add a missing rcu locking section [ Upstream commit e7aadb27a5415e8125834b84a74477bfbee4eff5 ] Newly added igmpv3_get_srcaddr() needs to be called under rcu lock. Timer callbacks do not ensure this locking. ============================= WARNING: suspicious RCU usage 4.15.0+ #200 Not tainted ----------------------------- ./include/linux/inetdevice.h:216 suspicious rcu_dereference_check() usage! 
other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 3 locks held by syzkaller616973/4074: #0: (&mm->mmap_sem){++++}, at: [<00000000bfce669e>] __do_page_fault+0x32d/0xc90 arch/x86/mm/fault.c:1355 #1: ((&im->timer)){+.-.}, at: [<00000000619d2f71>] lockdep_copy_map include/linux/lockdep.h:178 [inline] #1: ((&im->timer)){+.-.}, at: [<00000000619d2f71>] call_timer_fn+0x1c6/0x820 kernel/time/timer.c:1316 #2: (&(&im->lock)->rlock){+.-.}, at: [<000000005f833c5c>] spin_lock_bh include/linux/spinlock.h:315 [inline] #2: (&(&im->lock)->rlock){+.-.}, at: [<000000005f833c5c>] igmpv3_send_report+0x98/0x5b0 net/ipv4/igmp.c:600 stack backtrace: CPU: 0 PID: 4074 Comm: syzkaller616973 Not tainted 4.15.0+ #200 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x257 lib/dump_stack.c:53 lockdep_rcu_suspicious+0x123/0x170 kernel/locking/lockdep.c:4592 __in_dev_get_rcu include/linux/inetdevice.h:216 [inline] igmpv3_get_srcaddr net/ipv4/igmp.c:329 [inline] igmpv3_newpack+0xeef/0x12e0 net/ipv4/igmp.c:389 add_grhead.isra.27+0x235/0x300 net/ipv4/igmp.c:432 add_grec+0xbd3/0x1170 net/ipv4/igmp.c:565 igmpv3_send_report+0xd5/0x5b0 net/ipv4/igmp.c:605 igmp_send_report+0xc43/0x1050 net/ipv4/igmp.c:722 igmp_timer_expire+0x322/0x5c0 net/ipv4/igmp.c:831 call_timer_fn+0x228/0x820 kernel/time/timer.c:1326 expire_timers kernel/time/timer.c:1363 [inline] __run_timers+0x7ee/0xb70 kernel/time/timer.c:1666 run_timer_softirq+0x4c/0x70 kernel/time/timer.c:1692 __do_softirq+0x2d7/0xb85 kernel/softirq.c:285 invoke_softirq kernel/softirq.c:365 [inline] irq_exit+0x1cc/0x200 kernel/softirq.c:405 exiting_irq arch/x86/include/asm/apic.h:541 [inline] smp_apic_timer_interrupt+0x16b/0x700 arch/x86/kernel/apic/apic.c:1052 apic_timer_interrupt+0xa9/0xb0 arch/x86/entry/entry_64.S:938 Fixes: a46182b00290 ("net: igmp: Use correct source address on IGMPv3 reports") Signed-off-by: Eric Dumazet Reported-by: syzbot Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/igmp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 013fed55b610f3..fbeb35ad804bd4 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -386,7 +386,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) pip->frag_off = htons(IP_DF); pip->ttl = 1; pip->daddr = fl4.daddr; + + rcu_read_lock(); pip->saddr = igmpv3_get_srcaddr(dev, &fl4); + rcu_read_unlock(); + pip->protocol = IPPROTO_IGMP; pip->tot_len = 0; /* filled in later */ ip_select_ident(net, skb, NULL); From fcee7812ea05e0a9031f723a8f4206c2e0d74f5e Mon Sep 17 00:00:00 2001 From: Junxiao Bi Date: Mon, 29 Jan 2018 17:53:42 +0800 Subject: [PATCH 1765/3326] qlcnic: fix deadlock bug [ Upstream commit 233ac3891607f501f08879134d623b303838f478 ] The following soft lockup was caught. This is a deadlock caused by recusive locking. Process kworker/u40:1:28016 was holding spin lock "mbx->queue_lock" in qlcnic_83xx_mailbox_worker(), while a softirq came in and ask the same spin lock in qlcnic_83xx_enqueue_mbx_cmd(). This lock should be hold by disable bh.. [161846.962125] NMI watchdog: BUG: soft lockup - CPU#1 stuck for 22s! 
[kworker/u40:1:28016] [161846.962367] Modules linked in: tun ocfs2 xen_netback xen_blkback xen_gntalloc xen_gntdev xen_evtchn xenfs xen_privcmd autofs4 ocfs2_dlmfs ocfs2_stack_o2cb ocfs2_dlm ocfs2_nodemanager ocfs2_stackglue configfs bnx2fc fcoe libfcoe libfc sunrpc 8021q mrp garp bridge stp llc bonding dm_round_robin dm_multipath iTCO_wdt iTCO_vendor_support pcspkr sb_edac edac_core i2c_i801 shpchp lpc_ich mfd_core ioatdma ipmi_devintf ipmi_si ipmi_msghandler sg ext4 jbd2 mbcache2 sr_mod cdrom sd_mod igb i2c_algo_bit i2c_core ahci libahci megaraid_sas ixgbe dca ptp pps_core vxlan udp_tunnel ip6_udp_tunnel qla2xxx scsi_transport_fc qlcnic crc32c_intel be2iscsi bnx2i cnic uio cxgb4i cxgb4 cxgb3i libcxgbi ipv6 cxgb3 mdio libiscsi_tcp qla4xxx iscsi_boot_sysfs libiscsi scsi_transport_iscsi dm_mirror dm_region_hash dm_log dm_mod [161846.962454] [161846.962460] CPU: 1 PID: 28016 Comm: kworker/u40:1 Not tainted 4.1.12-94.5.9.el6uek.x86_64 #2 [161846.962463] Hardware name: Oracle Corporation SUN SERVER X4-2L /ASSY,MB,X4-2L , BIOS 26050100 09/19/2017 [161846.962489] Workqueue: qlcnic_mailbox qlcnic_83xx_mailbox_worker [qlcnic] [161846.962493] task: ffff8801f2e34600 ti: ffff88004ca5c000 task.ti: ffff88004ca5c000 [161846.962496] RIP: e030:[] [] xen_hypercall_sched_op+0xa/0x20 [161846.962506] RSP: e02b:ffff880202e43388 EFLAGS: 00000206 [161846.962509] RAX: 0000000000000000 RBX: ffff8801f6996b70 RCX: ffffffff810013aa [161846.962511] RDX: ffff880202e433cc RSI: ffff880202e433b0 RDI: 0000000000000003 [161846.962513] RBP: ffff880202e433d0 R08: 0000000000000000 R09: ffff8801fe893200 [161846.962516] R10: ffff8801fe400538 R11: 0000000000000206 R12: ffff880202e4b000 [161846.962518] R13: 0000000000000050 R14: 0000000000000001 R15: 000000000000020d [161846.962528] FS: 0000000000000000(0000) GS:ffff880202e40000(0000) knlGS:ffff880202e40000 [161846.962531] CS: e033 DS: 0000 ES: 0000 CR0: 0000000080050033 [161846.962533] CR2: 0000000002612640 CR3: 00000001bb796000 CR4: 0000000000042660 [161846.962536] Stack: [161846.962538] ffff880202e43608 0000000000000000 ffffffff813f0442 ffff880202e433b0 [161846.962543] 0000000000000000 ffff880202e433cc ffffffff00000001 0000000000000000 [161846.962547] 00000009813f03d6 ffff880202e433e0 ffffffff813f0460 ffff880202e43440 [161846.962552] Call Trace: [161846.962555] [161846.962565] [] ? xen_poll_irq_timeout+0x42/0x50 [161846.962570] [] xen_poll_irq+0x10/0x20 [161846.962578] [] xen_lock_spinning+0xe2/0x110 [161846.962583] [] __raw_callee_save_xen_lock_spinning+0x11/0x20 [161846.962592] [] ? _raw_spin_lock+0x57/0x80 [161846.962609] [] qlcnic_83xx_enqueue_mbx_cmd+0x7c/0xe0 [qlcnic] [161846.962623] [] qlcnic_83xx_issue_cmd+0x58/0x210 [qlcnic] [161846.962636] [] qlcnic_83xx_sre_macaddr_change+0x162/0x1d0 [qlcnic] [161846.962649] [] qlcnic_83xx_change_l2_filter+0x2b/0x30 [qlcnic] [161846.962657] [] ? 
__skb_flow_dissect+0x18b/0x650 [161846.962670] [] qlcnic_send_filter+0x205/0x250 [qlcnic] [161846.962682] [] qlcnic_xmit_frame+0x547/0x7b0 [qlcnic] [161846.962691] [] xmit_one+0x82/0x1a0 [161846.962696] [] dev_hard_start_xmit+0x50/0xa0 [161846.962701] [] sch_direct_xmit+0x112/0x220 [161846.962706] [] __dev_queue_xmit+0x1df/0x5e0 [161846.962710] [] dev_queue_xmit_sk+0x13/0x20 [161846.962721] [] bond_dev_queue_xmit+0x35/0x80 [bonding] [161846.962729] [] __bond_start_xmit+0x1cb/0x210 [bonding] [161846.962736] [] bond_start_xmit+0x31/0x60 [bonding] [161846.962740] [] xmit_one+0x82/0x1a0 [161846.962745] [] dev_hard_start_xmit+0x50/0xa0 [161846.962749] [] __dev_queue_xmit+0x4ee/0x5e0 [161846.962754] [] dev_queue_xmit_sk+0x13/0x20 [161846.962760] [] vlan_dev_hard_start_xmit+0xb2/0x150 [8021q] [161846.962764] [] xmit_one+0x82/0x1a0 [161846.962769] [] dev_hard_start_xmit+0x50/0xa0 [161846.962773] [] __dev_queue_xmit+0x4ee/0x5e0 [161846.962777] [] dev_queue_xmit_sk+0x13/0x20 [161846.962789] [] br_dev_queue_push_xmit+0x54/0xa0 [bridge] [161846.962797] [] br_forward_finish+0x2f/0x90 [bridge] [161846.962807] [] ? ttwu_do_wakeup+0x1d/0x100 [161846.962811] [] ? __alloc_skb+0x8b/0x1f0 [161846.962818] [] __br_forward+0x8d/0x120 [bridge] [161846.962822] [] ? __kmalloc_reserve+0x3b/0xa0 [161846.962829] [] ? update_rq_runnable_avg+0xee/0x230 [161846.962836] [] br_forward+0x96/0xb0 [bridge] [161846.962845] [] br_handle_frame_finish+0x1ae/0x420 [bridge] [161846.962853] [] br_handle_frame+0x17f/0x260 [bridge] [161846.962862] [] ? br_handle_frame_finish+0x420/0x420 [bridge] [161846.962867] [] __netif_receive_skb_core+0x1f7/0x870 [161846.962872] [] __netif_receive_skb+0x22/0x70 [161846.962877] [] netif_receive_skb_internal+0x23/0x90 [161846.962884] [] ? xenvif_idx_release+0xea/0x100 [xen_netback] [161846.962889] [] ? _raw_spin_unlock_irqrestore+0x20/0x50 [161846.962893] [] netif_receive_skb_sk+0x24/0x90 [161846.962899] [] xenvif_tx_submit+0x2ca/0x3f0 [xen_netback] [161846.962906] [] xenvif_tx_action+0x9c/0xd0 [xen_netback] [161846.962915] [] xenvif_poll+0x35/0x70 [xen_netback] [161846.962920] [] napi_poll+0xcb/0x1e0 [161846.962925] [] net_rx_action+0x90/0x1c0 [161846.962931] [] __do_softirq+0x10a/0x350 [161846.962938] [] irq_exit+0x125/0x130 [161846.962943] [] xen_evtchn_do_upcall+0x39/0x50 [161846.962950] [] xen_do_hypervisor_callback+0x1e/0x40 [161846.962952] [161846.962959] [] ? _raw_spin_lock+0x4a/0x80 [161846.962964] [] ? _raw_spin_lock_irqsave+0x1e/0xa0 [161846.962978] [] ? qlcnic_83xx_mailbox_worker+0xb9/0x2a0 [qlcnic] [161846.962991] [] ? process_one_work+0x151/0x4b0 [161846.962995] [] ? check_events+0x12/0x20 [161846.963001] [] ? worker_thread+0x120/0x480 [161846.963005] [] ? __schedule+0x30b/0x890 [161846.963010] [] ? process_one_work+0x4b0/0x4b0 [161846.963015] [] ? process_one_work+0x4b0/0x4b0 [161846.963021] [] ? kthread+0xce/0xf0 [161846.963025] [] ? kthread_freezable_should_stop+0x70/0x70 [161846.963031] [] ? ret_from_fork+0x42/0x70 [161846.963035] [] ? kthread_freezable_should_stop+0x70/0x70 [161846.963037] Code: cc 51 41 53 b8 1c 00 00 00 0f 05 41 5b 59 c3 cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc 51 41 53 b8 1d 00 00 00 0f 05 <41> 5b 59 c3 cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc Signed-off-by: Junxiao Bi Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- .../ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index f7080d0ab87462..46b0372dd03269 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) struct list_head *head = &mbx->cmd_q; struct qlcnic_cmd_args *cmd = NULL; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); while (!list_empty(head)) { cmd = list_entry(head->next, struct qlcnic_cmd_args, list); @@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) qlcnic_83xx_notify_cmd_completion(adapter, cmd); } - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); } static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) @@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); list_del(&cmd->list); mbx->num_cmds--; - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); qlcnic_83xx_notify_cmd_completion(adapter, cmd); } @@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, init_completion(&cmd->completion); cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); list_add_tail(&cmd->list, &mbx->cmd_q); mbx->num_cmds++; @@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; queue_work(mbx->work_q, &mbx->work); - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); return 0; } @@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; spin_unlock_irqrestore(&mbx->aen_lock, flags); - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); if (list_empty(head)) { - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); return; } cmd = list_entry(head->next, struct qlcnic_cmd_args, list); - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); mbx_ops->encode_cmd(adapter, cmd); mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); From a7c2cf702a4021f40a8948b9c5abcb8a81668ca4 Mon Sep 17 00:00:00 2001 From: Kristian Evensen Date: Tue, 30 Jan 2018 14:12:55 +0100 Subject: [PATCH 1766/3326] qmi_wwan: Add support for Quectel EP06 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit c0b91a56a2e57a5a370655b25d677ae0ebf8a2d0 ] The Quectel EP06 is a Cat. 6 LTE modem. It uses the same interface as the EC20/EC25 for QMI, and requires the same "set DTR"-quirk to work. Signed-off-by: Kristian Evensen Acked-by: Bjørn Mork Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 2092febfcb4253..8d9f02b7a71f2c 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1243,6 +1243,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ From e8513f250d1b632f17cf909ff6b76293adc5f682 Mon Sep 17 00:00:00 2001 From: Chunhao Lin Date: Wed, 31 Jan 2018 01:32:36 +0800 Subject: [PATCH 1767/3326] r8169: fix RTL8168EP take too long to complete driver initialization. [ Upstream commit 086ca23d03c0d2f4088f472386778d293e15c5f6 ] The driver checks the wrong register bit in rtl_ocp_tx_cond(), which keeps the driver waiting until timeout. Fix this by waiting for the right register bit. Signed-off-by: Chunhao Lin Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/realtek/r8169.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 958ff931e790a9..619a1b7281a0b7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1388,7 +1388,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond) { void __iomem *ioaddr = tp->mmio_addr; - return RTL_R8(IBISR0) & 0x02; + return RTL_R8(IBISR0) & 0x20; } static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) @@ -1396,7 +1396,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); - rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000); RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); } From 6d35430fdaf47880cacda975a7d1b1588f933357 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Fri, 26 Jan 2018 16:40:41 +0800 Subject: [PATCH 1768/3326] tcp: release sk_frag.page in tcp_disconnect [ Upstream commit 9b42d55a66d388e4dd5550107df051a9637564fc ] A socket can be disconnected and transformed back into a listening socket. If sk_frag.page is not released, it will be cloned into a new socket by sk_clone_lock() with its reference count increased, leading to a use-after-free or double-free issue. Signed-off-by: Li RongQing Cc: Eric Dumazet Signed-off-by: David S.
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2a65d806b562b9..fe11128d7df46a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2379,6 +2379,12 @@ int tcp_disconnect(struct sock *sk, int flags) WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); + if (sk->sk_frag.page) { + put_page(sk->sk_frag.page); + sk->sk_frag.page = NULL; + sk->sk_frag.offset = 0; + } + sk->sk_error_report(sk); return err; } From b9b70c876a7a73d494cb9d9f32a227dcfac54a4c Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 25 Jan 2018 22:03:52 +0800 Subject: [PATCH 1769/3326] vhost_net: stop device during reset owner [ Upstream commit 4cd879515d686849eec5f718aeac62a70b067d82 ] We don't stop device before reset owner, this means we could try to serve any virtqueue kick before reset dev->worker. This will result a warn since the work was pending at llist during owner resetting. Fix this by stopping device during owner reset. Reported-by: syzbot+eb17c6162478cc50632c@syzkaller.appspotmail.com Fixes: 3a4d5c94e9593 ("vhost_net: a kernel-level virtio server") Signed-off-by: Jason Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/vhost/net.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index bd15309ac5f167..082891dffd9d57 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1212,6 +1212,7 @@ static long vhost_net_reset_owner(struct vhost_net *n) } vhost_net_stop(n, &tx_sock, &rx_sock); vhost_net_flush(n); + vhost_dev_stop(&n->dev); vhost_dev_reset_owner(&n->dev, umem); vhost_net_vq_reset(n); done: From 799a34d5b048644a918b295627c3594124d96d4c Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 2 Feb 2018 15:26:57 +0000 Subject: [PATCH 1770/3326] Revert "defer call to mem_cgroup_sk_alloc()" [ Upstream commit edbe69ef2c90fc86998a74b08319a01c508bd497 ] This patch effectively reverts commit 9f1c2674b328 ("net: memcontrol: defer call to mem_cgroup_sk_alloc()"). Moving mem_cgroup_sk_alloc() to the inet_csk_accept() completely breaks memcg socket memory accounting, as packets received before memcg pointer initialization are not accounted and are causing refcounting underflow on socket release. Actually the free-after-use problem was fixed by commit c0576e397508 ("net: call cgroup_sk_alloc() earlier in sk_clone_lock()") for the cgroup pointer. So, let's revert it and call mem_cgroup_sk_alloc() just before cgroup_sk_alloc(). This is safe, as we hold a reference to the socket we're cloning, and it holds a reference to the memcg. Also, let's drop BUG_ON(mem_cgroup_is_root()) check from mem_cgroup_sk_alloc(). I see no reasons why bumping the root memcg counter is a good reason to panic, and there are no realistic ways to hit it. Signed-off-by: Roman Gushchin Cc: Eric Dumazet Cc: David S. Miller Cc: Johannes Weiner Cc: Tejun Heo Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- mm/memcontrol.c | 14 ++++++++++++++ net/core/sock.c | 5 +---- net/ipv4/inet_connection_sock.c | 1 - 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 53f7c919b9160b..66e7efabf0a1eb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5828,6 +5828,20 @@ void mem_cgroup_sk_alloc(struct sock *sk) if (!mem_cgroup_sockets_enabled) return; + /* + * Socket cloning can throw us here with sk_memcg already + * filled. It won't however, necessarily happen from + * process context. 
So the test for root memcg given + * the current task's memcg won't help us in this case. + * + * Respecting the original socket's memcg is a better + * decision in this case. + */ + if (sk->sk_memcg) { + css_get(&sk->sk_memcg->css); + return; + } + rcu_read_lock(); memcg = mem_cgroup_from_task(current); if (memcg == root_mem_cgroup) diff --git a/net/core/sock.c b/net/core/sock.c index 415f441c63b9e2..beb1e299fed385 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1677,16 +1677,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) newsk->sk_dst_pending_confirm = 0; newsk->sk_wmem_queued = 0; newsk->sk_forward_alloc = 0; - - /* sk->sk_memcg will be populated at accept() time */ - newsk->sk_memcg = NULL; - atomic_set(&newsk->sk_drops, 0); newsk->sk_send_head = NULL; newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; atomic_set(&newsk->sk_zckey, 0); sock_reset_flag(newsk, SOCK_DONE); + mem_cgroup_sk_alloc(newsk); cgroup_sk_alloc(&newsk->sk_cgrp_data); rcu_read_lock(); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index b47a59cb3573b3..0cc08c51220230 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -475,7 +475,6 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern) } spin_unlock_bh(&queue->fastopenq.lock); } - mem_cgroup_sk_alloc(newsk); out: release_sock(sk); if (req) From 07ca93e3176bfdc5e954b79c0dcee0fb95aef6fb Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 25 Jan 2018 20:16:29 -0800 Subject: [PATCH 1771/3326] net: ipv6: send unsolicited NA after DAD [ Upstream commit c76fe2d98c726224a975a0d0198c3fb50406d325 ] Unsolicited IPv6 neighbor advertisements should be sent after DAD completes. Update ndisc_send_unsol_na to skip tentative, non-optimistic addresses and have those sent by addrconf_dad_completed after DAD. Fixes: 4a6e3c5def13c ("net: ipv6: send unsolicited NA on admin up") Reported-by: Vivek Venkatraman Signed-off-by: David Ahern Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/addrconf.c | 30 ++++++++++++++++++++++++++---- net/ipv6/ndisc.c | 5 +++++ 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c5318f5f6a1441..6a76e41e6d5169 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -184,7 +184,8 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, static void addrconf_dad_start(struct inet6_ifaddr *ifp); static void addrconf_dad_work(struct work_struct *w); -static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id); +static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, + bool send_na); static void addrconf_dad_run(struct inet6_dev *idev); static void addrconf_rs_timer(unsigned long data); static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); @@ -3825,12 +3826,17 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) idev->cnf.accept_dad < 1) || !(ifp->flags&IFA_F_TENTATIVE) || ifp->flags & IFA_F_NODAD) { + bool send_na = false; + + if (ifp->flags & IFA_F_TENTATIVE && + !(ifp->flags & IFA_F_OPTIMISTIC)) + send_na = true; bump_id = ifp->flags & IFA_F_TENTATIVE; ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); spin_unlock(&ifp->lock); read_unlock_bh(&idev->lock); - addrconf_dad_completed(ifp, bump_id); + addrconf_dad_completed(ifp, bump_id, send_na); return; } @@ -3959,16 +3965,21 @@ static void addrconf_dad_work(struct work_struct *w) } if (ifp->dad_probes == 0) { + bool send_na = false; + /* * DAD was successful */ + if (ifp->flags & IFA_F_TENTATIVE && + !(ifp->flags & IFA_F_OPTIMISTIC)) + send_na = true; bump_id = ifp->flags & IFA_F_TENTATIVE; ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); spin_unlock(&ifp->lock); write_unlock_bh(&idev->lock); - addrconf_dad_completed(ifp, bump_id); + addrconf_dad_completed(ifp, bump_id, send_na); goto out; } @@ -4006,7 +4017,8 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp) return true; } -static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id) +static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, + bool send_na) { struct net_device *dev = ifp->idev->dev; struct in6_addr lladdr; @@ -4038,6 +4050,16 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id) if (send_mld) ipv6_mc_dad_complete(ifp->idev); + /* send unsolicited NA if enabled */ + if (send_na && + (ifp->idev->cnf.ndisc_notify || + dev_net(dev)->ipv6.devconf_all->ndisc_notify)) { + ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr, + /*router=*/ !!ifp->idev->cnf.forwarding, + /*solicited=*/ false, /*override=*/ true, + /*inc_opt=*/ true); + } + if (send_rs) { /* * If a host as already performed a random delay diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 266a530414d7be..2a937c8d19e96d 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -558,6 +558,11 @@ static void ndisc_send_unsol_na(struct net_device *dev) read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { + /* skip tentative addresses until dad completes */ + if (ifa->flags & IFA_F_TENTATIVE && + !(ifa->flags & IFA_F_OPTIMISTIC)) + continue; + ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifa->addr, /*router=*/ !!idev->cnf.forwarding, /*solicited=*/ false, /*override=*/ true, From 1da27118f419208acd895fc95713dcd9ef25a289 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 1 Feb 2018 12:21:15 +0100 Subject: [PATCH 1772/3326] rocker: fix possible 
null pointer dereference in rocker_router_fib_event_work [ Upstream commit a83165f00f16c0e0ef5b7cec3cbd0d4788699265 ] Currently, rocker user may experience following null pointer derefence bug: [ 3.062141] BUG: unable to handle kernel NULL pointer dereference at 00000000000000d0 [ 3.065163] IP: rocker_router_fib_event_work+0x36/0x110 [rocker] The problem is uninitialized rocker->wops pointer that is initialized only with the first initialized port. So move the port initialization before registering the fib events. Fixes: 936bd486564a ("rocker: use FIB notifications instead of switchdev calls") Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/rocker/rocker_main.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index fc8f8bdf657912..056cb609363035 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2902,6 +2902,12 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_alloc_ordered_workqueue; } + err = rocker_probe_ports(rocker); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + /* Only FIBs pointing to our own netdevs are programmed into * the device, so no need to pass a callback. */ @@ -2918,22 +2924,16 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) rocker->hw.id = rocker_read64(rocker, SWITCH_ID); - err = rocker_probe_ports(rocker); - if (err) { - dev_err(&pdev->dev, "failed to probe ports\n"); - goto err_probe_ports; - } - dev_info(&pdev->dev, "Rocker switch with id %*phN\n", (int)sizeof(rocker->hw.id), &rocker->hw.id); return 0; -err_probe_ports: - unregister_switchdev_notifier(&rocker_switchdev_notifier); err_register_switchdev_notifier: unregister_fib_notifier(&rocker->fib_nb); err_register_fib_notifier: + rocker_remove_ports(rocker); +err_probe_ports: destroy_workqueue(rocker->rocker_owq); err_alloc_ordered_workqueue: free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); @@ -2961,9 +2961,9 @@ static void rocker_remove(struct pci_dev *pdev) { struct rocker *rocker = pci_get_drvdata(pdev); - rocker_remove_ports(rocker); unregister_switchdev_notifier(&rocker_switchdev_notifier); unregister_fib_notifier(&rocker->fib_nb); + rocker_remove_ports(rocker); rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); destroy_workqueue(rocker->rocker_owq); free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); From c04818abad468f5abb45f191eee618b159e1a215 Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Wed, 31 Jan 2018 15:43:05 -0500 Subject: [PATCH 1773/3326] tcp_bbr: fix pacing_gain to always be unity when using lt_bw [ Upstream commit 3aff3b4b986e51bcf4ab249e5d48d39596e0df6a ] This commit fixes the pacing_gain to remain at BBR_UNIT (1.0) when using lt_bw and returning from the PROBE_RTT state to PROBE_BW. Previously, when using lt_bw, upon exiting PROBE_RTT and entering PROBE_BW the bbr_reset_probe_bw_mode() code could sometimes randomly end up with a cycle_idx of 0 and hence have bbr_advance_cycle_phase() set a pacing gain above 1.0. In such cases this would result in a pacing rate that is 1.25x higher than intended, potentially resulting in a high loss rate for a little while until we stop using the lt_bw a bit later. This commit is a stable candidate for kernels back as far as 4.9. 
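The fix amounts to ignoring the gain-cycling table whenever lt_bw is in use, so the pacing gain stays pinned at BBR_UNIT (1.0). Roughly (see the hunk below):

    bbr->pacing_gain = bbr->lt_use_bw ? BBR_UNIT :
                                        bbr_pacing_gain[bbr->cycle_idx];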
Fixes: 0f8782ea1497 ("tcp_bbr: add BBR congestion control") Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Soheil Hassas Yeganeh Reported-by: Beyers Cronje Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_bbr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 8322f26e770e44..25c5a0b60cfcc6 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -481,7 +481,8 @@ static void bbr_advance_cycle_phase(struct sock *sk) bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1); bbr->cycle_mstamp = tp->delivered_mstamp; - bbr->pacing_gain = bbr_pacing_gain[bbr->cycle_idx]; + bbr->pacing_gain = bbr->lt_use_bw ? BBR_UNIT : + bbr_pacing_gain[bbr->cycle_idx]; } /* Gain cycling: cycle pacing gain to converge to fair share of available bw. */ @@ -490,8 +491,7 @@ static void bbr_update_cycle_phase(struct sock *sk, { struct bbr *bbr = inet_csk_ca(sk); - if ((bbr->mode == BBR_PROBE_BW) && !bbr->lt_use_bw && - bbr_is_next_cycle_phase(sk, rs)) + if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs)) bbr_advance_cycle_phase(sk); } From 456add4c9bd3bd97a6ab34fc9a5866358ca18d25 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 24 Jan 2018 23:15:27 -0800 Subject: [PATCH 1774/3326] ipv6: Fix SO_REUSEPORT UDP socket with implicit sk_ipv6only [ Upstream commit 7ece54a60ee2ba7a386308cae73c790bd580589c ] If a sk_v6_rcv_saddr is !IPV6_ADDR_ANY and !IPV6_ADDR_MAPPED, it implicitly implies it is an ipv6only socket. However, in inet6_bind(), this addr_type checking and setting sk->sk_ipv6only to 1 are only done after sk->sk_prot->get_port(sk, snum) has been completed successfully. This inconsistency between sk_v6_rcv_saddr and sk_ipv6only confuses the 'get_port()'. In particular, when binding SO_REUSEPORT UDP sockets, udp_reuseport_add_sock(sk,...) is called. udp_reuseport_add_sock() checks "ipv6_only_sock(sk2) == ipv6_only_sock(sk)" before adding sk to sk2->sk_reuseport_cb. In this case, ipv6_only_sock(sk2) could be 1 while ipv6_only_sock(sk) is still 0 here. The end result is, reuseport_alloc(sk) is called instead of adding sk to the existing sk2->sk_reuseport_cb. It can be reproduced by binding two SO_REUSEPORT UDP sockets on an IPv6 address (!ANY and !MAPPED). Only one of the socket will receive packet. The fix is to set the implicit sk_ipv6only before calling get_port(). The original sk_ipv6only has to be saved such that it can be restored in case get_port() failed. The situation is similar to the inet_reset_saddr(sk) after get_port() has failed. Thanks to Calvin Owens who created an easy reproduction which leads to a fix. Fixes: e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection") Signed-off-by: Martin KaFai Lau Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/af_inet6.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index bcbd5f3bf8bd08..9ccbf74deb99c2 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -284,6 +284,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct net *net = sock_net(sk); __be32 v4addr = 0; unsigned short snum; + bool saved_ipv6only; int addr_type = 0; int err = 0; @@ -389,19 +390,21 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (!(addr_type & IPV6_ADDR_MULTICAST)) np->saddr = addr->sin6_addr; + saved_ipv6only = sk->sk_ipv6only; + if (addr_type != IPV6_ADDR_ANY && addr_type != IPV6_ADDR_MAPPED) + sk->sk_ipv6only = 1; + /* Make sure we are allowed to bind here. */ if ((snum || !inet->bind_address_no_port) && sk->sk_prot->get_port(sk, snum)) { + sk->sk_ipv6only = saved_ipv6only; inet_reset_saddr(sk); err = -EADDRINUSE; goto out; } - if (addr_type != IPV6_ADDR_ANY) { + if (addr_type != IPV6_ADDR_ANY) sk->sk_userlocks |= SOCK_BINDADDR_LOCK; - if (addr_type != IPV6_ADDR_MAPPED) - sk->sk_ipv6only = 1; - } if (snum) sk->sk_userlocks |= SOCK_BINDPORT_LOCK; inet->inet_sport = htons(inet->inet_num); From 981f20bc75952ffa1ebe5b1bb2b9f35ed490a354 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 2 Feb 2018 10:27:27 -0800 Subject: [PATCH 1775/3326] soreuseport: fix mem leak in reuseport_add_sock() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 4db428a7c9ab07e08783e0fcdc4ca0f555da0567 ] reuseport_add_sock() needs to deal with attaching a socket having its own sk_reuseport_cb, after a prior setsockopt(SO_ATTACH_REUSEPORT_?BPF) Without this fix, not only a WARN_ONCE() was issued, but we were also leaking memory. Thanks to sysbot and Eric Biggers for providing us nice C repros. ------------[ cut here ]------------ socket already in reuseport group WARNING: CPU: 0 PID: 3496 at net/core/sock_reuseport.c:119   reuseport_add_sock+0x742/0x9b0 net/core/sock_reuseport.c:117 Kernel panic - not syncing: panic_on_warn set ... CPU: 0 PID: 3496 Comm: syzkaller869503 Not tainted 4.15.0-rc6+ #245 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS   Google 01/01/2011 Call Trace:   __dump_stack lib/dump_stack.c:17 [inline]   dump_stack+0x194/0x257 lib/dump_stack.c:53   panic+0x1e4/0x41c kernel/panic.c:183   __warn+0x1dc/0x200 kernel/panic.c:547   report_bug+0x211/0x2d0 lib/bug.c:184   fixup_bug.part.11+0x37/0x80 arch/x86/kernel/traps.c:178   fixup_bug arch/x86/kernel/traps.c:247 [inline]   do_error_trap+0x2d7/0x3e0 arch/x86/kernel/traps.c:296   do_invalid_op+0x1b/0x20 arch/x86/kernel/traps.c:315   invalid_op+0x22/0x40 arch/x86/entry/entry_64.S:1079 Fixes: ef456144da8e ("soreuseport: define reuseport groups") Signed-off-by: Eric Dumazet Reported-by: syzbot+c0ea2226f77a42936bf7@syzkaller.appspotmail.com Acked-by: Craig Gallek Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/core/sock_reuseport.c | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index 5eeb1d20cc3887..676092d7bd8129 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -94,6 +94,16 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) return more_reuse; } +static void reuseport_free_rcu(struct rcu_head *head) +{ + struct sock_reuseport *reuse; + + reuse = container_of(head, struct sock_reuseport, rcu); + if (reuse->prog) + bpf_prog_destroy(reuse->prog); + kfree(reuse); +} + /** * reuseport_add_sock - Add a socket to the reuseport group of another. * @sk: New socket to add to the group. @@ -102,7 +112,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) */ int reuseport_add_sock(struct sock *sk, struct sock *sk2) { - struct sock_reuseport *reuse; + struct sock_reuseport *old_reuse, *reuse; if (!rcu_access_pointer(sk2->sk_reuseport_cb)) { int err = reuseport_alloc(sk2); @@ -113,10 +123,13 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2) spin_lock_bh(&reuseport_lock); reuse = rcu_dereference_protected(sk2->sk_reuseport_cb, - lockdep_is_held(&reuseport_lock)), - WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb, - lockdep_is_held(&reuseport_lock)), - "socket already in reuseport group"); + lockdep_is_held(&reuseport_lock)); + old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb, + lockdep_is_held(&reuseport_lock)); + if (old_reuse && old_reuse->num_socks != 1) { + spin_unlock_bh(&reuseport_lock); + return -EBUSY; + } if (reuse->num_socks == reuse->max_socks) { reuse = reuseport_grow(reuse); @@ -134,19 +147,11 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2) spin_unlock_bh(&reuseport_lock); + if (old_reuse) + call_rcu(&old_reuse->rcu, reuseport_free_rcu); return 0; } -static void reuseport_free_rcu(struct rcu_head *head) -{ - struct sock_reuseport *reuse; - - reuse = container_of(head, struct sock_reuseport, rcu); - if (reuse->prog) - bpf_prog_destroy(reuse->prog); - kfree(reuse); -} - void reuseport_detach_sock(struct sock *sk) { struct sock_reuseport *reuse; From 1bf81cff1bad67632e4ba68cbe0c843541439de8 Mon Sep 17 00:00:00 2001 From: Jesse Chan Date: Mon, 20 Nov 2017 02:47:54 -0500 Subject: [PATCH 1776/3326] media: mtk-vcodec: add missing MODULE_LICENSE/DESCRIPTION commit ccbc1e3876d5476fc23429690a683294ef0f755b upstream. This change resolves a new compile-time warning when built as a loadable module: WARNING: modpost: missing MODULE_LICENSE() in drivers/media/platform/mtk-vcodec/mtk-vcodec-common.o see include/linux/module.h for more information This adds the license as "GPL v2", which matches the header of the file. MODULE_DESCRIPTION is also added. 
Signed-off-by: Jesse Chan Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c index 46768c0561936c..0c28d0b995ccd7 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c @@ -115,3 +115,6 @@ struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev) return ctx; } EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Mediatek video codec driver"); From 68a9f19264fd23ec78c18724d56fd71dbe5f23b1 Mon Sep 17 00:00:00 2001 From: Jesse Chan Date: Mon, 20 Nov 2017 15:56:28 -0500 Subject: [PATCH 1777/3326] media: soc_camera: soc_scale_crop: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE commit 5331aec1bf9c9da557668174e0a4bfcee39f1121 upstream. This change resolves a new compile-time warning when built as a loadable module: WARNING: modpost: missing MODULE_LICENSE() in drivers/media/platform/soc_camera/soc_scale_crop.o see include/linux/module.h for more information This adds the license as "GPL", which matches the header of the file. MODULE_DESCRIPTION and MODULE_AUTHOR are also added. Signed-off-by: Jesse Chan Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/platform/soc_camera/soc_scale_crop.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c index 0116097c0c0faf..092c73f2458973 100644 --- a/drivers/media/platform/soc_camera/soc_scale_crop.c +++ b/drivers/media/platform/soc_camera/soc_scale_crop.c @@ -419,3 +419,7 @@ void soc_camera_calc_client_output(struct soc_camera_device *icd, mf->height = soc_camera_shift_scale(rect->height, shift, scale_v); } EXPORT_SYMBOL(soc_camera_calc_client_output); + +MODULE_DESCRIPTION("soc-camera scaling-cropping functions"); +MODULE_AUTHOR("Guennadi Liakhovetski "); +MODULE_LICENSE("GPL"); From 3a570cfe785a1d031ee56e3c4a3534cb36e26b65 Mon Sep 17 00:00:00 2001 From: Robert Baronescu Date: Tue, 10 Oct 2017 13:21:59 +0300 Subject: [PATCH 1778/3326] crypto: tcrypt - fix S/G table for test_aead_speed() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5c6ac1d4f8fbdbed65dbeb8cf149d736409d16a1 upstream. In case buffer length is a multiple of PAGE_SIZE, the S/G table is incorrectly generated. Fix this by handling buflen = k * PAGE_SIZE separately. 
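The page-splitting arithmetic behind the fix can be sketched in plain user-space C. This is an illustration derived from the description above, not the tcrypt code itself; PAGE_SIZE, np and rem merely stand in for the driver's variables.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Split buflen into scatterlist-style segments: full pages first,
 * then one trailing segment of rem bytes only when rem != 0. The
 * pre-fix logic always shortened the last entry to rem bytes, which
 * becomes a zero-length segment when buflen is an exact multiple of
 * PAGE_SIZE. */
static void split(unsigned long buflen)
{
        unsigned long np   = (buflen + PAGE_SIZE - 1) / PAGE_SIZE;
        unsigned long rem  = buflen % PAGE_SIZE;
        unsigned long full = rem ? np - 1 : np;
        unsigned long k;

        for (k = 0; k < full; k++)
                printf("segment %lu: %lu bytes\n", k, PAGE_SIZE);
        if (rem)
                printf("segment %lu: %lu bytes (tail)\n", k, rem);
}

int main(void)
{
        split(3 * PAGE_SIZE);           /* exact multiple: no tail segment */
        split(3 * PAGE_SIZE + 100);     /* 100-byte tail segment */
        return 0;
}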
Signed-off-by: Robert Baronescu Signed-off-by: Herbert Xu Signed-off-by: Horia Geantă Signed-off-by: Greg Kroah-Hartman --- crypto/tcrypt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index f5f58a6eee5ddc..e339960dcac7ba 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -221,11 +221,13 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], } sg_init_table(sg, np + 1); - np--; + if (rem) + np--; for (k = 0; k < np; k++) sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); - sg_set_buf(&sg[k + 1], xbuf[k], rem); + if (rem) + sg_set_buf(&sg[k + 1], xbuf[k], rem); } static void test_aead_speed(const char *algo, int enc, unsigned int secs, From c5c91d8305074ce1ae72df29c7446850fc927a4f Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Thu, 14 Dec 2017 15:33:19 -0800 Subject: [PATCH 1779/3326] arch: define weak abort() commit 7c2c11b208be09c156573fc0076b7b3646e05219 upstream. gcc toggle -fisolate-erroneous-paths-dereference (default at -O2 onwards) isolates faulty code paths such as null pointer access, divide by zero etc. If gcc port doesnt implement __builtin_trap, an abort() is generated which causes kernel link error. In this case, gcc is generating abort due to 'divide by zero' in lib/mpi/mpih-div.c. Currently 'frv' and 'arc' are failing. Previously other arch was also broken like m32r was fixed by commit d22e3d69ee1a ("m32r: fix build failure"). Let's define this weak function which is common for all arch and fix the problem permanently. We can even remove the arch specific 'abort' after this is done. Link: http://lkml.kernel.org/r/1513118956-8718-1-git-send-email-sudipm.mukherjee@gmail.com Signed-off-by: Sudip Mukherjee Cc: Alexey Brodkin Cc: Vineet Gupta Cc: Sudip Mukherjee Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Cc: Evgeniy Didin Signed-off-by: Greg Kroah-Hartman --- kernel/exit.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kernel/exit.c b/kernel/exit.c index f6cad39f35dfbe..5a0c4791473c3c 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1755,3 +1755,11 @@ COMPAT_SYSCALL_DEFINE5(waitid, return -EFAULT; } #endif + +__weak void abort(void) +{ + BUG(); + + /* if that doesn't kill us, halt */ + panic("Oops failed to kill thread"); +} From f18046f7a50accf76dab9c6ab274a7e38143a0dc Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 4 Jan 2018 16:17:56 -0800 Subject: [PATCH 1780/3326] kernel/exit.c: export abort() to modules commit dc8635b78cd8669c37e230058d18c33af7451ab1 upstream. gcc -fisolate-erroneous-paths-dereference can generate calls to abort() from modular code too. 
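For readers unfamiliar with the gcc behaviour referred to here, the stand-alone fragment below (an illustration, not kernel code) shows the kind of provably erroneous path that -fisolate-erroneous-paths-dereference isolates; per the commit text above, targets whose gcc port lacks __builtin_trap end up with a call to abort() instead, which the weak definition satisfies at link time.

/* Built with gcc -O2: the p == NULL branch is a guaranteed NULL
 * dereference, so the compiler replaces that path with a trap, or
 * with a call to abort() on ports without __builtin_trap. */
int read_flag(int *p)
{
        if (p == 0)
                return *p;      /* provably erroneous path */
        return *p + 1;
}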
[arnd@arndb.de: drop duplicate exports of abort()] Link: http://lkml.kernel.org/r/20180102103311.706364-1-arnd@arndb.de Reported-by: Vineet Gupta Cc: Sudip Mukherjee Cc: Arnd Bergmann Cc: Alexey Brodkin Cc: Russell King Cc: Jose Abreu Signed-off-by: Andrew Morton Signed-off-by: Arnd Bergmann Signed-off-by: Linus Torvalds Cc: Evgeniy Didin Signed-off-by: Greg Kroah-Hartman --- arch/arm/kernel/traps.c | 1 - arch/m32r/kernel/traps.c | 1 - arch/unicore32/kernel/traps.c | 1 - kernel/exit.c | 1 + 4 files changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 0fcd82f013883c..b8dc3b516f937f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -790,7 +790,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index cb79fba79d4391..b88a8dd149333d 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -122,7 +122,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index 5f25b39f04d430..c4ac6043ebb0fc 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -298,7 +298,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/kernel/exit.c b/kernel/exit.c index 5a0c4791473c3c..e3a08761eb4074 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1763,3 +1763,4 @@ __weak void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } +EXPORT_SYMBOL(abort); From 22f16a74c4744f8b0d83fd0d031be2137e764cbf Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 16 Jan 2018 13:40:22 +0300 Subject: [PATCH 1781/3326] scsi: storvsc: missing error code in storvsc_probe() commit ca8dc694045e9aa248e9916e0f614deb0494cb3d upstream. We should set the error code if fc_remote_port_add() fails. Cc: #v4.12+ Fixes: daf0cd445a21 ("scsi: storvsc: Add support for FC rport.") Signed-off-by: Dan Carpenter Reviewed-by: Cathy Avery Acked-by: K. Y. Srinivasan Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman Signed-off-by: Long Li --- drivers/scsi/storvsc_drv.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 5e7200f0587364..c17ccb913fde93 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1826,8 +1826,10 @@ static int storvsc_probe(struct hv_device *device, fc_host_node_name(host) = stor_device->node_name; fc_host_port_name(host) = stor_device->port_name; stor_device->rport = fc_remote_port_add(host, 0, &ids); - if (!stor_device->rport) + if (!stor_device->rport) { + ret = -ENOMEM; goto err_out3; + } } #endif return 0; From d2aeb4e2bd8db42589fd7624cb9453894ab455ec Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 13 Feb 2018 10:14:14 +0100 Subject: [PATCH 1782/3326] Revert "x86/alternative: Print unadorned pointers" This reverts commit 67eb59b8ecfb319438706cee2cb67a3045b54494. It's not needed in 4.14.y and only causes messy debugging messages, if anyone actually cares about these random debug messages in the first place (doubtful). 
Reported-by: Kees Cook Acked-by: Borislav Petkov Cc: Thomas Gleixner Cc: riel@redhat.com Cc: ak@linux.intel.com Cc: peterz@infradead.org Cc: David Woodhouse Cc: jikos@kernel.org Cc: luto@amacapital.net Cc: dave.hansen@intel.com Cc: torvalds@linux-foundation.org Cc: keescook@google.com Cc: Josh Poimboeuf Cc: tim.c.chen@linux.intel.com Cc: gregkh@linux-foundation.org Cc: pjt@google.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/alternative.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 21be0193d9dc3e..b034826a0b3b8f 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -287,7 +287,7 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf) tgt_rip = next_rip + o_dspl; n_dspl = tgt_rip - orig_insn; - DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl); + DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl); if (tgt_rip - orig_insn >= 0) { if (n_dspl - 2 <= 127) @@ -344,7 +344,7 @@ static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *ins add_nops(instr + (a->instrlen - a->padlen), a->padlen); local_irq_restore(flags); - DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ", + DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ", instr, a->instrlen - a->padlen, a->padlen); } @@ -365,7 +365,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, u8 *instr, *replacement; u8 insnbuf[MAX_PATCH_LEN]; - DPRINTK("alt table %px, -> %px", start, end); + DPRINTK("alt table %p -> %p", start, end); /* * The scan order should be from start to end. A later scanned * alternative code can overwrite previously scanned alternative code. 
@@ -389,14 +389,14 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, continue; } - DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d", + DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d", a->cpuid >> 5, a->cpuid & 0x1f, instr, a->instrlen, replacement, a->replacementlen, a->padlen); - DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr); - DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement); + DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr); + DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement); memcpy(insnbuf, replacement, a->replacementlen); insnbuf_sz = a->replacementlen; @@ -422,7 +422,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, a->instrlen - a->replacementlen); insnbuf_sz += a->instrlen - a->replacementlen; } - DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr); + DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr); text_poke_early(instr, insnbuf, insnbuf_sz); } From 1722fe3727845178e5d7fe3fdf35b8a5de230fdf Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 13 Feb 2018 10:19:50 +0100 Subject: [PATCH 1783/3326] Linux 4.14.19 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a69e5da9ed86a0..76a0b13623f418 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 18 +SUBLEVEL = 19 EXTRAVERSION = NAME = Petit Gorille From 8caab9edccf57540770352825f9d3fa2d8343ae2 Mon Sep 17 00:00:00 2001 From: Matt Redfearn Date: Tue, 14 Nov 2017 10:52:54 +0000 Subject: [PATCH 1784/3326] watchdog: indydog: Add dependency on SGI_HAS_INDYDOG commit 24f8d233074badd4c18e4dafd2fb97d65838afed upstream. Commit da2a68b3eb47 ("watchdog: Enable COMPILE_TEST where possible") enabled building the Indy watchdog driver when COMPILE_TEST is enabled. However, the driver makes reference to symbols that are only defined for certain platforms are selected in the config. These platforms select SGI_HAS_INDYDOG. Without this, link time errors result, for example when building a MIPS allyesconfig. drivers/watchdog/indydog.o: In function `indydog_write': indydog.c:(.text+0x18): undefined reference to `sgimc' indydog.c:(.text+0x1c): undefined reference to `sgimc' drivers/watchdog/indydog.o: In function `indydog_start': indydog.c:(.text+0x54): undefined reference to `sgimc' indydog.c:(.text+0x58): undefined reference to `sgimc' drivers/watchdog/indydog.o: In function `indydog_stop': indydog.c:(.text+0xa4): undefined reference to `sgimc' drivers/watchdog/indydog.o:indydog.c:(.text+0xa8): more undefined references to `sgimc' follow make: *** [Makefile:1005: vmlinux] Error 1 Fix this by ensuring that CONFIG_INDIDOG can only be selected when the necessary dependent platform symbols are built in. 
Fixes: da2a68b3eb47 ("watchdog: Enable COMPILE_TEST where possible") Signed-off-by: Matt Redfearn Signed-off-by: Ralf Baechle Suggested-by: James Hogan Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Greg Kroah-Hartman --- drivers/watchdog/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index c722cbfdc7e64b..3ece1335ba8403 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1451,7 +1451,7 @@ config RC32434_WDT config INDYDOG tristate "Indy/I2 Hardware Watchdog" - depends on SGI_HAS_INDYDOG || (MIPS && COMPILE_TEST) + depends on SGI_HAS_INDYDOG help Hardware driver for the Indy's/I2's watchdog. This is a watchdog timer that will reboot the machine after a 60 second From b4a9ffad970244d0a349e4e06e5bb332790d7715 Mon Sep 17 00:00:00 2001 From: Michal Suchanek Date: Mon, 15 Jan 2018 14:30:03 +0100 Subject: [PATCH 1785/3326] powerpc/pseries: include linux/types.h in asm/hvcall.h commit 1b689a95ce7427075f9ac9fb4aea1af530742b7f upstream. Commit 6e032b350cd1 ("powerpc/powernv: Check device-tree for RFI flush settings") uses u64 in asm/hvcall.h without including linux/types.h This breaks hvcall.h users that do not include the header themselves. Fixes: 6e032b350cd1 ("powerpc/powernv: Check device-tree for RFI flush settings") Signed-off-by: Michal Suchanek Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/hvcall.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index f0461618bf7bee..eca3f9c6890706 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -353,6 +353,7 @@ #define PROC_TABLE_GTSE 0x01 #ifndef __ASSEMBLY__ +#include /** * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments From 061df7705a53179ecb7f73a6b995bf7170a794d8 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 15 Dec 2017 12:48:32 -0800 Subject: [PATCH 1786/3326] cifs: Fix missing put_xid in cifs_file_strict_mmap commit f04a703c3d613845ae3141bfaf223489de8ab3eb upstream. If cifs_zap_mapping() returned an error, we would return without putting the xid that we got earlier. Restructure cifs_file_strict_mmap() and cifs_file_mmap() to be more similar to each other and have a single point of return that always puts the xid. 
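The restructuring follows the usual single-exit pattern for paired acquire/release helpers; a minimal self-contained sketch is below, with get_xid()/free_xid() and the work steps replaced by stand-ins because only the control flow is being illustrated.

#include <stdio.h>

static int  get_xid(void)      { return 42; }  /* stand-in */
static void free_xid(int xid)  { (void)xid; }  /* stand-in */
static int  revalidate(void)   { return 0;  }  /* stand-in */
static int  do_mmap_work(void) { return 0;  }  /* stand-in */

/* Every path that obtained the xid reaches the single free_xid()
 * before returning, so no error branch can leak it. */
static int mmap_like_op(int need_revalidate)
{
        int xid = get_xid();
        int rc = 0;

        if (need_revalidate)
                rc = revalidate();
        if (!rc)
                rc = do_mmap_work();

        free_xid(xid);
        return rc;
}

int main(void)
{
        printf("rc=%d\n", mmap_like_op(1));
        return 0;
}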
Signed-off-by: Matthew Wilcox Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/cifs/file.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 92fdf9c35de215..7d6539a04fac96 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3488,20 +3488,18 @@ static const struct vm_operations_struct cifs_file_vm_ops = { int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) { - int rc, xid; + int xid, rc = 0; struct inode *inode = file_inode(file); xid = get_xid(); - if (!CIFS_CACHE_READ(CIFS_I(inode))) { + if (!CIFS_CACHE_READ(CIFS_I(inode))) rc = cifs_zap_mapping(inode); - if (rc) - return rc; - } - - rc = generic_file_mmap(file, vma); - if (rc == 0) + if (!rc) + rc = generic_file_mmap(file, vma); + if (!rc) vma->vm_ops = &cifs_file_vm_ops; + free_xid(xid); return rc; } @@ -3511,16 +3509,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) int rc, xid; xid = get_xid(); + rc = cifs_revalidate_file(file); - if (rc) { + if (rc) cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", rc); - free_xid(xid); - return rc; - } - rc = generic_file_mmap(file, vma); - if (rc == 0) + if (!rc) + rc = generic_file_mmap(file, vma); + if (!rc) vma->vm_ops = &cifs_file_vm_ops; + free_xid(xid); return rc; } From be6874b4d480d12ec120e195f35091d831ca97bc Mon Sep 17 00:00:00 2001 From: Daniel N Pettersson Date: Thu, 11 Jan 2018 16:00:12 +0100 Subject: [PATCH 1787/3326] cifs: Fix autonegotiate security settings mismatch commit 9aca7e454415f7878b28524e76bebe1170911a88 upstream. Autonegotiation gives a security settings mismatch error if the SMB server selects an SMBv3 dialect that isn't SMB3.02. The exact error is "protocol revalidation - security settings mismatch". This can be tested using Samba v4.2 or by setting the global Samba setting max protocol = SMB3_00. The check that fails in smb3_validate_negotiate is the dialect verification of the negotiate info response. This is because it tries to verify against the protocol_id in the global smbdefault_values. The protocol_id in smbdefault_values is SMB3.02. In SMB2_negotiate the protocol_id in smbdefault_values isn't updated, it is global so it probably shouldn't be, but server->dialect is. This patch changes the check in smb3_validate_negotiate to use server->dialect instead of server->vals->protocol_id. The patch works with autonegotiate and when using a specific version in the vers mount option. Signed-off-by: Daniel N Pettersson Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/cifs/smb2pdu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 01346b8b6edb38..66af1f8a13cc71 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -733,8 +733,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) } /* check validate negotiate info response matches what we got earlier */ - if (pneg_rsp->Dialect != - cpu_to_le16(tcon->ses->server->vals->protocol_id)) + if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect)) goto vneg_out; if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode)) From 4126cdb73193eb0778ae05d2162cbb3bcd778331 Mon Sep 17 00:00:00 2001 From: Aurelien Aptel Date: Thu, 25 Jan 2018 15:59:39 +0100 Subject: [PATCH 1788/3326] CIFS: zero sensitive data when freeing commit 97f4b7276b829a8927ac903a119bef2f963ccc58 upstream. also replaces memset()+kfree() by kzfree(). 
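A rough user-space analogue of the substitution (illustrative only; the kernel's kzfree() needs no length argument because it sizes the allocation itself via ksize()):

#include <stdlib.h>
#include <string.h>

/* Scrub a buffer that held secrets before returning it to the
 * allocator so the data does not linger in freed memory. Note that
 * in ordinary user space a compiler may elide a memset right before
 * free(); explicit_bzero() exists for that case. */
static void zero_and_free(void *p, size_t len)
{
        if (!p)
                return;
        memset(p, 0, len);
        free(p);
}

int main(void)
{
        char *password = malloc(16);

        if (!password)
                return 1;
        strcpy(password, "s3cret");
        zero_and_free(password, 16);
        return 0;
}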
Signed-off-by: Aurelien Aptel Signed-off-by: Steve French Reviewed-by: Pavel Shilovsky Signed-off-by: Greg Kroah-Hartman --- fs/cifs/cifsencrypt.c | 3 +-- fs/cifs/connect.c | 6 +++--- fs/cifs/misc.c | 14 ++++---------- 3 files changed, 8 insertions(+), 15 deletions(-) diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 68abbb0db60898..f2b0a7f124da94 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -325,9 +325,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt, { int i; int rc; - char password_with_pad[CIFS_ENCPWD_SIZE]; + char password_with_pad[CIFS_ENCPWD_SIZE] = {0}; - memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); if (password) strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 0bfc2280436df8..f7db2fedfa8c63 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1707,7 +1707,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, tmp_end++; if (!(tmp_end < end && tmp_end[1] == delim)) { /* No it is not. Set the password to NULL */ - kfree(vol->password); + kzfree(vol->password); vol->password = NULL; break; } @@ -1745,7 +1745,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, options = end; } - kfree(vol->password); + kzfree(vol->password); /* Now build new password string */ temp_len = strlen(value); vol->password = kzalloc(temp_len+1, GFP_KERNEL); @@ -4235,7 +4235,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) reset_cifs_unix_caps(0, tcon, NULL, vol_info); out: kfree(vol_info->username); - kfree(vol_info->password); + kzfree(vol_info->password); kfree(vol_info); return tcon; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index eea93ac15ef03e..a0dbced4a45c35 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -98,14 +98,11 @@ sesInfoFree(struct cifs_ses *buf_to_free) kfree(buf_to_free->serverOS); kfree(buf_to_free->serverDomain); kfree(buf_to_free->serverNOS); - if (buf_to_free->password) { - memset(buf_to_free->password, 0, strlen(buf_to_free->password)); - kfree(buf_to_free->password); - } + kzfree(buf_to_free->password); kfree(buf_to_free->user_name); kfree(buf_to_free->domainName); - kfree(buf_to_free->auth_key.response); - kfree(buf_to_free); + kzfree(buf_to_free->auth_key.response); + kzfree(buf_to_free); } struct cifs_tcon * @@ -136,10 +133,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free) } atomic_dec(&tconInfoAllocCount); kfree(buf_to_free->nativeFileSystem); - if (buf_to_free->password) { - memset(buf_to_free->password, 0, strlen(buf_to_free->password)); - kfree(buf_to_free->password); - } + kzfree(buf_to_free->password); kfree(buf_to_free); } From 7adf1d63ef6567924695e3965418d0ded8c896c8 Mon Sep 17 00:00:00 2001 From: Andrew-sh Cheng Date: Fri, 8 Dec 2017 14:07:56 +0800 Subject: [PATCH 1789/3326] cpufreq: mediatek: add mediatek related projects into blacklist commit 6066998cbd2b1012a8d5bc9a2957cfd0ad53150e upstream. mediatek projects will use mediate-cpufreq.c as cpufreq driver, instead of using cpufreq_dt.c Add mediatek related projects into cpufreq-dt blacklist Signed-off-by: Andrew-sh Cheng Acked-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Sean Wang Signed-off-by: Greg Kroah-Hartman --- drivers/cpufreq/cpufreq-dt-platdev.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index a753c50e9e412e..9e0aa767bbbe68 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -111,6 +111,14 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "marvell,armadaxp", }, + { .compatible = "mediatek,mt2701", }, + { .compatible = "mediatek,mt2712", }, + { .compatible = "mediatek,mt7622", }, + { .compatible = "mediatek,mt7623", }, + { .compatible = "mediatek,mt817x", }, + { .compatible = "mediatek,mt8173", }, + { .compatible = "mediatek,mt8176", }, + { .compatible = "nvidia,tegra124", }, { .compatible = "st,stih407", }, From 69373cdc4ae4eacac2eb19da03eb0ad2bb0eda7f Mon Sep 17 00:00:00 2001 From: Yang Shunyong Date: Mon, 29 Jan 2018 14:40:11 +0800 Subject: [PATCH 1790/3326] dmaengine: dmatest: fix container_of member in dmatest_callback commit 66b3bd2356e0a1531c71a3dcf96944621e25c17c upstream. The type of arg passed to dmatest_callback is struct dmatest_done. It refers to test_done in struct dmatest_thread, not done_wait. Fixes: 6f6a23a213be ("dmaengine: dmatest: move callback wait ...") Signed-off-by: Yang Shunyong Acked-by: Adam Wallis Signed-off-by: Vinod Koul Signed-off-by: Greg Kroah-Hartman --- drivers/dma/dmatest.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index ec5f9d2bc8202f..80cc2be6483cb8 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -355,7 +355,7 @@ static void dmatest_callback(void *arg) { struct dmatest_done *done = arg; struct dmatest_thread *thread = - container_of(arg, struct dmatest_thread, done_wait); + container_of(done, struct dmatest_thread, test_done); if (!thread->done) { done->done = true; wake_up_all(done->wait); From e94a7de2a3d2da42f0d124c7501195f1cdffa232 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Tue, 5 Dec 2017 23:15:31 -0800 Subject: [PATCH 1791/3326] sched/wait: Fix add_wait_queue() behavioral change commit c6b9d9a33029014446bd9ed84c1688f6d3d4eab9 upstream. The following cleanup commit: 50816c48997a ("sched/wait: Standardize internal naming of wait-queue entries") ... unintentionally changed the behavior of add_wait_queue() from inserting the wait entry at the head of the wait queue to the tail of the wait queue. Beyond a negative performance impact this change in behavior theoretically also breaks wait queues which mix exclusive and non-exclusive waiters, as non-exclusive waiters will not be woken up if they are queued behind enough exclusive waiters. 
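A small user-space model makes the breakage concrete. It is only an illustration of the ordering rule stated above (names invented, list handling simplified): the wake loop visits entries in queue order and stops once an exclusive waiter has consumed the wakeup, so a non-exclusive waiter queued behind exclusive ones is never reached.

#include <stdbool.h>
#include <stdio.h>

struct waiter {
        const char *name;
        bool exclusive;
        struct waiter *next;
};

/* Simplified wake loop: wake entries in list order, stopping after
 * the first exclusive waiter. */
static void wake_up_one(struct waiter *head)
{
        for (struct waiter *w = head; w; w = w->next) {
                printf("waking %s\n", w->name);
                if (w->exclusive)
                        break;
        }
}

int main(void)
{
        struct waiter reader = { "non-exclusive reader", false, NULL };
        struct waiter writer = { "exclusive writer", true, &reader };

        /* Tail insertion queued the reader behind the writer, so it is
         * never woken; head insertion (the restored behaviour) would
         * have placed it first. */
        wake_up_one(&writer);
        return 0;
}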
Signed-off-by: Omar Sandoval Reviewed-by: Jens Axboe Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Thomas Gleixner Cc: kernel-team@fb.com Fixes: ("sched/wait: Standardize internal naming of wait-queue entries") Link: http://lkml.kernel.org/r/a16c8ccffd39bd08fdaa45a5192294c784b803a7.1512544324.git.osandov@fb.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- kernel/sched/wait.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 98feab7933c76a..929ecb7d6b78a7 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -27,7 +27,7 @@ void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE; spin_lock_irqsave(&wq_head->lock, flags); - __add_wait_queue_entry_tail(wq_head, wq_entry); + __add_wait_queue(wq_head, wq_entry); spin_unlock_irqrestore(&wq_head->lock, flags); } EXPORT_SYMBOL(add_wait_queue); From fd7467f82e58e7b6579cac0c9e883495df15f163 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Thu, 9 Nov 2017 14:39:55 +0100 Subject: [PATCH 1792/3326] watchdog: gpio_wdt: set WDOG_HW_RUNNING in gpio_wdt_stop commit bc137dfdbec27c0ec5731a89002daded4a4aa1ea upstream. The first patch above (https://patchwork.kernel.org/patch/9970181/) makes the oops go away, but it just papers over the problem. The real problem is that the watchdog core clears WDOG_HW_RUNNING in watchdog_stop, and the gpio driver fails to set it in its stop function when it doesn't actually stop it. This means that the core doesn't know that it now has responsibility for petting the device, in turn causing the device to reset the system (I hadn't noticed this because the board I'm working on has that reset logic disabled). How about this (other drivers may of course have the same problem, I haven't checked). One might say that ->stop should return an error when the device can't be stopped, but OTOH this brings parity between a device without a ->stop method and a GPIO wd that has always-running set. IOW, I think ->stop should only return an error when an actual attempt to stop the hardware failed. From: Rasmus Villemoes The watchdog framework clears WDOG_HW_RUNNING before calling ->stop. If the driver is unable to stop the device, it is supposed to set that bit again so that the watchdog core takes care of sending heart-beats while the device is not open from user-space. Update the gpio_wdt driver to honour that contract (and get rid of the redundant clearing of WDOG_HW_RUNNING). Fixes: 3c10bbde10 ("watchdog: core: Clear WDOG_HW_RUNNING before calling the stop function") Signed-off-by: Rasmus Villemoes Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Greg Kroah-Hartman --- drivers/watchdog/gpio_wdt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c index cb66c2f99ff15d..7a6279daa8b909 100644 --- a/drivers/watchdog/gpio_wdt.c +++ b/drivers/watchdog/gpio_wdt.c @@ -80,7 +80,8 @@ static int gpio_wdt_stop(struct watchdog_device *wdd) if (!priv->always_running) { gpio_wdt_disable(priv); - clear_bit(WDOG_HW_RUNNING, &wdd->status); + } else { + set_bit(WDOG_HW_RUNNING, &wdd->status); } return 0; From 0b69ec336d3d5c50710e19aaf0ca5f738fc20bc3 Mon Sep 17 00:00:00 2001 From: Shanker Donthineni Date: Mon, 11 Dec 2017 16:42:31 -0600 Subject: [PATCH 1793/3326] arm64: Define cputype macros for Falkor CPU commit c622cc013cece073722592cff1ac6643a33b1622 upstream. 
Add cputype definition macros for Qualcomm Datacenter Technologies Falkor CPU in cputype.h. It's unfortunate that the first revision of the Falkor CPU used the wrong part number 0x800, got fixed in v2 chip with part number 0xC00, and would be used the same value for future revisions. Signed-off-by: Shanker Donthineni Signed-off-by: Will Deacon Cc: Timur Tabi Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/cputype.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 235e77d982610a..cbf08d7cbf3089 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -91,6 +91,7 @@ #define BRCM_CPU_PART_VULCAN 0x516 #define QCOM_CPU_PART_FALKOR_V1 0x800 +#define QCOM_CPU_PART_FALKOR 0xC00 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) @@ -99,6 +100,7 @@ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) +#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) #ifndef __ASSEMBLY__ From 4c54eab372f8cd3f3b1f0a07bc484b06eed3c5a0 Mon Sep 17 00:00:00 2001 From: Shanker Donthineni Date: Mon, 11 Dec 2017 16:42:32 -0600 Subject: [PATCH 1794/3326] arm64: Add software workaround for Falkor erratum 1041 commit 932b50c7c1c65e6f23002e075b97ee083c4a9e71 upstream. The ARM architecture defines the memory locations that are permitted to be accessed as the result of a speculative instruction fetch from an exception level for which all stages of translation are disabled. Specifically, the core is permitted to speculatively fetch from the 4KB region containing the current program counter 4K and next 4K. When translation is changed from enabled to disabled for the running exception level (SCTLR_ELn[M] changed from a value of 1 to 0), the Falkor core may errantly speculatively access memory locations outside of the 4KB region permitted by the architecture. The errant memory access may lead to one of the following unexpected behaviors. 1) A System Error Interrupt (SEI) being raised by the Falkor core due to the errant memory access attempting to access a region of memory that is protected by a slave-side memory protection unit. 2) Unpredictable device behavior due to a speculative read from device memory. This behavior may only occur if the instruction cache is disabled prior to or coincident with translation being changed from enabled to disabled. The conditions leading to this erratum will not occur when either of the following occur: 1) A higher exception level disables translation of a lower exception level (e.g. EL2 changing SCTLR_EL1[M] from a value of 1 to 0). 2) An exception level disabling its stage-1 translation if its stage-2 translation is enabled (e.g. EL1 changing SCTLR_EL1[M] from a value of 1 to 0 when HCR_EL2[VM] has a value of 1). To avoid the errant behavior, software must execute an ISB immediately prior to executing the MSR that will change SCTLR_ELn[M] from 1 to 0. 
Signed-off-by: Shanker Donthineni Signed-off-by: Will Deacon Cc: Timur Tabi Signed-off-by: Greg Kroah-Hartman --- Documentation/arm64/silicon-errata.txt | 1 + arch/arm64/Kconfig | 10 ++++++++++ arch/arm64/include/asm/assembler.h | 10 ++++++++++ arch/arm64/kernel/cpu-reset.S | 1 + arch/arm64/kernel/efi-entry.S | 2 ++ arch/arm64/kernel/head.S | 1 + arch/arm64/kernel/relocate_kernel.S | 1 + arch/arm64/kvm/hyp-init.S | 1 + 8 files changed, 27 insertions(+) diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 66e8ce14d23d03..704770c0edf2e3 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -74,3 +74,4 @@ stable kernels. | Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | | Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 | +| Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0df64a6a56d498..8f73eaccf50a8a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -539,6 +539,16 @@ config QCOM_QDF2400_ERRATUM_0065 If unsure, say Y. +config QCOM_FALKOR_ERRATUM_E1041 + bool "Falkor E1041: Speculative instruction fetches might cause errant memory access" + default y + help + Falkor CPU may speculatively fetch instructions from an improper + memory location when MMU translation is changed from SCTLR_ELn[M]=1 + to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem. + + If unsure, say Y. + endmenu diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index d58a6253c6abc4..7896728f3b220a 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -499,4 +499,14 @@ alternative_else_nop_endif #endif .endm +/** + * Errata workaround prior to disable MMU. Insert an ISB immediately prior + * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. + */ + .macro pre_disable_mmu_workaround +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041 + isb +#endif + .endm + #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 65f42d2574142d..2a752cb2a0f35a 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart) mrs x12, sctlr_el1 ldr x13, =SCTLR_ELx_FLAGS bic x12, x12, x13 + pre_disable_mmu_workaround msr sctlr_el1, x12 isb diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index 4e6ad355bd058e..6b9736c3fb5630 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -96,6 +96,7 @@ ENTRY(entry) mrs x0, sctlr_el2 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C + pre_disable_mmu_workaround msr sctlr_el2, x0 isb b 2f @@ -103,6 +104,7 @@ ENTRY(entry) mrs x0, sctlr_el1 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C + pre_disable_mmu_workaround msr sctlr_el1, x0 isb 2: diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 0b243ecaf7ac87..6657ae4f3c7a39 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -732,6 +732,7 @@ __primary_switch: * to take into account by discarding the current kernel mapping and * creating a new one. 
*/ + pre_disable_mmu_workaround msr sctlr_el1, x20 // disable the MMU isb bl __create_page_tables // recreate kernel mapping diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index ce704a4aeadd43..f407e422a7200b 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel) mrs x0, sctlr_el2 ldr x1, =SCTLR_ELx_FLAGS bic x0, x0, x1 + pre_disable_mmu_workaround msr sctlr_el2, x0 isb 1: diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 3f961558237766..870828c364c508 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -151,6 +151,7 @@ reset: mrs x5, sctlr_el2 ldr x6, =SCTLR_ELx_FLAGS bic x5, x5, x6 // Clear SCTL_M and etc + pre_disable_mmu_workaround msr sctlr_el2, x5 isb From fea5349eb88bf8e95b0574a7c7d30040c2f07c9c Mon Sep 17 00:00:00 2001 From: Haozhong Zhang Date: Wed, 10 Jan 2018 21:44:42 +0800 Subject: [PATCH 1795/3326] KVM MMU: check pending exception before injecting APF commit 2a266f23550be997d783f27e704b9b40c4010292 upstream. For example, when two APF's for page ready happen after one exit and the first one becomes pending, the second one will result in #DF. Instead, just handle the second page fault synchronously. Reported-by: Ross Zwisler Message-ID: Reported-by: Alec Blayne Signed-off-by: Haozhong Zhang Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/mmu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 0fce8d73403c3b..beb7f8795bc1cc 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3784,7 +3784,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) { if (unlikely(!lapic_in_kernel(vcpu) || - kvm_event_needs_reinjection(vcpu))) + kvm_event_needs_reinjection(vcpu) || + vcpu->arch.exception.pending)) return false; if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) From 9c41a8453c826b58e470d23302d0e10aba3fd168 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 23 Jan 2018 20:45:37 -0500 Subject: [PATCH 1796/3326] sched/rt: Use container_of() to get root domain in rto_push_irq_work_func() commit ad0f1d9d65938aec72a698116cd73a980916895e upstream. When the rto_push_irq_work_func() is called, it looks at the RT overloaded bitmask in the root domain via the runqueue (rq->rd). The problem is that during CPU up and down, nothing here stops rq->rd from changing between taking the rq->rd->rto_lock and releasing it. That means the lock that is released is not the same lock that was taken. Instead of using this_rq()->rd to get the root domain, as the irq work is part of the root domain, we can simply get the root domain from the irq work that is passed to the routine: container_of(work, struct root_domain, rto_push_work) This keeps the root domain consistent. 
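The container_of() idiom the fix relies on can be shown in isolation. The sketch below is a user-space reduction (the kernel macro additionally type-checks the member); it only illustrates how the handler recovers the enclosing root domain from the embedded irq_work instead of going through this_rq()->rd.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct irq_work { int pending; };

struct root_domain {
        int refcount;
        struct irq_work rto_push_work;
};

static void rto_push_irq_work_func(struct irq_work *work)
{
        /* Recover the root_domain this work item is embedded in. */
        struct root_domain *rd =
                container_of(work, struct root_domain, rto_push_work);

        printf("refcount=%d\n", rd->refcount);
}

int main(void)
{
        struct root_domain rd = { .refcount = 1 };

        rto_push_irq_work_func(&rd.rto_push_work);
        return 0;
}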
Reported-by: Pavan Kondeti Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 4bdced5c9a292 ("sched/rt: Simplify the IPI based RT balancing logic") Link: http://lkml.kernel.org/r/CAEU1=PkiHO35Dzna8EQqNSKW1fr1y1zRQ5y66X117MG06sQtNA@mail.gmail.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- kernel/sched/rt.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 7464c5c4de4678..6f42703edc4727 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1907,9 +1907,8 @@ static void push_rt_tasks(struct rq *rq) * the rt_loop_next will cause the iterator to perform another scan. * */ -static int rto_next_cpu(struct rq *rq) +static int rto_next_cpu(struct root_domain *rd) { - struct root_domain *rd = rq->rd; int next; int cpu; @@ -1985,7 +1984,7 @@ static void tell_cpu_to_push(struct rq *rq) * Otherwise it is finishing up and an ipi needs to be sent. */ if (rq->rd->rto_cpu < 0) - cpu = rto_next_cpu(rq); + cpu = rto_next_cpu(rq->rd); raw_spin_unlock(&rq->rd->rto_lock); @@ -1998,6 +1997,8 @@ static void tell_cpu_to_push(struct rq *rq) /* Called from hardirq context */ void rto_push_irq_work_func(struct irq_work *work) { + struct root_domain *rd = + container_of(work, struct root_domain, rto_push_work); struct rq *rq; int cpu; @@ -2013,18 +2014,18 @@ void rto_push_irq_work_func(struct irq_work *work) raw_spin_unlock(&rq->lock); } - raw_spin_lock(&rq->rd->rto_lock); + raw_spin_lock(&rd->rto_lock); /* Pass the IPI to the next rt overloaded queue */ - cpu = rto_next_cpu(rq); + cpu = rto_next_cpu(rd); - raw_spin_unlock(&rq->rd->rto_lock); + raw_spin_unlock(&rd->rto_lock); if (cpu < 0) return; /* Try the next RT overloaded CPU */ - irq_work_queue_on(&rq->rd->rto_push_work, cpu); + irq_work_queue_on(&rd->rto_push_work, cpu); } #endif /* HAVE_RT_PUSH_IPI */ From d9c3131f2ab45eacb6641b29ea6ee69039c727a7 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 23 Jan 2018 20:45:38 -0500 Subject: [PATCH 1797/3326] sched/rt: Up the root domain ref count when passing it around via IPIs commit 364f56653708ba8bcdefd4f0da2a42904baa8eeb upstream. When issuing an IPI RT push, where an IPI is sent to each CPU that has more than one RT task scheduled on it, it references the root domain's rto_mask, that contains all the CPUs within the root domain that has more than one RT task in the runable state. The problem is, after the IPIs are initiated, the rq->lock is released. This means that the root domain that is associated to the run queue could be freed while the IPIs are going around. Add a sched_get_rd() and a sched_put_rd() that will increment and decrement the root domain's ref count respectively. This way when initiating the IPIs, the scheduler will up the root domain's ref count before releasing the rq->lock, ensuring that the root domain does not go away until the IPI round is complete. 
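In miniature, the rule being enforced is: take a reference before dropping the lock that protects the object, and let the asynchronous work drop it when the IPI round finishes. A hedged C11 sketch follows (free() stands in for the kernel's deferred free via call_rcu_sched()).

#include <stdatomic.h>
#include <stdlib.h>

struct root_domain {
        atomic_int refcount;
};

static void sched_get_rd(struct root_domain *rd)
{
        atomic_fetch_add(&rd->refcount, 1);
}

static void sched_put_rd(struct root_domain *rd)
{
        /* Dropping the last reference frees the object; the kernel
         * defers this through an RCU grace period instead. */
        if (atomic_fetch_sub(&rd->refcount, 1) == 1)
                free(rd);
}

int main(void)
{
        struct root_domain *rd = malloc(sizeof(*rd));

        if (!rd)
                return 1;
        atomic_init(&rd->refcount, 1);

        sched_get_rd(rd);       /* before releasing rq->lock to send the IPI */
        sched_put_rd(rd);       /* when the IPI round completes */
        sched_put_rd(rd);       /* original owner's reference */
        return 0;
}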
Reported-by: Pavan Kondeti Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 4bdced5c9a292 ("sched/rt: Simplify the IPI based RT balancing logic") Link: http://lkml.kernel.org/r/CAEU1=PkiHO35Dzna8EQqNSKW1fr1y1zRQ5y66X117MG06sQtNA@mail.gmail.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- kernel/sched/rt.c | 9 +++++++-- kernel/sched/sched.h | 2 ++ kernel/sched/topology.c | 13 +++++++++++++ 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 6f42703edc4727..298f62b8662d4a 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1990,8 +1990,11 @@ static void tell_cpu_to_push(struct rq *rq) rto_start_unlock(&rq->rd->rto_loop_start); - if (cpu >= 0) + if (cpu >= 0) { + /* Make sure the rd does not get freed while pushing */ + sched_get_rd(rq->rd); irq_work_queue_on(&rq->rd->rto_push_work, cpu); + } } /* Called from hardirq context */ @@ -2021,8 +2024,10 @@ void rto_push_irq_work_func(struct irq_work *work) raw_spin_unlock(&rd->rto_lock); - if (cpu < 0) + if (cpu < 0) { + sched_put_rd(rd); return; + } /* Try the next RT overloaded CPU */ irq_work_queue_on(&rd->rto_push_work, cpu); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index b732e779fe7d55..307c35d3366097 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -661,6 +661,8 @@ extern struct mutex sched_domains_mutex; extern void init_defrootdomain(void); extern int sched_init_domains(const struct cpumask *cpu_map); extern void rq_attach_root(struct rq *rq, struct root_domain *rd); +extern void sched_get_rd(struct root_domain *rd); +extern void sched_put_rd(struct root_domain *rd); #ifdef HAVE_RT_PUSH_IPI extern void rto_push_irq_work_func(struct irq_work *work); diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 093f2ceba2e241..659e075ef70b7d 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -258,6 +258,19 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) call_rcu_sched(&old_rd->rcu, free_rootdomain); } +void sched_get_rd(struct root_domain *rd) +{ + atomic_inc(&rd->refcount); +} + +void sched_put_rd(struct root_domain *rd) +{ + if (!atomic_dec_and_test(&rd->refcount)) + return; + + call_rcu_sched(&rd->rcu, free_rootdomain); +} + static int init_rootdomain(struct root_domain *rd) { if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) From 19d8e5122fefc3e424050a78063f0d385433e191 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 17 Nov 2017 21:19:08 +0200 Subject: [PATCH 1798/3326] drm/i915: Add .get_hw_state() method for planes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d87ce76402950b8e4d5117276d44465658e886a4 upstream. Add a .get_hw_state() method for planes, returning true or false depending on whether the plane is enabled. Use it to rewrite the plane enabled/disabled asserts in platform agnostic fashion. We do lose the pre-gen4 plane<->pipe mapping checks, but since we're supposed sanitize that anyway it doesn't really matter. 
v2: Reoder patches to not depend on enum old_plane_id Just call assert_plane_disabled() from assert_planes_disabled() v3: Deal with disabled power wells in .get_hw_state() v4: Rebase due skl primary plane code removal Cc: Thierry Reding Cc: Alex Villacís Lasso Reviewed-by: Daniel Vetter #v2 Tested-by: Thierry Reding #v2 Link: https://patchwork.freedesktop.org/patch/msgid/20171117191917.11506-2-ville.syrjala@linux.intel.com Signed-off-by: Ville Syrjälä (cherry picked from commit 51f5a096398433a881e845d3685a2c1dac756019) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_display.c | 188 +++++++++++++-------------- drivers/gpu/drm/i915/intel_drv.h | 2 + drivers/gpu/drm/i915/intel_sprite.c | 83 ++++++++++++ 3 files changed, 175 insertions(+), 98 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 095a2240af4f42..47094d4a8f0efe 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1211,23 +1211,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) pipe_name(pipe)); } -static void assert_cursor(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) -{ - bool cur_state; - - if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) - cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; - else - cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; - - I915_STATE_WARN(cur_state != state, - "cursor on pipe %c assertion failure (expected %s, current %s)\n", - pipe_name(pipe), onoff(state), onoff(cur_state)); -} -#define assert_cursor_enabled(d, p) assert_cursor(d, p, true) -#define assert_cursor_disabled(d, p) assert_cursor(d, p, false) - void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { @@ -1255,77 +1238,25 @@ void assert_pipe(struct drm_i915_private *dev_priv, pipe_name(pipe), onoff(state), onoff(cur_state)); } -static void assert_plane(struct drm_i915_private *dev_priv, - enum plane plane, bool state) +static void assert_plane(struct intel_plane *plane, bool state) { - u32 val; - bool cur_state; + bool cur_state = plane->get_hw_state(plane); - val = I915_READ(DSPCNTR(plane)); - cur_state = !!(val & DISPLAY_PLANE_ENABLE); I915_STATE_WARN(cur_state != state, - "plane %c assertion failure (expected %s, current %s)\n", - plane_name(plane), onoff(state), onoff(cur_state)); + "%s assertion failure (expected %s, current %s)\n", + plane->base.name, onoff(state), onoff(cur_state)); } -#define assert_plane_enabled(d, p) assert_plane(d, p, true) -#define assert_plane_disabled(d, p) assert_plane(d, p, false) - -static void assert_planes_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - int i; - - /* Primary planes are fixed to pipes on gen4+ */ - if (INTEL_GEN(dev_priv) >= 4) { - u32 val = I915_READ(DSPCNTR(pipe)); - I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, - "plane %c assertion failure, should be disabled but not\n", - plane_name(pipe)); - return; - } - - /* Need to check both planes against the pipe */ - for_each_pipe(dev_priv, i) { - u32 val = I915_READ(DSPCNTR(i)); - enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> - DISPPLANE_SEL_PIPE_SHIFT; - I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, - "plane %c assertion failure, should be off on pipe %c but is still active\n", - plane_name(i), pipe_name(pipe)); - } -} +#define assert_plane_enabled(p) assert_plane(p, true) +#define assert_plane_disabled(p) assert_plane(p, false) -static void assert_sprites_disabled(struct 
drm_i915_private *dev_priv, - enum pipe pipe) +static void assert_planes_disabled(struct intel_crtc *crtc) { - int sprite; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane; - if (INTEL_GEN(dev_priv) >= 9) { - for_each_sprite(dev_priv, pipe, sprite) { - u32 val = I915_READ(PLANE_CTL(pipe, sprite)); - I915_STATE_WARN(val & PLANE_CTL_ENABLE, - "plane %d assertion failure, should be off on pipe %c but is still active\n", - sprite, pipe_name(pipe)); - } - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - for_each_sprite(dev_priv, pipe, sprite) { - u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite)); - I915_STATE_WARN(val & SP_ENABLE, - "sprite %c assertion failure, should be off on pipe %c but is still active\n", - sprite_name(pipe, sprite), pipe_name(pipe)); - } - } else if (INTEL_GEN(dev_priv) >= 7) { - u32 val = I915_READ(SPRCTL(pipe)); - I915_STATE_WARN(val & SPRITE_ENABLE, - "sprite %c assertion failure, should be off on pipe %c but is still active\n", - plane_name(pipe), pipe_name(pipe)); - } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { - u32 val = I915_READ(DVSCNTR(pipe)); - I915_STATE_WARN(val & DVS_ENABLE, - "sprite %c assertion failure, should be off on pipe %c but is still active\n", - plane_name(pipe), pipe_name(pipe)); - } + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) + assert_plane_disabled(plane); } static void assert_vblank_disabled(struct drm_crtc *crtc) @@ -1926,9 +1857,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); - assert_planes_disabled(dev_priv, pipe); - assert_cursor_disabled(dev_priv, pipe); - assert_sprites_disabled(dev_priv, pipe); + assert_planes_disabled(crtc); /* * A pipe without a PLL won't actually be able to drive bits from @@ -1997,9 +1926,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc) * Make sure planes won't keep trying to pump pixels to us, * or we might hang the display. */ - assert_planes_disabled(dev_priv, pipe); - assert_cursor_disabled(dev_priv, pipe); - assert_sprites_disabled(dev_priv, pipe); + assert_planes_disabled(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); @@ -3397,6 +3324,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool i9xx_plane_get_hw_state(struct intel_plane *primary) +{ + + struct drm_i915_private *dev_priv = to_i915(primary->base.dev); + enum intel_display_power_domain power_domain; + enum plane plane = primary->plane; + enum pipe pipe = primary->pipe; + bool ret; + + /* + * Not 100% correct for planes that can move between pipes, + * but that's only the case for gen2-4 which don't have any + * display power wells. + */ + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) { @@ -4973,7 +4925,8 @@ void hsw_enable_ips(struct intel_crtc *crtc) * a vblank wait. 
*/ - assert_plane_enabled(dev_priv, crtc->plane); + assert_plane_enabled(to_intel_plane(crtc->base.primary)); + if (IS_BROADWELL(dev_priv)) { mutex_lock(&dev_priv->rps.hw_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); @@ -5005,7 +4958,8 @@ void hsw_disable_ips(struct intel_crtc *crtc) if (!crtc->config->ips_enabled) return; - assert_plane_enabled(dev_priv, crtc->plane); + assert_plane_enabled(to_intel_plane(crtc->base.primary)); + if (IS_BROADWELL(dev_priv)) { mutex_lock(&dev_priv->rps.hw_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); @@ -9577,6 +9531,23 @@ static void i845_disable_cursor(struct intel_plane *plane, i845_update_cursor(plane, NULL, NULL); } +static bool i845_cursor_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(PIPE_A); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -9770,6 +9741,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane, i9xx_update_cursor(plane, NULL, NULL); } +static bool i9xx_cursor_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = plane->pipe; + bool ret; + + /* + * Not 100% correct for planes that can move between pipes, + * but that's only the case for gen2-3 which don't have any + * display power wells. 
+ */ + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} /* VESA 640x480x72Hz mode to set on the pipe */ static struct drm_display_mode load_detect_mode = { @@ -13240,6 +13233,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = skylake_update_primary_plane; primary->disable_plane = skylake_disable_primary_plane; + primary->get_hw_state = skl_plane_get_hw_state; } else if (INTEL_GEN(dev_priv) >= 9) { intel_primary_formats = skl_primary_formats; num_formats = ARRAY_SIZE(skl_primary_formats); @@ -13250,6 +13244,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = skylake_update_primary_plane; primary->disable_plane = skylake_disable_primary_plane; + primary->get_hw_state = skl_plane_get_hw_state; } else if (INTEL_GEN(dev_priv) >= 4) { intel_primary_formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); @@ -13257,6 +13252,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = i9xx_update_primary_plane; primary->disable_plane = i9xx_disable_primary_plane; + primary->get_hw_state = i9xx_plane_get_hw_state; } else { intel_primary_formats = i8xx_primary_formats; num_formats = ARRAY_SIZE(i8xx_primary_formats); @@ -13264,6 +13260,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = i9xx_update_primary_plane; primary->disable_plane = i9xx_disable_primary_plane; + primary->get_hw_state = i9xx_plane_get_hw_state; } if (INTEL_GEN(dev_priv) >= 9) @@ -13353,10 +13350,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { cursor->update_plane = i845_update_cursor; cursor->disable_plane = i845_disable_cursor; + cursor->get_hw_state = i845_cursor_get_hw_state; cursor->check_plane = i845_check_cursor; } else { cursor->update_plane = i9xx_update_cursor; cursor->disable_plane = i9xx_disable_cursor; + cursor->get_hw_state = i9xx_cursor_get_hw_state; cursor->check_plane = i9xx_check_cursor; } @@ -14704,8 +14703,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", pipe_name(pipe)); - assert_plane_disabled(dev_priv, PLANE_A); - assert_plane_disabled(dev_priv, PLANE_B); + assert_planes_disabled(intel_get_crtc_for_pipe(dev_priv, PIPE_A)); + assert_planes_disabled(intel_get_crtc_for_pipe(dev_priv, PIPE_B)); I915_WRITE(PIPECONF(pipe), 0); POSTING_READ(PIPECONF(pipe)); @@ -14918,20 +14917,13 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv) intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); } -static bool primary_get_hw_state(struct intel_plane *plane) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - - return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; -} - /* FIXME read out full plane state for all planes */ static void readout_plane_state(struct intel_crtc *crtc) { struct intel_plane *primary = to_intel_plane(crtc->base.primary); bool visible; - visible = crtc->active && primary_get_hw_state(primary); + visible = crtc->active && primary->get_hw_state(primary); intel_set_plane_visible(to_intel_crtc_state(crtc->base.state), to_intel_plane_state(primary->base.state), diff --git 
a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 79fbaf78f6044b..10ae9681f02d34 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -863,6 +863,7 @@ struct intel_plane { const struct intel_plane_state *plane_state); void (*disable_plane)(struct intel_plane *plane, struct intel_crtc *crtc); + bool (*get_hw_state)(struct intel_plane *plane); int (*check_plane)(struct intel_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state); @@ -1885,6 +1886,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_pipe_update_start(struct intel_crtc *crtc); void intel_pipe_update_end(struct intel_crtc *crtc); +bool skl_plane_get_hw_state(struct intel_plane *plane); /* intel_tv.c */ void intel_tv_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 524933b014838c..f8ebeb5ffb9608 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -324,6 +324,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +bool +skl_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static void chv_update_csc(struct intel_plane *plane, uint32_t format) { @@ -501,6 +521,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool +vlv_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -641,6 +681,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool +ivb_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -772,6 +831,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool 
+g4x_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static int intel_check_sprite_plane(struct intel_plane *plane, struct intel_crtc_state *crtc_state, @@ -1227,6 +1305,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = skl_update_plane; intel_plane->disable_plane = skl_disable_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; plane_formats = skl_plane_formats; num_plane_formats = ARRAY_SIZE(skl_plane_formats); @@ -1237,6 +1316,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = skl_update_plane; intel_plane->disable_plane = skl_disable_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; plane_formats = skl_plane_formats; num_plane_formats = ARRAY_SIZE(skl_plane_formats); @@ -1247,6 +1327,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = vlv_update_plane; intel_plane->disable_plane = vlv_disable_plane; + intel_plane->get_hw_state = vlv_plane_get_hw_state; plane_formats = vlv_plane_formats; num_plane_formats = ARRAY_SIZE(vlv_plane_formats); @@ -1262,6 +1343,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = ivb_update_plane; intel_plane->disable_plane = ivb_disable_plane; + intel_plane->get_hw_state = ivb_plane_get_hw_state; plane_formats = snb_plane_formats; num_plane_formats = ARRAY_SIZE(snb_plane_formats); @@ -1272,6 +1354,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = g4x_update_plane; intel_plane->disable_plane = g4x_disable_plane; + intel_plane->get_hw_state = g4x_plane_get_hw_state; modifiers = i9xx_plane_format_modifiers; if (IS_GEN6(dev_priv)) { From 50018d09843cc4813f1c2b89ff8fbcdf883ab0a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 17 Nov 2017 21:19:09 +0200 Subject: [PATCH 1799/3326] drm/i915: Redo plane sanitation during readout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 23ac12732825901b3fc6ac720958d8bff9a0d6ec upstream. Unify the plane disabling during state readout by pulling the code into a new helper intel_plane_disable_noatomic(). We'll also read out the state of all planes, so that we know which planes really need to be diabled. Additonally we change the plane<->pipe mapping sanitation to work by simply disabling the offending planes instead of entire pipes. And we do it before we otherwise sanitize the crtcs, which means we don't have to worry about misassigned planes during crtc sanitation anymore. 
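As a rough sketch (condensed from the hunks below, with details and error handling elided), the readout/sanitation order after this patch becomes:

    /* intel_modeset_setup_hw_state(), simplified */
    intel_modeset_readout_hw_state(dev);       /* per-plane visibility via plane->get_hw_state() */
    get_encoder_power_domains(dev_priv);
    intel_sanitize_plane_mapping(dev_priv);    /* gen2/3 only: disable planes attached to the wrong pipe */
    for_each_intel_encoder(dev, encoder)
            intel_sanitize_encoder(encoder);
    for_each_intel_crtc(&dev_priv->drm, crtc)
            intel_sanitize_crtc(crtc, ctx);    /* plane state is already consistent at this point */

The crtc sanitation loop is not part of this diff; it is listed only to show where the new plane-mapping pass sits relative to the existing sanitation.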
v2: Reoder patches to not depend on enum old_plane_id v3: s/for_each_pipe/for_each_intel_crtc/ Cc: Thierry Reding Cc: Alex Villacís Lasso Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103223 Reviewed-by: Daniel Vetter Tested-by: Thierry Reding Link: https://patchwork.freedesktop.org/patch/msgid/20171117191917.11506-3-ville.syrjala@linux.intel.com Signed-off-by: Ville Syrjälä (cherry picked from commit b1e01595a66dc206a2c75401ec4c285740537f3f) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_display.c | 114 +++++++++++++++------------ 1 file changed, 65 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 47094d4a8f0efe..751b0f0582bc94 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2756,6 +2756,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, crtc_state->active_planes); } +static void intel_plane_disable_noatomic(struct intel_crtc *crtc, + struct intel_plane *plane) +{ + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + intel_set_plane_visible(crtc_state, plane_state, false); + + if (plane->id == PLANE_PRIMARY) + intel_pre_disable_primary_noatomic(&crtc->base); + + trace_intel_disable_plane(&plane->base, crtc); + plane->disable_plane(plane, crtc); +} + static void intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct intel_initial_plane_config *plane_config) @@ -2813,12 +2830,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, * simplest solution is to just disable the primary plane now and * pretend the BIOS never had it enabled. 
*/ - intel_set_plane_visible(to_intel_crtc_state(crtc_state), - to_intel_plane_state(plane_state), - false); - intel_pre_disable_primary_noatomic(&intel_crtc->base); - trace_intel_disable_plane(primary, intel_crtc); - intel_plane->disable_plane(intel_plane, intel_crtc); + intel_plane_disable_noatomic(intel_crtc, intel_plane); return; @@ -5954,6 +5966,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum intel_display_power_domain domain; + struct intel_plane *plane; u64 domains; struct drm_atomic_state *state; struct intel_crtc_state *crtc_state; @@ -5962,11 +5975,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, if (!intel_crtc->active) return; - if (crtc->primary->state->visible) { - intel_pre_disable_primary_noatomic(crtc); + for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); - intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); - crtc->primary->state->visible = false; + if (plane_state->base.visible) + intel_plane_disable_noatomic(intel_crtc, plane); } state = drm_atomic_state_alloc(crtc->dev); @@ -14715,22 +14729,36 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) POSTING_READ(DPLL(pipe)); } -static bool -intel_check_plane_mapping(struct intel_crtc *crtc) +static bool intel_plane_mapping_ok(struct intel_crtc *crtc, + struct intel_plane *primary) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 val; + enum plane plane = primary->plane; + u32 val = I915_READ(DSPCNTR(plane)); - if (INTEL_INFO(dev_priv)->num_pipes == 1) - return true; + return (val & DISPLAY_PLANE_ENABLE) == 0 || + (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe); +} - val = I915_READ(DSPCNTR(!crtc->plane)); +static void +intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; - if ((val & DISPLAY_PLANE_ENABLE) && - (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) - return false; + if (INTEL_GEN(dev_priv) >= 4) + return; - return true; + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_plane *plane = + to_intel_plane(crtc->base.primary); + + if (intel_plane_mapping_ok(crtc, plane)) + continue; + + DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", + plane->base.name); + intel_plane_disable_noatomic(crtc, plane); + } } static bool intel_crtc_has_encoders(struct intel_crtc *crtc) @@ -14786,33 +14814,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, /* Disable everything but the primary plane */ for_each_intel_plane_on_crtc(dev, crtc, plane) { - if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) - continue; + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, crtc); + if (plane_state->base.visible && + plane->base.type != DRM_PLANE_TYPE_PRIMARY) + intel_plane_disable_noatomic(crtc, plane); } } - /* We need to sanitize the plane -> pipe mapping first because this will - * disable the crtc (and hence change the state) if it is wrong. Note - * that gen4+ has a fixed plane -> pipe mapping. 
*/ - if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) { - bool plane; - - DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", - crtc->base.base.id, crtc->base.name); - - /* Pipe has the wrong plane attached and the plane is active. - * Temporarily change the plane mapping and disable everything - * ... */ - plane = crtc->plane; - crtc->base.primary->state->visible = true; - crtc->plane = !plane; - intel_crtc_disable_noatomic(&crtc->base, ctx); - crtc->plane = plane; - } - /* Adjust the state of the output pipe according to whether we * have active connectors/encoders. */ if (crtc->active && !intel_crtc_has_encoders(crtc)) @@ -14920,14 +14930,18 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv) /* FIXME read out full plane state for all planes */ static void readout_plane_state(struct intel_crtc *crtc) { - struct intel_plane *primary = to_intel_plane(crtc->base.primary); - bool visible; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane; - visible = crtc->active && primary->get_hw_state(primary); + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + bool visible = plane->get_hw_state(plane); - intel_set_plane_visible(to_intel_crtc_state(crtc->base.state), - to_intel_plane_state(primary->base.state), - visible); + intel_set_plane_visible(crtc_state, plane_state, visible); + } } static void intel_modeset_readout_hw_state(struct drm_device *dev) @@ -15129,6 +15143,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev, /* HW state is read out, now we need to sanitize this mess. */ get_encoder_power_domains(dev_priv); + intel_sanitize_plane_mapping(dev_priv); + for_each_intel_encoder(dev, encoder) { intel_sanitize_encoder(encoder); } From 050b86b5bf2040a4be87f93510ffd780113c2065 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 29 Nov 2017 14:54:11 +0200 Subject: [PATCH 1800/3326] drm/i915: Fix deadlock in i830_disable_pipe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 4488496d58200c7511842e049a4cc891d928da56 upstream. i830_disable_pipe() gets called from the power well code, and thus we're already holding the power domain mutex. That means we can't call plane->get_hw_state() as it will also try to grab the same mutex and will thus deadlock. Replace the assert_plane() calls (which calls ->get_hw_state()) with just raw register reads in i830_disable_pipe(). As a bonus we can now get a warning if plane C is enabled even though we don't even expose it as a drm plane. 
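For reference, a minimal sketch of the recursion being avoided (call chain simplified; the exact power well hook is omitted, and power_domains->lock names the display power domain mutex taken by intel_display_power_get_if_enabled()):

    power well .disable() hook                 /* already holds power_domains->lock */
      i830_disable_pipe()
        assert_planes_disabled()
          plane->get_hw_state()
            intel_display_power_get_if_enabled()
              mutex_lock(&power_domains->lock) /* same mutex -> deadlock */

Reading DSPCNTR/CURCNTR directly avoids the power domain lookup entirely, which is fine here since the caller already knows the well is powered.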
v2: Do a separate WARN_ON() for each plane (Chris) Cc: Chris Wilson Reviewed-by: Chris Wilson Fixes: d87ce7640295 ("drm/i915: Add .get_hw_state() method for planes") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171129125411.29055-1-ville.syrjala@linux.intel.com (cherry picked from commit 5816d9cbc0a0fbf232fe297cefcb85361a3cde90) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_display.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 751b0f0582bc94..46485692db4897 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14717,8 +14717,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", pipe_name(pipe)); - assert_planes_disabled(intel_get_crtc_for_pipe(dev_priv, PIPE_A)); - assert_planes_disabled(intel_get_crtc_for_pipe(dev_priv, PIPE_B)); + WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE); + WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE); + WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); + WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE); + WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE); I915_WRITE(PIPECONF(pipe), 0); POSTING_READ(PIPECONF(pipe)); From e5e1e153ecd40c2b09c23d2b2c161a6c8ddbba07 Mon Sep 17 00:00:00 2001 From: Mohamed Ghannam Date: Tue, 5 Dec 2017 20:58:35 +0000 Subject: [PATCH 1801/3326] dccp: CVE-2017-8824: use-after-free in DCCP code commit 69c64866ce072dea1d1e59a0d61e0f66c0dffb76 upstream. Whenever the sock object is in DCCP_CLOSED state, dccp_disconnect() must free dccps_hc_tx_ccid and dccps_hc_rx_ccid and set to NULL. Signed-off-by: Mohamed Ghannam Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/dccp/proto.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/dccp/proto.c b/net/dccp/proto.c index b68168fcc06aa1..9d43c1f4027408 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags) { struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet = inet_sk(sk); + struct dccp_sock *dp = dccp_sk(sk); int err = 0; const int old_state = sk->sk_state; @@ -278,6 +279,10 @@ int dccp_disconnect(struct sock *sk, int flags) sk->sk_err = ECONNRESET; dccp_clear_xmit_timers(sk); + ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); + ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); + dp->dccps_hc_rx_ccid = NULL; + dp->dccps_hc_tx_ccid = NULL; __skb_queue_purge(&sk->sk_receive_queue); __skb_queue_purge(&sk->sk_write_queue); From fd31a38d268f50afe9c5cd3d4beafa020ad39e90 Mon Sep 17 00:00:00 2001 From: Malcolm Priestley Date: Tue, 26 Sep 2017 17:10:20 -0400 Subject: [PATCH 1802/3326] media: dvb-usb-v2: lmedm04: Improve logic checking of warm start commit 3d932ee27e852e4904647f15b64dedca51187ad7 upstream. Warm start has no check as whether a genuine device has connected and proceeds to next execution path. Check device should read 0x47 at offset of 2 on USB descriptor read and it is the amount requested of 6 bytes. 
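In short, the warm/cold decision implemented below becomes (sketch):

    /* status byte returned by lme2510_return_status() */
    0x44                     -> COLD    (switch/load firmware)
    0x47                     -> WARM    (genuine device, firmware already running)
    short read / other value -> -EINVAL (no genuine device detected)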
Fix for kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access as Reported-by: Andrey Konovalov Signed-off-by: Malcolm Priestley Signed-off-by: Mauro Carvalho Chehab Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/dvb-usb-v2/lmedm04.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 5e320fa4a7950a..992f2011a6ba95 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -494,18 +494,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, static int lme2510_return_status(struct dvb_usb_device *d) { - int ret = 0; + int ret; u8 *data; - data = kzalloc(10, GFP_KERNEL); + data = kzalloc(6, GFP_KERNEL); if (!data) return -ENOMEM; - ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), - 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200); - info("Firmware Status: %x (%x)", ret , data[2]); + ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), + 0x06, 0x80, 0x0302, 0x00, + data, 0x6, 200); + if (ret != 6) + ret = -EINVAL; + else + ret = data[2]; + + info("Firmware Status: %6ph", data); - ret = (ret < 0) ? -ENODEV : data[2]; kfree(data); return ret; } @@ -1189,6 +1194,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d) static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) { struct lme2510_state *st = d->priv; + int status; usb_reset_configuration(d->udev); @@ -1197,12 +1203,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware; - if (lme2510_return_status(d) == 0x44) { + status = lme2510_return_status(d); + if (status == 0x44) { *name = lme_firmware_switch(d, 0); return COLD; } - return 0; + if (status != 0x47) + return -EINVAL; + + return WARM; } static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type, From 2bd6279a4a8cd6c8d108b694c5ddb9cb53f6e35c Mon Sep 17 00:00:00 2001 From: Malcolm Priestley Date: Tue, 26 Sep 2017 17:10:21 -0400 Subject: [PATCH 1803/3326] media: dvb-usb-v2: lmedm04: move ts2020 attach to dm04_lme2510_tuner commit 7bf7a7116ed313c601307f7e585419369926ab05 upstream. When the tuner was split from m88rs2000 the attach function is in wrong place. Move to dm04_lme2510_tuner to trap errors on failure and removing a call to lme_coldreset. Prevents driver starting up without any tuner connected. Fixes to trap for ts2020 fail. LME2510(C): FE Found M88RS2000 ts2020: probe of 0-0060 failed with error -11 ... 
LME2510(C): TUN Found RS2000 tuner kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] PREEMPT SMP KASAN Reported-by: Andrey Konovalov Signed-off-by: Malcolm Priestley Tested-by: Andrey Konovalov Signed-off-by: Mauro Carvalho Chehab Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/dvb-usb-v2/lmedm04.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 992f2011a6ba95..be26c029546bd3 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -1076,8 +1076,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap) if (adap->fe[0]) { info("FE Found M88RS2000"); - dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config, - &d->i2c_adap); st->i2c_tuner_gate_w = 5; st->i2c_tuner_gate_r = 5; st->i2c_tuner_addr = 0x60; @@ -1143,17 +1141,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap) ret = st->tuner_config; break; case TUNER_RS2000: - ret = st->tuner_config; + if (dvb_attach(ts2020_attach, adap->fe[0], + &ts2020_config, &d->i2c_adap)) + ret = st->tuner_config; break; default: break; } - if (ret) + if (ret) { info("TUN Found %s tuner", tun_msg[ret]); - else { - info("TUN No tuner found --- resetting device"); - lme_coldreset(d); + } else { + info("TUN No tuner found"); return -ENODEV; } From dd24d173b50af3aadb1ea2e04edaa8d68ca59b23 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Fri, 22 Sep 2017 09:07:06 -0400 Subject: [PATCH 1804/3326] media: hdpvr: Fix an error handling path in hdpvr_probe() commit c0f71bbb810237a38734607ca4599632f7f5d47f upstream. Here, hdpvr_register_videodev() is responsible for setup and register a video device. Also defining and initializing a worker. hdpvr_register_videodev() is calling by hdpvr_probe at last. So no need to flush any work here. Unregister v4l2, free buffers and memory. If hdpvr_probe() will fail. 
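The resulting unwind order is (sketch of the goto labels added below; execution falls through from the failing step downwards):

    reg_fail:               i2c_del_adapter(&dev->i2c_adapter);
    error_free_buffers:     hdpvr_free_buffers(dev);
    error_put_usb:          usb_put_dev(dev->udev);
                            kfree(dev->usbc_buf);
    error_v4l2_unregister:  v4l2_device_unregister(&dev->v4l2_dev);
    error_free_dev:         kfree(dev);
    error:                  return retval;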
Signed-off-by: Arvind Yadav Reported-by: Andrey Konovalov Tested-by: Andrey Konovalov Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/hdpvr/hdpvr-core.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index dbe29c6c4d8b37..1e8cbaf3689617 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -292,7 +292,7 @@ static int hdpvr_probe(struct usb_interface *interface, /* register v4l2_device early so it can be used for printks */ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { dev_err(&interface->dev, "v4l2_device_register failed\n"); - goto error; + goto error_free_dev; } mutex_init(&dev->io_mutex); @@ -301,7 +301,7 @@ static int hdpvr_probe(struct usb_interface *interface, dev->usbc_buf = kmalloc(64, GFP_KERNEL); if (!dev->usbc_buf) { v4l2_err(&dev->v4l2_dev, "Out of memory\n"); - goto error; + goto error_v4l2_unregister; } init_waitqueue_head(&dev->wait_buffer); @@ -339,13 +339,13 @@ static int hdpvr_probe(struct usb_interface *interface, } if (!dev->bulk_in_endpointAddr) { v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n"); - goto error; + goto error_put_usb; } /* init the device */ if (hdpvr_device_init(dev)) { v4l2_err(&dev->v4l2_dev, "device init failed\n"); - goto error; + goto error_put_usb; } mutex_lock(&dev->io_mutex); @@ -353,7 +353,7 @@ static int hdpvr_probe(struct usb_interface *interface, mutex_unlock(&dev->io_mutex); v4l2_err(&dev->v4l2_dev, "allocating transfer buffers failed\n"); - goto error; + goto error_put_usb; } mutex_unlock(&dev->io_mutex); @@ -361,7 +361,7 @@ static int hdpvr_probe(struct usb_interface *interface, retval = hdpvr_register_i2c_adapter(dev); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n"); - goto error; + goto error_free_buffers; } client = hdpvr_register_ir_rx_i2c(dev); @@ -394,13 +394,17 @@ static int hdpvr_probe(struct usb_interface *interface, reg_fail: #if IS_ENABLED(CONFIG_I2C) i2c_del_adapter(&dev->i2c_adapter); +error_free_buffers: #endif + hdpvr_free_buffers(dev); +error_put_usb: + usb_put_dev(dev->udev); + kfree(dev->usbc_buf); +error_v4l2_unregister: + v4l2_device_unregister(&dev->v4l2_dev); +error_free_dev: + kfree(dev); error: - if (dev) { - flush_work(&dev->worker); - /* this frees allocated memory */ - hdpvr_delete(dev); - } return retval; } From 63380839c575e5ea13c8005332665e31897b2740 Mon Sep 17 00:00:00 2001 From: Yury Norov Date: Thu, 31 Aug 2017 11:30:50 +0300 Subject: [PATCH 1805/3326] arm64: move TASK_* definitions to Commit eef94a3d09aa upstream. ILP32 series [1] introduces the dependency on for TASK_SIZE macro. Which in turn requires , and include , giving a circular dependency, because TASK_SIZE is currently located in . In other architectures, TASK_SIZE is defined in , and moving TASK_SIZE there fixes the problem. 
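The end state, condensed from the hunks below: the user task limits leave arch/arm64/include/asm/memory.h and land in arch/arm64/include/asm/processor.h, with only the assembly-visible part kept outside the new __ASSEMBLY__ guard:

    /* asm/processor.h after this patch (condensed sketch) */
    #define TASK_SIZE_64    (UL(1) << VA_BITS)          /* usable from assembly */

    #ifndef __ASSEMBLY__
    #ifdef CONFIG_COMPAT
    #define TASK_SIZE_32    UL(0x100000000)
    #define TASK_SIZE       (test_thread_flag(TIF_32BIT) ? TASK_SIZE_32 : TASK_SIZE_64)
    #else
    #define TASK_SIZE       TASK_SIZE_64
    #endif
    #define TASK_UNMAPPED_BASE  (PAGE_ALIGN(TASK_SIZE / 4))
    ...
    #endif /* __ASSEMBLY__ */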
Discussion: https://patchwork.kernel.org/patch/9929107/ [1] https://github.com/norov/linux/tree/ilp32-next CC: Will Deacon CC: Laura Abbott Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: James Morse Suggested-by: Mark Rutland Signed-off-by: Yury Norov Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/memory.h | 15 --------------- arch/arm64/include/asm/processor.h | 21 +++++++++++++++++++++ arch/arm64/kernel/entry.S | 2 +- 3 files changed, 22 insertions(+), 16 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index f7c4d2146aed09..d4bae7d6e0d8b9 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -61,8 +61,6 @@ * KIMAGE_VADDR - the virtual address of the start of the kernel image * VA_BITS - the maximum number of bits for virtual addresses. * VA_START - the first kernel virtual address. - * TASK_SIZE - the maximum size of a user space task. - * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. */ #define VA_BITS (CONFIG_ARM64_VA_BITS) #define VA_START (UL(0xffffffffffffffff) - \ @@ -77,19 +75,6 @@ #define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) #define FIXADDR_TOP (PCI_IO_START - SZ_2M) -#define TASK_SIZE_64 (UL(1) << VA_BITS) - -#ifdef CONFIG_COMPAT -#define TASK_SIZE_32 UL(0x100000000) -#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ - TASK_SIZE_32 : TASK_SIZE_64) -#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ - TASK_SIZE_32 : TASK_SIZE_64) -#else -#define TASK_SIZE TASK_SIZE_64 -#endif /* CONFIG_COMPAT */ - -#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) #define KERNEL_START _text #define KERNEL_END _end diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 29adab8138c396..7dddca21fc64e2 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -19,6 +19,10 @@ #ifndef __ASM_PROCESSOR_H #define __ASM_PROCESSOR_H +#define TASK_SIZE_64 (UL(1) << VA_BITS) + +#ifndef __ASSEMBLY__ + /* * Default implementation of macro that returns current * instruction pointer ("program counter"). @@ -37,6 +41,22 @@ #include #include +/* + * TASK_SIZE - the maximum size of a user space task. + * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. + */ +#ifdef CONFIG_COMPAT +#define TASK_SIZE_32 UL(0x100000000) +#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? 
\ + TASK_SIZE_32 : TASK_SIZE_64) +#else +#define TASK_SIZE TASK_SIZE_64 +#endif /* CONFIG_COMPAT */ + +#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) + #define STACK_TOP_MAX TASK_SIZE_64 #ifdef CONFIG_COMPAT #define AARCH32_VECTORS_BASE 0xffff0000 @@ -194,4 +214,5 @@ static inline void spin_lock_prefetch(const void *ptr) int cpu_enable_pan(void *__unused); int cpu_enable_cache_maint_trap(void *__unused); +#endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e1c59d4008a8d0..f5e851eeda4bb7 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include From 78299fafcd78bec4f4ce145decefff5b7f664d80 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 10 Aug 2017 12:56:18 +0100 Subject: [PATCH 1806/3326] arm64: mm: Use non-global mappings for kernel space Commit e046eb0c9bf2 upstream. In preparation for unmapping the kernel whilst running in userspace, make the kernel mappings non-global so we can avoid expensive TLB invalidation on kernel exit to userspace. Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/kernel-pgtable.h | 12 ++++++++++-- arch/arm64/include/asm/pgtable-prot.h | 21 +++++++++++++++------ 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 7803343e5881fb..77a27af013710d 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -78,8 +78,16 @@ /* * Initial memory map attributes. 
*/ -#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) -#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) +#define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) +#define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG) +#define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG) +#else +#define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS +#define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS +#endif #if ARM64_SWAPPER_USES_SECTION_MAPS #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 0a5635fb0ef984..22a926825e3fb8 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -34,8 +34,16 @@ #include -#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) -#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) +#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) +#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG) +#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG) +#else +#define PROT_DEFAULT _PROT_DEFAULT +#define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) @@ -48,6 +56,7 @@ #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) #define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) +#define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG) #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) @@ -55,15 +64,15 @@ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) -#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) -#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) -#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) +#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) +#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) +#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) From 59f47f9dcdcde20e0a20c434f18bcc50c7c824e4 Mon Sep 17 
00:00:00 2001 From: Will Deacon Date: Thu, 10 Aug 2017 13:04:48 +0100 Subject: [PATCH 1807/3326] arm64: mm: Temporarily disable ARM64_SW_TTBR0_PAN Commit 376133b7edc2 upstream. We're about to rework the way ASIDs are allocated, switch_mm is implemented and low-level kernel entry/exit is handled, so keep the ARM64_SW_TTBR0_PAN code out of the way whilst we do the heavy lifting. It will be re-enabled in a subsequent patch. Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 8f73eaccf50a8a..63ff04fe03e109 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -882,6 +882,7 @@ endif config ARM64_SW_TTBR0_PAN bool "Emulate Privileged Access Never using TTBR0_EL1 switching" + depends on BROKEN # Temporary while switch_mm is reworked help Enabling this option prevents the kernel from accessing user-space memory directly by pointing TTBR0_EL1 to a reserved From d26f0a5dc6a6d8237b024a150de6be30fce7b467 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 10 Aug 2017 13:19:09 +0100 Subject: [PATCH 1808/3326] arm64: mm: Move ASID from TTBR0 to TTBR1 Commit 7655abb95386 upstream. In preparation for mapping kernelspace and userspace with different ASIDs, move the ASID to TTBR1 and update switch_mm to context-switch TTBR0 via an invalid mapping (the zero page). Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/mmu_context.h | 7 +++++++ arch/arm64/include/asm/pgtable-hwdef.h | 1 + arch/arm64/include/asm/proc-fns.h | 6 ------ arch/arm64/mm/proc.S | 9 ++++++--- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 9d155fa9a50791..aa39c126c0d08a 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -57,6 +57,13 @@ static inline void cpu_set_reserved_ttbr0(void) isb(); } +static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) +{ + BUG_ON(pgd == swapper_pg_dir); + cpu_set_reserved_ttbr0(); + cpu_do_switch_mm(virt_to_phys(pgd),mm); +} + /* * TCR.T0SZ value to use when the ID map is active. 
Usually equals * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index eb0c2bd90de903..8df4cb6ac6f71e 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -272,6 +272,7 @@ #define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT) #define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT) +#define TCR_A1 (UL(1) << 22) #define TCR_ASID16 (UL(1) << 36) #define TCR_TBI0 (UL(1) << 37) #define TCR_HA (UL(1) << 39) diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 14ad6e4e87d114..16cef2e8449ec1 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include -#define cpu_switch_mm(pgd,mm) \ -do { \ - BUG_ON(pgd == swapper_pg_dir); \ - cpu_do_switch_mm(virt_to_phys(pgd),mm); \ -} while (0) - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* __ASM_PROCFNS_H */ diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 877d42fb0df655..0bd7550b72301d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -139,9 +139,12 @@ ENDPROC(cpu_do_resume) */ ENTRY(cpu_do_switch_mm) pre_ttbr0_update_workaround x0, x2, x3 + mrs x2, ttbr1_el1 mmid x1, x1 // get mm->context.id - bfi x0, x1, #48, #16 // set the ASID - msr ttbr0_el1, x0 // set TTBR0 + bfi x2, x1, #48, #16 // set the ASID + msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) + isb + msr ttbr0_el1, x0 // now update TTBR0 isb post_ttbr0_update_workaround ret @@ -225,7 +228,7 @@ ENTRY(__cpu_setup) * both user and kernel. */ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ - TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 + TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1 tcr_set_idmap_t0sz x10, x9 /* From 4445cb04410fb5ea845e2991144f77d00a78ba0c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 10 Aug 2017 13:29:06 +0100 Subject: [PATCH 1809/3326] arm64: mm: Remove pre_ttbr0_update_workaround for Falkor erratum #E1003 Commit 85d13c001497 upstream. The pre_ttbr0_update_workaround hook is called prior to context-switching TTBR0 because Falkor erratum E1003 can cause TLB allocation with the wrong ASID if both the ASID and the base address of the TTBR are updated at the same time. With the ASID sitting safely in TTBR1, we no longer update things atomically, so we can remove the pre_ttbr0_update_workaround macro as it's no longer required. The erratum infrastructure and documentation is left around for #E1003, as it will be required by the entry trampoline code in a future patch. 
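Put differently (an illustrative summary based on the cpu_do_switch_mm change earlier in this series, not new code): the ASID and the translation table base are now written through two different registers, so no single register write changes both fields at once:

    /* before: one write updated ASID and BADDR together (needed the E1003 dance) */
    TTBR0_EL1 = new_BADDR | (new_ASID << 48)

    /* after: two writes, each changing only one of the two */
    TTBR1_EL1[63:48] = new_ASID     ; isb
    TTBR0_EL1        = new_BADDR    ; isb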
Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/assembler.h | 22 ---------------------- arch/arm64/include/asm/mmu_context.h | 2 -- arch/arm64/mm/context.c | 11 ----------- arch/arm64/mm/proc.S | 1 - 4 files changed, 36 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 7896728f3b220a..b52ac54fd8a41b 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -25,7 +25,6 @@ #include #include -#include #include #include #include @@ -465,27 +464,6 @@ alternative_endif .endm /* - * Errata workaround prior to TTBR0_EL1 update - * - * val: TTBR value with new BADDR, preserved - * tmp0: temporary register, clobbered - * tmp1: other temporary register, clobbered - */ - .macro pre_ttbr0_update_workaround, val, tmp0, tmp1 -#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 -alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 - mrs \tmp0, ttbr0_el1 - mov \tmp1, #FALKOR_RESERVED_ASID - bfi \tmp0, \tmp1, #48, #16 // reserved ASID + old BADDR - msr ttbr0_el1, \tmp0 - isb - bfi \tmp0, \val, #0, #48 // reserved ASID + new BADDR - msr ttbr0_el1, \tmp0 - isb -alternative_else_nop_endif -#endif - .endm - /* * Errata workaround post TTBR0_EL1 update. */ diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index aa39c126c0d08a..da29766a181ca7 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -19,8 +19,6 @@ #ifndef __ASM_MMU_CONTEXT_H #define __ASM_MMU_CONTEXT_H -#define FALKOR_RESERVED_ASID 1 - #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index ab9f5f0fb2c7fc..78816e476491ee 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -79,13 +79,6 @@ void verify_cpu_asid_bits(void) } } -static void set_reserved_asid_bits(void) -{ - if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) && - cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003)) - __set_bit(FALKOR_RESERVED_ASID, asid_map); -} - static void flush_context(unsigned int cpu) { int i; @@ -94,8 +87,6 @@ static void flush_context(unsigned int cpu) /* Update the list of reserved ASIDs and the ASID bitmap. */ bitmap_clear(asid_map, 0, NUM_USER_ASIDS); - set_reserved_asid_bits(); - /* * Ensure the generation bump is observed before we xchg the * active_asids. @@ -250,8 +241,6 @@ static int asids_init(void) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); - set_reserved_asid_bits(); - pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS); return 0; } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 0bd7550b72301d..1623150ed0a6b6 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -138,7 +138,6 @@ ENDPROC(cpu_do_resume) * - pgd_phys - physical address of new TTB */ ENTRY(cpu_do_switch_mm) - pre_ttbr0_update_workaround x0, x2, x3 mrs x2, ttbr1_el1 mmid x1, x1 // get mm->context.id bfi x2, x1, #48, #16 // set the ASID From 95ce0d51f9a8ee5417a3d1d8699d6f654dd90a62 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 10 Aug 2017 13:34:30 +0100 Subject: [PATCH 1810/3326] arm64: mm: Rename post_ttbr0_update_workaround Commit 158d495899ce upstream. The post_ttbr0_update_workaround hook applies to any change to TTBRx_EL1. Since we're using TTBR1 for the ASID, rename the hook to make it clearer as to what it's doing. 
Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/assembler.h | 5 ++--- arch/arm64/kernel/entry.S | 2 +- arch/arm64/mm/proc.S | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index b52ac54fd8a41b..a1e5c1eb3c85a6 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -464,10 +464,9 @@ alternative_endif .endm /* -/* - * Errata workaround post TTBR0_EL1 update. + * Errata workaround post TTBRx_EL1 update. */ - .macro post_ttbr0_update_workaround + .macro post_ttbr_update_workaround #ifdef CONFIG_CAVIUM_ERRATUM_27456 alternative_if ARM64_WORKAROUND_CAVIUM_27456 ic iallu diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index f5e851eeda4bb7..ae2c9a9380ff54 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -255,7 +255,7 @@ alternative_else_nop_endif * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache * corruption). */ - post_ttbr0_update_workaround + post_ttbr_update_workaround .endif 1: .if \el != 0 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 1623150ed0a6b6..447537c1699dd8 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -145,7 +145,7 @@ ENTRY(cpu_do_switch_mm) isb msr ttbr0_el1, x0 // now update TTBR0 isb - post_ttbr0_update_workaround + post_ttbr_update_workaround ret ENDPROC(cpu_do_switch_mm) From 00ff7de671f87cf7b330b563c16ee6fb8f5e3cab Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 10 Aug 2017 13:58:16 +0100 Subject: [PATCH 1811/3326] arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN Commit 27a921e75711 upstream. With the ASID now installed in TTBR1, we can re-enable ARM64_SW_TTBR0_PAN by ensuring that we switch to a reserved ASID of zero when disabling user access and restore the active user ASID on the uaccess enable path. 
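A condensed C-like view of the toggling the hunks below implement (sketch only; the authoritative code is the asm-uaccess.h/uaccess.h changes in this patch):

    /* __uaccess_ttbr0_disable() */
    TTBR0_EL1 = reserved_ttbr0;             /* empty table placed after swapper_pg_dir */
    isb();
    TTBR1_EL1 &= ~(0xffffUL << 48);         /* switch to the reserved ASID 0 */
    isb();

    /* __uaccess_ttbr0_enable()  -- interrupts disabled around the update */
    TTBR1_EL1 |= thread.ttbr0 & (0xffffUL << 48);   /* restore the active user ASID */
    isb();
    TTBR0_EL1 = thread.ttbr0;                       /* restore the user page table */
    isb();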
Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/Kconfig | 1 - arch/arm64/include/asm/asm-uaccess.h | 25 +++++++++++++++++-------- arch/arm64/include/asm/uaccess.h | 21 +++++++++++++++++---- arch/arm64/kernel/entry.S | 4 ++-- arch/arm64/lib/clear_user.S | 2 +- arch/arm64/lib/copy_from_user.S | 2 +- arch/arm64/lib/copy_in_user.S | 2 +- arch/arm64/lib/copy_to_user.S | 2 +- arch/arm64/mm/cache.S | 2 +- arch/arm64/xen/hypercall.S | 2 +- 10 files changed, 42 insertions(+), 21 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 63ff04fe03e109..8f73eaccf50a8a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -882,7 +882,6 @@ endif config ARM64_SW_TTBR0_PAN bool "Emulate Privileged Access Never using TTBR0_EL1 switching" - depends on BROKEN # Temporary while switch_mm is reworked help Enabling this option prevents the kernel from accessing user-space memory directly by pointing TTBR0_EL1 to a reserved diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index b3da6c88683596..21b8cf304028b0 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -16,11 +16,20 @@ add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 isb + sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE + bic \tmp1, \tmp1, #(0xffff << 48) + msr ttbr1_el1, \tmp1 // set reserved ASID + isb .endm - .macro __uaccess_ttbr0_enable, tmp1 + .macro __uaccess_ttbr0_enable, tmp1, tmp2 get_thread_info \tmp1 ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 + mrs \tmp2, ttbr1_el1 + extr \tmp2, \tmp2, \tmp1, #48 + ror \tmp2, \tmp2, #16 + msr ttbr1_el1, \tmp2 // set the active ASID + isb msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 isb .endm @@ -31,18 +40,18 @@ alternative_if_not ARM64_HAS_PAN alternative_else_nop_endif .endm - .macro uaccess_ttbr0_enable, tmp1, tmp2 + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 alternative_if_not ARM64_HAS_PAN - save_and_disable_irq \tmp2 // avoid preemption - __uaccess_ttbr0_enable \tmp1 - restore_irq \tmp2 + save_and_disable_irq \tmp3 // avoid preemption + __uaccess_ttbr0_enable \tmp1, \tmp2 + restore_irq \tmp3 alternative_else_nop_endif .endm #else .macro uaccess_ttbr0_disable, tmp1 .endm - .macro uaccess_ttbr0_enable, tmp1, tmp2 + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 .endm #endif @@ -56,8 +65,8 @@ alternative_if ARM64_ALT_PAN_NOT_UAO alternative_else_nop_endif .endm - .macro uaccess_enable_not_uao, tmp1, tmp2 - uaccess_ttbr0_enable \tmp1, \tmp2 + .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3 + uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3 alternative_if ARM64_ALT_PAN_NOT_UAO SET_PSTATE_PAN(0) alternative_else_nop_endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index fc0f9eb66039e7..750a3b76a01c34 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -107,15 +107,19 @@ static inline void __uaccess_ttbr0_disable(void) { unsigned long ttbr; + ttbr = read_sysreg(ttbr1_el1); /* reserved_ttbr0 placed at the end of swapper_pg_dir */ - ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE; - write_sysreg(ttbr, ttbr0_el1); + write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1); + isb(); + /* Set reserved ASID */ + ttbr &= ~(0xffffUL << 48); + write_sysreg(ttbr, ttbr1_el1); isb(); } static inline void __uaccess_ttbr0_enable(void) { - 
unsigned long flags; + unsigned long flags, ttbr0, ttbr1; /* * Disable interrupts to avoid preemption between reading the 'ttbr0' @@ -123,7 +127,16 @@ static inline void __uaccess_ttbr0_enable(void) * roll-over and an update of 'ttbr0'. */ local_irq_save(flags); - write_sysreg(current_thread_info()->ttbr0, ttbr0_el1); + ttbr0 = current_thread_info()->ttbr0; + + /* Restore active ASID */ + ttbr1 = read_sysreg(ttbr1_el1); + ttbr1 |= ttbr0 & (0xffffUL << 48); + write_sysreg(ttbr1, ttbr1_el1); + isb(); + + /* Restore user page table */ + write_sysreg(ttbr0, ttbr0_el1); isb(); local_irq_restore(flags); } diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index ae2c9a9380ff54..acfca2f49a2f54 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -184,7 +184,7 @@ alternative_if ARM64_HAS_PAN alternative_else_nop_endif .if \el != 0 - mrs x21, ttbr0_el1 + mrs x21, ttbr1_el1 tst x21, #0xffff << 48 // Check for the reserved ASID orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR b.eq 1f // TTBR0 access already disabled @@ -246,7 +246,7 @@ alternative_else_nop_endif tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set .endif - __uaccess_ttbr0_enable x0 + __uaccess_ttbr0_enable x0, x1 .if \el == 0 /* diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index e88fb99c156163..8f9c4641e706b2 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -30,7 +30,7 @@ * Alignment fixed up by hardware. */ ENTRY(__clear_user) - uaccess_enable_not_uao x2, x3 + uaccess_enable_not_uao x2, x3, x4 mov x2, x1 // save the size for fixup return subs x1, x1, #8 b.mi 2f diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 4b5d826895ff16..69d86a80f3e2a3 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -64,7 +64,7 @@ end .req x5 ENTRY(__arch_copy_from_user) - uaccess_enable_not_uao x3, x4 + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" uaccess_disable_not_uao x3 diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index b24a830419ad95..e442b531252ad8 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -65,7 +65,7 @@ end .req x5 ENTRY(raw_copy_in_user) - uaccess_enable_not_uao x3, x4 + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" uaccess_disable_not_uao x3 diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 351f0766f7a61c..318f15d5c336cc 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -63,7 +63,7 @@ end .req x5 ENTRY(__arch_copy_to_user) - uaccess_enable_not_uao x3, x4 + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" uaccess_disable_not_uao x3 diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 7f1dbe962cf581..6cd20a8c0952a1 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -49,7 +49,7 @@ ENTRY(flush_icache_range) * - end - virtual end address of region */ ENTRY(__flush_cache_user_range) - uaccess_ttbr0_enable x2, x3 + uaccess_ttbr0_enable x2, x3, x4 dcache_line_size x2, x3 sub x3, x2, #1 bic x4, x0, x3 diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 401ceb71540c74..acdbd2c9e899c1 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -101,7 +101,7 @@ ENTRY(privcmd_call) * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation * is enabled (it implies 
that hardware UAO and PAN disabled). */ - uaccess_ttbr0_enable x6, x7 + uaccess_ttbr0_enable x6, x7, x8 hvc XEN_IMM /* From a8bad38cff23eaed7b962c0bd7b0553fb31dec15 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 10 Aug 2017 14:10:28 +0100 Subject: [PATCH 1812/3326] arm64: mm: Allocate ASIDs in pairs Commit 0c8ea531b774 upstream. In preparation for separate kernel/user ASIDs, allocate them in pairs for each mm_struct. The bottom bit distinguishes the two: if it is set, then the ASID will map only userspace. Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/mmu.h | 1 + arch/arm64/mm/context.c | 25 +++++++++++++++++-------- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 0d34bf0a89c724..01bfb184f2a842 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -17,6 +17,7 @@ #define __ASM_MMU_H #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */ +#define USER_ASID_FLAG (UL(1) << 48) typedef struct { atomic64_t id; diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 78816e476491ee..db28958d9e4f7c 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending; #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) #define ASID_FIRST_VERSION (1UL << asid_bits) -#define NUM_USER_ASIDS ASID_FIRST_VERSION + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1) +#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1) +#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK) +#else +#define NUM_USER_ASIDS (ASID_FIRST_VERSION) +#define asid2idx(asid) ((asid) & ~ASID_MASK) +#define idx2asid(idx) asid2idx(idx) +#endif /* Get the ASIDBits supported by the current CPU */ static u32 get_cpu_asid_bits(void) @@ -104,7 +113,7 @@ static void flush_context(unsigned int cpu) */ if (asid == 0) asid = per_cpu(reserved_asids, i); - __set_bit(asid & ~ASID_MASK, asid_map); + __set_bit(asid2idx(asid), asid_map); per_cpu(reserved_asids, i) = asid; } @@ -156,16 +165,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) * We had a valid ASID in a previous life, so try to re-use * it if possible. */ - asid &= ~ASID_MASK; - if (!__test_and_set_bit(asid, asid_map)) + if (!__test_and_set_bit(asid2idx(asid), asid_map)) return newasid; } /* * Allocate a free ASID. If we can't find one, take a note of the - * currently active ASIDs and mark the TLBs as requiring flushes. - * We always count from ASID #1, as we use ASID #0 when setting a - * reserved TTBR0 for the init_mm. + * currently active ASIDs and mark the TLBs as requiring flushes. We + * always count from ASID #2 (index 1), as we use ASID #0 when setting + * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd + * pairs. 
*/ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); if (asid != NUM_USER_ASIDS) @@ -182,7 +191,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) set_asid: __set_bit(asid, asid_map); cur_idx = asid; - return asid | generation; + return idx2asid(asid) | generation; } void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) From 53b13950886c8dd83fdb613cd5b3a150601120e9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 13:58:08 +0000 Subject: [PATCH 1813/3326] arm64: mm: Add arm64_kernel_unmapped_at_el0 helper Commit fc0e1299da54 upstream. In order for code such as TLB invalidation to operate efficiently when the decision to map the kernel at EL0 is determined at runtime, this patch introduces a helper function, arm64_kernel_unmapped_at_el0, to determine whether or not the kernel is mapped whilst running in userspace. Currently, this just reports the value of CONFIG_UNMAP_KERNEL_AT_EL0, but will later be hooked up to a fake CPU capability using a static key. Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/mmu.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 01bfb184f2a842..c07954638658e6 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -19,6 +19,8 @@ #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */ #define USER_ASID_FLAG (UL(1) << 48) +#ifndef __ASSEMBLY__ + typedef struct { atomic64_t id; void *vdso; @@ -32,6 +34,11 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) +static inline bool arm64_kernel_unmapped_at_el0(void) +{ + return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0); +} + extern void paging_init(void); extern void bootmem_init(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); @@ -42,4 +49,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, extern void *fixmap_remap_fdt(phys_addr_t dt_phys); extern void mark_linear_text_alias_ro(void); +#endif /* !__ASSEMBLY__ */ #endif From d6c414013bb4c41c93fcef1ffa90df4f56c4cc53 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 10 Aug 2017 14:13:33 +0100 Subject: [PATCH 1814/3326] arm64: mm: Invalidate both kernel and user ASIDs when performing TLBI Commit 9b0de864b5bc upstream. Since an mm has both a kernel and a user ASID, we need to ensure that broadcast TLB maintenance targets both address spaces so that things like CoW continue to work with the uaccess primitives in the kernel. Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/tlbflush.h | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index af1c76981911db..9e82dd79c7db59 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -23,6 +23,7 @@ #include #include +#include /* * Raw TLBI operations. @@ -54,6 +55,11 @@ #define __tlbi(op, ...) 
__TLBI_N(op, ##__VA_ARGS__, 1, 0) +#define __tlbi_user(op, arg) do { \ + if (arm64_kernel_unmapped_at_el0()) \ + __tlbi(op, (arg) | USER_ASID_FLAG); \ +} while (0) + /* * TLB Management * ============== @@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm) dsb(ishst); __tlbi(aside1is, asid); + __tlbi_user(aside1is, asid); dsb(ish); } @@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, dsb(ishst); __tlbi(vale1is, addr); + __tlbi_user(vale1is, addr); dsb(ish); } @@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, dsb(ishst); for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { - if (last_level) + if (last_level) { __tlbi(vale1is, addr); - else + __tlbi_user(vale1is, addr); + } else { __tlbi(vae1is, addr); + __tlbi_user(vae1is, addr); + } } dsb(ish); } @@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm, unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); __tlbi(vae1is, addr); + __tlbi_user(vae1is, addr); dsb(ish); } From 78422a7b5160954aff02bc3655271adc5d21b04e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 14:07:40 +0000 Subject: [PATCH 1815/3326] arm64: entry: Add exception trampoline page for exceptions from EL0 Commit c7b9adaf85f8 upstream. To allow unmapping of the kernel whilst running at EL0, we need to point the exception vectors at an entry trampoline that can map/unmap the kernel on entry/exit respectively. This patch adds the trampoline page, although it is not yet plugged into the vector table and is therefore unused. Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/entry.S | 86 +++++++++++++++++++++++++++++++++ arch/arm64/kernel/vmlinux.lds.S | 17 +++++++ 2 files changed, 103 insertions(+) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index acfca2f49a2f54..cbef6983b52fd4 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -28,6 +28,8 @@ #include #include #include +#include +#include #include #include #include @@ -895,6 +897,90 @@ __ni_sys_trace: .popsection // .entry.text +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +/* + * Exception vectors trampoline. + */ + .pushsection ".entry.tramp.text", "ax" + + .macro tramp_map_kernel, tmp + mrs \tmp, ttbr1_el1 + sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) + bic \tmp, \tmp, #USER_ASID_FLAG + msr ttbr1_el1, \tmp + .endm + + .macro tramp_unmap_kernel, tmp + mrs \tmp, ttbr1_el1 + add \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) + orr \tmp, \tmp, #USER_ASID_FLAG + msr ttbr1_el1, \tmp + /* + * We avoid running the post_ttbr_update_workaround here because the + * user and kernel ASIDs don't have conflicting mappings, so any + * "blessing" as described in: + * + * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com + * + * will not hurt correctness. Whilst this may partially defeat the + * point of using split ASIDs in the first place, it avoids + * the hit of invalidating the entire I-cache on every return to + * userspace. 
+ */ + .endm + + .macro tramp_ventry, regsize = 64 + .align 7 +1: + .if \regsize == 64 + msr tpidrro_el0, x30 // Restored in kernel_ventry + .endif + tramp_map_kernel x30 + ldr x30, =vectors + prfm plil1strm, [x30, #(1b - tramp_vectors)] + msr vbar_el1, x30 + add x30, x30, #(1b - tramp_vectors) + isb + br x30 + .endm + + .macro tramp_exit, regsize = 64 + adr x30, tramp_vectors + msr vbar_el1, x30 + tramp_unmap_kernel x30 + .if \regsize == 64 + mrs x30, far_el1 + .endif + eret + .endm + + .align 11 +ENTRY(tramp_vectors) + .space 0x400 + + tramp_ventry + tramp_ventry + tramp_ventry + tramp_ventry + + tramp_ventry 32 + tramp_ventry 32 + tramp_ventry 32 + tramp_ventry 32 +END(tramp_vectors) + +ENTRY(tramp_exit_native) + tramp_exit +END(tramp_exit_native) + +ENTRY(tramp_exit_compat) + tramp_exit 32 +END(tramp_exit_compat) + + .ltorg + .popsection // .entry.tramp.text +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + /* * Special system call wrappers. */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 7da3e5c366a072..6b4260f22aab3a 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -57,6 +57,17 @@ jiffies = jiffies_64; #define HIBERNATE_TEXT #endif +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define TRAMP_TEXT \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \ + *(.entry.tramp.text) \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__entry_tramp_text_end) = .; +#else +#define TRAMP_TEXT +#endif + /* * The size of the PE/COFF section that covers the kernel image, which * runs from stext to _edata, must be a round multiple of the PE/COFF @@ -113,6 +124,7 @@ SECTIONS HYPERVISOR_TEXT IDMAP_TEXT HIBERNATE_TEXT + TRAMP_TEXT *(.fixup) *(.gnu.warning) . = ALIGN(16); @@ -214,6 +226,11 @@ SECTIONS . += RESERVED_TTBR0_SIZE; #endif +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + tramp_pg_dir = .; + . += PAGE_SIZE; +#endif + __pecoff_data_size = ABSOLUTE(. - __initdata_begin); _end = .; From 63d13760abd7a819b6e95793c795cdc155bfff44 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 14:14:17 +0000 Subject: [PATCH 1816/3326] arm64: mm: Map entry trampoline into trampoline and kernel page tables Commit 51a0048beb44 upstream. The exception entry trampoline needs to be mapped at the same virtual address in both the trampoline page table (which maps nothing else) and also the kernel page table, so that we can swizzle TTBR1_EL1 on exceptions from and return to EL0. This patch maps the trampoline at a fixed virtual address in the fixmap area of the kernel virtual address space, which allows the kernel proper to be randomized with respect to the trampoline when KASLR is enabled. 
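The trampoline alias (TRAMP_VALIAS) is simply a fixed slot in the fixmap region, so its address is known independently of where KASLR places the kernel image. A standalone C sketch of that fixmap-style arithmetic, using an invented FIXADDR_TOP and slot index rather than the kernel's real values, could look like this:

/* Fixmap-style address arithmetic; FIXADDR_TOP and the slot index are invented. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define FIXADDR_TOP  0xfffffdfffe000000ULL      /* made-up top of a fixmap region */

/* Each fixmap slot sits one page below the previous one. */
static uint64_t fix_to_virt(unsigned int slot)
{
        return FIXADDR_TOP - ((uint64_t)slot << PAGE_SHIFT);
}

int main(void)
{
        unsigned int fix_entry_tramp_text = 2;  /* hypothetical slot index */

        printf("TRAMP_VALIAS = 0x%016llx\n",
               (unsigned long long)fix_to_virt(fix_entry_tramp_text));
        return 0;
}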
Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/fixmap.h | 4 ++++ arch/arm64/include/asm/pgtable.h | 1 + arch/arm64/kernel/asm-offsets.c | 6 +++++- arch/arm64/mm/mmu.c | 23 +++++++++++++++++++++++ 4 files changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index 4052ec39e8dbb0..8119b49be98dc2 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -58,6 +58,10 @@ enum fixed_addresses { FIX_APEI_GHES_NMI, #endif /* CONFIG_ACPI_APEI_GHES */ +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + FIX_ENTRY_TRAMP_TEXT, +#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ __end_of_permanent_fixed_addresses, /* diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 960d05c8816af0..aafea648a30ff9 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -684,6 +684,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; +extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; /* * Encode and decode a swap entry: diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 71bf088f1e4b2e..af247d10252f50 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -148,11 +149,14 @@ int main(void) DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); - BLANK(); DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next)); DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val)); + BLANK(); +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + DEFINE(TRAMP_VALIAS, TRAMP_VALIAS); +#endif return 0; } diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index f1eb15e0e8642d..3b9af178f791ba 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -525,6 +525,29 @@ static int __init parse_rodata(char *arg) } early_param("rodata", parse_rodata); +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static int __init map_entry_trampoline(void) +{ + extern char __entry_tramp_text_start[]; + + pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; + phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); + + /* The trampoline is always mapped and can therefore be global */ + pgprot_val(prot) &= ~PTE_NG; + + /* Map only the text into the trampoline page table */ + memset(tramp_pg_dir, 0, PGD_SIZE); + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, + prot, pgd_pgtable_alloc, 0); + + /* ...as well as the kernel page table */ + __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); + return 0; +} +core_initcall(map_entry_trampoline); +#endif + /* * Create fine-grained mappings for the kernel. */ From 86200f218f98704fa4b78734d574a38519d5ef3c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 14:20:21 +0000 Subject: [PATCH 1817/3326] arm64: entry: Explicitly pass exception level to kernel_ventry macro Commit 5b1f7fe41909 upstream. 
We will need to treat exceptions from EL0 differently in kernel_ventry, so rework the macro to take the exception level as an argument and construct the branch target using that. Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/entry.S | 46 +++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index cbef6983b52fd4..fa924eb6b4c2a5 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -71,7 +71,7 @@ #define BAD_FIQ 2 #define BAD_ERROR 3 - .macro kernel_ventry label + .macro kernel_ventry, el, label, regsize = 64 .align 7 sub sp, sp, #S_FRAME_SIZE #ifdef CONFIG_VMAP_STACK @@ -84,7 +84,7 @@ tbnz x0, #THREAD_SHIFT, 0f sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp - b \label + b el\()\el\()_\label 0: /* @@ -116,7 +116,7 @@ sub sp, sp, x0 mrs x0, tpidrro_el0 #endif - b \label + b el\()\el\()_\label .endm .macro kernel_entry, el, regsize = 64 @@ -367,31 +367,31 @@ tsk .req x28 // current thread_info .align 11 ENTRY(vectors) - kernel_ventry el1_sync_invalid // Synchronous EL1t - kernel_ventry el1_irq_invalid // IRQ EL1t - kernel_ventry el1_fiq_invalid // FIQ EL1t - kernel_ventry el1_error_invalid // Error EL1t + kernel_ventry 1, sync_invalid // Synchronous EL1t + kernel_ventry 1, irq_invalid // IRQ EL1t + kernel_ventry 1, fiq_invalid // FIQ EL1t + kernel_ventry 1, error_invalid // Error EL1t - kernel_ventry el1_sync // Synchronous EL1h - kernel_ventry el1_irq // IRQ EL1h - kernel_ventry el1_fiq_invalid // FIQ EL1h - kernel_ventry el1_error_invalid // Error EL1h + kernel_ventry 1, sync // Synchronous EL1h + kernel_ventry 1, irq // IRQ EL1h + kernel_ventry 1, fiq_invalid // FIQ EL1h + kernel_ventry 1, error_invalid // Error EL1h - kernel_ventry el0_sync // Synchronous 64-bit EL0 - kernel_ventry el0_irq // IRQ 64-bit EL0 - kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0 - kernel_ventry el0_error_invalid // Error 64-bit EL0 + kernel_ventry 0, sync // Synchronous 64-bit EL0 + kernel_ventry 0, irq // IRQ 64-bit EL0 + kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0 + kernel_ventry 0, error_invalid // Error 64-bit EL0 #ifdef CONFIG_COMPAT - kernel_ventry el0_sync_compat // Synchronous 32-bit EL0 - kernel_ventry el0_irq_compat // IRQ 32-bit EL0 - kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0 - kernel_ventry el0_error_invalid_compat // Error 32-bit EL0 + kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 + kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 + kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0 + kernel_ventry 0, error_invalid_compat, 32 // Error 32-bit EL0 #else - kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0 - kernel_ventry el0_irq_invalid // IRQ 32-bit EL0 - kernel_ventry el0_fiq_invalid // FIQ 32-bit EL0 - kernel_ventry el0_error_invalid // Error 32-bit EL0 + kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0 + kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0 + kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0 + kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0 #endif END(vectors) From 6472f1a3a54f061864a5091788778793661aa7c0 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 14:24:29 +0000 Subject: [PATCH 1818/3326] arm64: entry: Hook up entry trampoline to exception vectors Commit 4bf3286d29f3 
upstream. Hook up the entry trampoline to our exception vectors so that all exceptions from and returns to EL0 go via the trampoline, which swizzles the vector base register accordingly. Transitioning to and from the kernel clobbers x30, so we use tpidrro_el0 and far_el1 as scratch registers for native tasks. Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/entry.S | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index fa924eb6b4c2a5..f66ab8eb45b939 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -73,6 +73,17 @@ .macro kernel_ventry, el, label, regsize = 64 .align 7 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + .if \el == 0 + .if \regsize == 64 + mrs x30, tpidrro_el0 + msr tpidrro_el0, xzr + .else + mov x30, xzr + .endif + .endif +#endif + sub sp, sp, #S_FRAME_SIZE #ifdef CONFIG_VMAP_STACK /* @@ -119,6 +130,11 @@ b el\()\el\()_\label .endm + .macro tramp_alias, dst, sym + mov_q \dst, TRAMP_VALIAS + add \dst, \dst, #(\sym - .entry.tramp.text) + .endm + .macro kernel_entry, el, regsize = 64 .if \regsize == 32 mov w0, w0 // zero upper 32 bits of x0 @@ -269,18 +285,20 @@ alternative_else_nop_endif .if \el == 0 ldr x23, [sp, #S_SP] // load return stack pointer msr sp_el0, x23 + tst x22, #PSR_MODE32_BIT // native task? + b.eq 3f + #ifdef CONFIG_ARM64_ERRATUM_845719 alternative_if ARM64_WORKAROUND_845719 - tbz x22, #4, 1f #ifdef CONFIG_PID_IN_CONTEXTIDR mrs x29, contextidr_el1 msr contextidr_el1, x29 #else msr contextidr_el1, xzr #endif -1: alternative_else_nop_endif #endif +3: .endif msr elr_el1, x21 // set up the return data @@ -302,7 +320,22 @@ alternative_else_nop_endif ldp x28, x29, [sp, #16 * 14] ldr lr, [sp, #S_LR] add sp, sp, #S_FRAME_SIZE // restore sp - eret // return to kernel + +#ifndef CONFIG_UNMAP_KERNEL_AT_EL0 + eret +#else + .if \el == 0 + bne 4f + msr far_el1, x30 + tramp_alias x30, tramp_exit_native + br x30 +4: + tramp_alias x30, tramp_exit_compat + br x30 + .else + eret + .endif +#endif .endm .macro irq_stack_entry From 15a511c0cbd1b1c8f800ce8c8921fb2390fb2d05 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 14:29:19 +0000 Subject: [PATCH 1819/3326] arm64: erratum: Work around Falkor erratum #E1003 in trampoline code Commit d1777e686ad1 upstream. We rely on an atomic swizzling of TTBR1 when transitioning from the entry trampoline to the kernel proper on an exception. We can't rely on this atomicity in the face of Falkor erratum #E1003, so on affected cores we can issue a TLB invalidation to invalidate the walk cache prior to jumping into the kernel. There is still the possibility of a TLB conflict here due to conflicting walk cache entries prior to the invalidation, but this doesn't appear to be the case on these CPUs in practice. 
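As an illustration of the workaround, the TLBI operand carries the ASID in bits [63:48] and VA[55:12] of the 2MB-aligned trampoline page in the low bits. The sketch below models only that bit packing; the ASID and trampoline alias are invented, and the real code builds the value with MOVK so that the ASID already present in the register is preserved:

/* Sketch of composing a TLBI VAE1 operand for the trampoline's 2MB region;
 * the ASID and trampoline alias are invented values. */
#include <stdint.h>
#include <stdio.h>

#define SZ_2M        (2ULL * 1024 * 1024)
#define TRAMP_VALIAS 0xfffffdfffe000000ULL      /* hypothetical trampoline alias */

static uint64_t tlbi_vae1_arg(uint16_t asid, uint64_t valias)
{
        /* VA[55:12] of the 2MB-aligned trampoline page goes in the low bits */
        uint64_t va_page = ((valias & ~(SZ_2M - 1)) >> 12) & ((1ULL << 44) - 1);

        return ((uint64_t)asid << 48) | va_page;
}

int main(void)
{
        printf("tlbi vae1, 0x%016llx\n",
               (unsigned long long)tlbi_vae1_arg(0x0001, TRAMP_VALIAS));
        return 0;
}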
Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/Kconfig | 17 +++++------------ arch/arm64/kernel/entry.S | 12 ++++++++++++ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 8f73eaccf50a8a..3cec78c81b33cf 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -504,20 +504,13 @@ config CAVIUM_ERRATUM_30115 config QCOM_FALKOR_ERRATUM_1003 bool "Falkor E1003: Incorrect translation due to ASID change" default y - select ARM64_PAN if ARM64_SW_TTBR0_PAN help On Falkor v1, an incorrect ASID may be cached in the TLB when ASID - and BADDR are changed together in TTBRx_EL1. The workaround for this - issue is to use a reserved ASID in cpu_do_switch_mm() before - switching to the new ASID. Saying Y here selects ARM64_PAN if - ARM64_SW_TTBR0_PAN is selected. This is done because implementing and - maintaining the E1003 workaround in the software PAN emulation code - would be an unnecessary complication. The affected Falkor v1 CPU - implements ARMv8.1 hardware PAN support and using hardware PAN - support versus software PAN emulation is mutually exclusive at - runtime. - - If unsure, say Y. + and BADDR are changed together in TTBRx_EL1. Since we keep the ASID + in TTBR1_EL1, this situation only occurs in the entry trampoline and + then only for entries in the walk cache, since the leaf translation + is unchanged. Work around the erratum by invalidating the walk cache + entries for the trampoline before entering the kernel proper. config QCOM_FALKOR_ERRATUM_1009 bool "Falkor E1009: Prematurely complete a DSB after a TLBI" diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index f66ab8eb45b939..edf7681f0d370c 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -941,6 +941,18 @@ __ni_sys_trace: sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) bic \tmp, \tmp, #USER_ASID_FLAG msr ttbr1_el1, \tmp +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 +alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 + /* ASID already in \tmp[63:48] */ + movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12) + movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12) + /* 2MB boundary containing the vectors, so we nobble the walk cache */ + movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12) + isb + tlbi vae1, \tmp + dsb nsh +alternative_else_nop_endif +#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */ .endm .macro tramp_unmap_kernel, tmp From 4e29f25b2bc1e3a5f545b2ce842911c030277309 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 13 Dec 2017 14:19:37 -0800 Subject: [PATCH 1820/3326] arm64: cpu_errata: Add Kryo to Falkor 1003 errata Commit bb48711800e6 upstream. The Kryo CPUs are also affected by the Falkor 1003 errata, so we need to do the same workaround on Kryo CPUs. The MIDR is slightly more complicated here, where the PART number is not always the same when looking at all the bits from 15 to 4. Drop the lower 8 bits and just look at the top 4 to see if it's '2' and then consider those as Kryo CPUs. This covers all the combinations without having to list them all out. 
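The masking described above can be modelled in a few lines of C; the sample MIDR below is made up, and only the implementer, architecture and top part-number nibble survive the mask:

/* Sketch of the Kryo MIDR match: mask off variant, revision and the low
 * 8 bits of the part number, then compare implementer, architecture and
 * the top part-number nibble. The sample MIDR is invented. */
#include <stdint.h>
#include <stdio.h>

#define MIDR_IMPLEMENTOR_MASK   (0xffU << 24)
#define MIDR_ARCHITECTURE_MASK  (0xfU << 16)
#define MIDR_PARTNUM_SHIFT      4
#define MIDR_QCOM_KRYO          0x510f2000U     /* implementer 0x51, part 0x2xx */

static int is_kryo_midr(uint32_t midr)
{
        uint32_t model = midr & (MIDR_IMPLEMENTOR_MASK |
                                 (0xf00U << MIDR_PARTNUM_SHIFT) |
                                 MIDR_ARCHITECTURE_MASK);

        return model == MIDR_QCOM_KRYO;
}

int main(void)
{
        uint32_t sample = 0x510f2051U;  /* hypothetical Kryo: part 0x205, rev 1 */

        printf("is_kryo_midr(0x%08x) = %d\n", sample, is_kryo_midr(sample));
        return 0;
}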
Fixes: 38fd94b0275c ("arm64: Work around Falkor erratum 1003") Acked-by: Will Deacon Signed-off-by: Stephen Boyd Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- Documentation/arm64/silicon-errata.txt | 2 +- arch/arm64/include/asm/cputype.h | 2 ++ arch/arm64/kernel/cpu_errata.c | 21 +++++++++++++++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 704770c0edf2e3..f3d0d316d5f128 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -71,7 +71,7 @@ stable kernels. | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 | | Hisilicon | Hip0{6,7} | #161010701 | N/A | | | | | | -| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | +| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | | Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 | | Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 | diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index cbf08d7cbf3089..da036fe685732c 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -92,6 +92,7 @@ #define QCOM_CPU_PART_FALKOR_V1 0x800 #define QCOM_CPU_PART_FALKOR 0xC00 +#define QCOM_CPU_PART_KRYO 0x200 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) @@ -101,6 +102,7 @@ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) +#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 0e27f86ee70976..e4c78630a730cd 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -30,6 +30,20 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) entry->midr_range_max); } +static bool __maybe_unused +is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) +{ + u32 model; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + model = read_cpuid_id(); + model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | + MIDR_ARCHITECTURE_MASK; + + return model == entry->midr_model; +} + static bool has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry, int scope) @@ -169,6 +183,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(0, 0)), }, + { + .desc = "Qualcomm Technologies Kryo erratum 1003", + .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, + .def_scope = SCOPE_LOCAL_CPU, + .midr_model = MIDR_QCOM_KRYO, + .matches = is_kryo_midr, + }, #endif #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009 { From 2843ade2dad2249900f508f61068d245921ce9f1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 14:33:28 +0000 Subject: [PATCH 1821/3326] arm64: tls: Avoid unconditional zeroing of tpidrro_el0 for native tasks Commit 18011eac28c7 upstream. 
When unmapping the kernel at EL0, we use tpidrro_el0 as a scratch register during exception entry from native tasks and subsequently zero it in the kernel_ventry macro. We can therefore avoid zeroing tpidrro_el0 in the context-switch path for native tasks using the entry trampoline. Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/process.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index bcd22d7ee59023..9e773732520cd5 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -314,16 +314,14 @@ void tls_preserve_current_state(void) static void tls_thread_switch(struct task_struct *next) { - unsigned long tpidr, tpidrro; - tls_preserve_current_state(); - tpidr = *task_user_tls(next); - tpidrro = is_compat_thread(task_thread_info(next)) ? - next->thread.tp_value : 0; + if (is_compat_thread(task_thread_info(next))) + write_sysreg(next->thread.tp_value, tpidrro_el0); + else if (!arm64_kernel_unmapped_at_el0()) + write_sysreg(0, tpidrro_el0); - write_sysreg(tpidr, tpidr_el0); - write_sysreg(tpidrro, tpidrro_el0); + write_sysreg(*task_user_tls(next), tpidr_el0); } /* Restore the UAO state depending on next's addr_limit */ From 7fafcbb04f94d4709a5c8b7fb2c196b5bdf4fd7b Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 14:38:19 +0000 Subject: [PATCH 1822/3326] arm64: entry: Add fake CPU feature for unmapping the kernel at EL0 Commit ea1e3de85e94 upstream. Allow explicit disabling of the entry trampoline on the kernel command line (kpti=off) by adding a fake CPU feature (ARM64_UNMAP_KERNEL_AT_EL0) that can be used to toggle the alternative sequences in our entry code and avoid use of the trampoline altogether if desired. This also allows us to make use of a static key in arm64_kernel_unmapped_at_el0(). 
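A simplified user-space model of the tri-state force flag and the resulting decision (strtobool and the capability framework are replaced by plain C here) could look like this:

/* Simplified model of the kpti= tri-state: 0 means "not forced", >0 forced
 * on, <0 forced off; otherwise fall back to a default policy (KASLR here). */
#include <stdio.h>
#include <string.h>

static int kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

static void parse_kpti(const char *arg)
{
        if (!strcmp(arg, "on") || !strcmp(arg, "1"))
                kpti_forced = 1;
        else if (!strcmp(arg, "off") || !strcmp(arg, "0"))
                kpti_forced = -1;
}

static int unmap_kernel_at_el0(int kaslr_enabled)
{
        if (kpti_forced)
                return kpti_forced > 0;
        return kaslr_enabled;   /* "useful for KASLR robustness" */
}

int main(void)
{
        parse_kpti("off");
        printf("unmap kernel at EL0: %d\n", unmap_kernel_at_el0(1));
        return 0;
}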
Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/cpucaps.h | 3 ++- arch/arm64/include/asm/mmu.h | 3 ++- arch/arm64/kernel/cpufeature.c | 41 ++++++++++++++++++++++++++++++++ arch/arm64/kernel/entry.S | 9 +++---- 4 files changed, 50 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 8da621627d7c89..6835a48b31d4a4 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -40,7 +40,8 @@ #define ARM64_WORKAROUND_858921 19 #define ARM64_WORKAROUND_CAVIUM_30115 20 #define ARM64_HAS_DCPOP 21 +#define ARM64_UNMAP_KERNEL_AT_EL0 23 -#define ARM64_NCAPS 22 +#define ARM64_NCAPS 24 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index c07954638658e6..da6f12e4071418 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -36,7 +36,8 @@ typedef struct { static inline bool arm64_kernel_unmapped_at_el0(void) { - return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0); + return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && + cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); } extern void paging_init(void); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 21e2c95d24e72a..1a6ec079c541cf 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -796,6 +796,40 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus ID_AA64PFR0_FP_SHIFT) < 0; } +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ + +static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + /* Forced on command line? */ + if (__kpti_forced) { + pr_info_once("kernel page table isolation forced %s by command line option\n", + __kpti_forced > 0 ? "ON" : "OFF"); + return __kpti_forced > 0; + } + + /* Useful for KASLR robustness */ + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + return true; + + return false; +} + +static int __init parse_kpti(char *str) +{ + bool enabled; + int ret = strtobool(str, &enabled); + + if (ret) + return ret; + + __kpti_forced = enabled ? 
1 : -1; + return 0; +} +__setup("kpti=", parse_kpti); +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -882,6 +916,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .def_scope = SCOPE_SYSTEM, .matches = hyp_offset_low, }, +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + { + .capability = ARM64_UNMAP_KERNEL_AT_EL0, + .def_scope = SCOPE_SYSTEM, + .matches = unmap_kernel_at_el0, + }, +#endif { /* FP/SIMD is not implemented */ .capability = ARM64_HAS_NO_FPSIMD, diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index edf7681f0d370c..b431a6de1b4f38 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -74,6 +74,7 @@ .macro kernel_ventry, el, label, regsize = 64 .align 7 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +alternative_if ARM64_UNMAP_KERNEL_AT_EL0 .if \el == 0 .if \regsize == 64 mrs x30, tpidrro_el0 @@ -82,6 +83,7 @@ mov x30, xzr .endif .endif +alternative_else_nop_endif #endif sub sp, sp, #S_FRAME_SIZE @@ -321,10 +323,9 @@ alternative_else_nop_endif ldr lr, [sp, #S_LR] add sp, sp, #S_FRAME_SIZE // restore sp -#ifndef CONFIG_UNMAP_KERNEL_AT_EL0 - eret -#else .if \el == 0 +alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 bne 4f msr far_el1, x30 tramp_alias x30, tramp_exit_native @@ -332,10 +333,10 @@ alternative_else_nop_endif 4: tramp_alias x30, tramp_exit_compat br x30 +#endif .else eret .endif -#endif .endm .macro irq_stack_entry From 3882b5f63f3cfff225cee691dcb8357bf6714d07 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 6 Dec 2017 11:24:02 +0000 Subject: [PATCH 1823/3326] arm64: kaslr: Put kernel vectors address in separate data page Commit 6c27c4082f4f upstream. The literal pool entry for identifying the vectors base is the only piece of information in the trampoline page that identifies the true location of the kernel. This patch moves it into a page-aligned region of the .rodata section and maps this adjacent to the trampoline text via an additional fixmap entry, which protects against any accidental leakage of the trampoline contents. 
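Conceptually, the trampoline finds the real vectors by loading a pointer from the data page mapped immediately after its own text page. A toy model of that layout, using an ordinary buffer instead of real fixmap mappings and an invented kernel address, is:

/* Toy model: the vectors address lives in a data page placed right after the
 * trampoline text page, so a PC-relative load at text+PAGE_SIZE finds it.
 * Pages are simulated with a plain buffer and the kernel VA is invented. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static unsigned char tramp[2 * PAGE_SIZE];      /* [text page][data page] */

int main(void)
{
        uint64_t vectors = 0xffff000008082000ULL;       /* made-up kernel VA */
        uint64_t loaded;

        /* Equivalent of __entry_tramp_data_start: .quad vectors */
        memcpy(tramp + PAGE_SIZE, &vectors, sizeof(vectors));

        /* Equivalent of: adr x30, tramp_vectors + PAGE_SIZE; ldr x30, [x30] */
        memcpy(&loaded, tramp + PAGE_SIZE, sizeof(loaded));

        printf("vectors = 0x%016llx\n", (unsigned long long)loaded);
        return 0;
}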
Suggested-by: Ard Biesheuvel Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/fixmap.h | 1 + arch/arm64/kernel/entry.S | 14 ++++++++++++++ arch/arm64/kernel/vmlinux.lds.S | 5 ++++- arch/arm64/mm/mmu.c | 10 +++++++++- 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index 8119b49be98dc2..ec1e6d6fa14ccd 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -59,6 +59,7 @@ enum fixed_addresses { #endif /* CONFIG_ACPI_APEI_GHES */ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + FIX_ENTRY_TRAMP_DATA, FIX_ENTRY_TRAMP_TEXT, #define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index b431a6de1b4f38..d3551e5025c024 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -982,7 +982,13 @@ alternative_else_nop_endif msr tpidrro_el0, x30 // Restored in kernel_ventry .endif tramp_map_kernel x30 +#ifdef CONFIG_RANDOMIZE_BASE + adr x30, tramp_vectors + PAGE_SIZE +alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 + ldr x30, [x30] +#else ldr x30, =vectors +#endif prfm plil1strm, [x30, #(1b - tramp_vectors)] msr vbar_el1, x30 add x30, x30, #(1b - tramp_vectors) @@ -1025,6 +1031,14 @@ END(tramp_exit_compat) .ltorg .popsection // .entry.tramp.text +#ifdef CONFIG_RANDOMIZE_BASE + .pushsection ".rodata", "a" + .align PAGE_SHIFT + .globl __entry_tramp_data_start +__entry_tramp_data_start: + .quad vectors + .popsection // .rodata +#endif /* CONFIG_RANDOMIZE_BASE */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ /* diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 6b4260f22aab3a..ddfd3c0942f7ac 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -251,7 +251,10 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) <= SZ_4K, "Hibernate exit text too big or misaligned") #endif - +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, + "Entry trampoline text too big") +#endif /* * If padding is applied before .head.text, virt<->phys conversions will fail. */ diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 3b9af178f791ba..606c3654186ffa 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -541,8 +541,16 @@ static int __init map_entry_trampoline(void) __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, prot, pgd_pgtable_alloc, 0); - /* ...as well as the kernel page table */ + /* Map both the text and data into the kernel page table */ __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + extern char __entry_tramp_data_start[]; + + __set_fixmap(FIX_ENTRY_TRAMP_DATA, + __pa_symbol(__entry_tramp_data_start), + PAGE_KERNEL_RO); + } + return 0; } core_initcall(map_entry_trampoline); From 68a65ce7039aa247b911e00ed5c743db35167781 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 16:15:59 +0000 Subject: [PATCH 1824/3326] arm64: use RET instruction for exiting the trampoline Commit be04a6d1126b upstream. 
Speculation attacks against the entry trampoline can potentially resteer the speculative instruction stream through the indirect branch and into arbitrary gadgets within the kernel. This patch defends against these attacks by forcing a misprediction through the return stack: a dummy BL instruction loads an entry into the stack, so that the predicted program flow of the subsequent RET instruction is to a branch-to-self instruction which is finally resolved as a branch to the kernel vectors with speculation suppressed. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/entry.S | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index d3551e5025c024..b32ba0332ccb42 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -981,6 +981,14 @@ alternative_else_nop_endif .if \regsize == 64 msr tpidrro_el0, x30 // Restored in kernel_ventry .endif + /* + * Defend against branch aliasing attacks by pushing a dummy + * entry onto the return stack and using a RET instruction to + * enter the full-fat kernel vectors. + */ + bl 2f + b . +2: tramp_map_kernel x30 #ifdef CONFIG_RANDOMIZE_BASE adr x30, tramp_vectors + PAGE_SIZE @@ -993,7 +1001,7 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 msr vbar_el1, x30 add x30, x30, #(1b - tramp_vectors) isb - br x30 + ret .endm .macro tramp_exit, regsize = 64 From 332e0288200f6afe070e2c126680130195cec3d9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 14:41:01 +0000 Subject: [PATCH 1825/3326] arm64: Kconfig: Add CONFIG_UNMAP_KERNEL_AT_EL0 Commit 084eb77cd3a8 upstream. Add a Kconfig entry to control use of the entry trampoline, which allows us to unmap the kernel whilst running in userspace and improve the robustness of KASLR. Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/Kconfig | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3cec78c81b33cf..5063d10f3cd5ab 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -806,6 +806,19 @@ config FORCE_MAX_ZONEORDER However for 4K, we choose a higher default value, 11 as opposed to 10, giving us 4M allocations matching the default size used by generic code. +config UNMAP_KERNEL_AT_EL0 + bool "Unmap kernel when running in userspace (aka \"KAISER\")" + default y + help + Some attacks against KASLR make use of the timing difference between + a permission fault which could arise from a page table entry that is + present in the TLB, and a translation fault which always requires a + page table walk. This option defends against these attacks by unmapping + the kernel whilst running in userspace, therefore forcing translation + faults for all of kernel space. + + If unsure, say Y. + menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT From 5e319f4533bfa8df7f03df501d22a169b2f03719 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 14 Nov 2017 16:19:39 +0000 Subject: [PATCH 1826/3326] arm64: Kconfig: Reword UNMAP_KERNEL_AT_EL0 kconfig entry Commit 0617052ddde3 upstream. 
Although CONFIG_UNMAP_KERNEL_AT_EL0 does make KASLR more robust, it's actually more useful as a mitigation against speculation attacks that can leak arbitrary kernel data to userspace through speculation. Reword the Kconfig help message to reflect this, and make the option depend on EXPERT so that it is on by default for the majority of users. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/Kconfig | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 5063d10f3cd5ab..0b6b59d93cf895 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -807,15 +807,14 @@ config FORCE_MAX_ZONEORDER 4M allocations matching the default size used by generic code. config UNMAP_KERNEL_AT_EL0 - bool "Unmap kernel when running in userspace (aka \"KAISER\")" + bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT default y help - Some attacks against KASLR make use of the timing difference between - a permission fault which could arise from a page table entry that is - present in the TLB, and a translation fault which always requires a - page table walk. This option defends against these attacks by unmapping - the kernel whilst running in userspace, therefore forcing translation - faults for all of kernel space. + Speculation attacks against some high-performance processors can + be used to bypass MMU permission checks and leak kernel data to + userspace. This can be defended against by unmapping the kernel + when running in userspace, mapping it back in on exception entry + via a trampoline page in the vector table. If unsure, say Y. From 83ae3355bdfbd44a594f18946d3a37a74529a801 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 27 Nov 2017 18:29:30 +0000 Subject: [PATCH 1827/3326] arm64: Take into account ID_AA64PFR0_EL1.CSV3 Commit 179a56f6f9fb upstream. For non-KASLR kernels where the KPTI behaviour has not been overridden on the command line we can use ID_AA64PFR0_EL1.CSV3 to determine whether or not we should unmap the kernel whilst running at EL0. 
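The CSV3 check boils down to extracting a 4-bit unsigned field at bit 60 of the sanitised register value and requiring KPTI only when it reads as zero; a minimal sketch with an invented ID_AA64PFR0_EL1 value:

/* Sketch of the CSV3 check; the register value is invented. */
#include <stdint.h>
#include <stdio.h>

#define ID_AA64PFR0_CSV3_SHIFT  60

static unsigned int extract_unsigned_field(uint64_t reg, unsigned int shift)
{
        return (reg >> shift) & 0xf;
}

int main(void)
{
        uint64_t pfr0 = 0x1000000000000011ULL;  /* hypothetical: CSV3 = 1 */
        unsigned int csv3 = extract_unsigned_field(pfr0, ID_AA64PFR0_CSV3_SHIFT);

        printf("CSV3=%u -> unmap kernel at EL0: %d\n", csv3, csv3 == 0);
        return 0;
}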
Reviewed-by: Suzuki K Poulose Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/sysreg.h | 1 + arch/arm64/kernel/cpufeature.c | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f707fed5886f01..1efbe29c8b248e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -332,6 +332,7 @@ #define ID_AA64ISAR1_DPB_SHIFT 0 /* id_aa64pfr0 */ +#define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_GIC_SHIFT 24 #define ID_AA64PFR0_ASIMD_SHIFT 20 #define ID_AA64PFR0_FP_SHIFT 16 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 1a6ec079c541cf..378661ce1f8b87 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -125,6 +125,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), @@ -802,6 +803,8 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, int __unused) { + u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + /* Forced on command line? */ if (__kpti_forced) { pr_info_once("kernel page table isolation forced %s by command line option\n", @@ -813,7 +816,9 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) return true; - return false; + /* Defer to CPU feature registers */ + return !cpuid_feature_extract_unsigned_field(pfr0, + ID_AA64PFR0_CSV3_SHIFT); } static int __init parse_kpti(char *str) @@ -918,6 +923,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { }, #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 { + .desc = "Kernel page table isolation (KPTI)", .capability = ARM64_UNMAP_KERNEL_AT_EL0, .def_scope = SCOPE_SYSTEM, .matches = unmap_kernel_at_el0, From 5385e5fe9724479b3b9d7f1b6466b4d85f4eafa9 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Tue, 9 Jan 2018 16:12:18 +0000 Subject: [PATCH 1828/3326] arm64: capabilities: Handle duplicate entries for a capability Commit 67948af41f2e upstream. Sometimes a single capability could be listed multiple times with differing matches(), e.g, CPU errata for different MIDR versions. This breaks verify_local_cpu_feature() and this_cpu_has_cap() as we stop checking for a capability on a CPU with the first entry in the given table, which is not sufficient. Make sure we run the checks for all entries of the same capability. We do this by fixing __this_cpu_has_cap() to run through all the entries in the given table for a match and reuse it for verify_local_cpu_feature(). 
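The fix amounts to scanning the whole table rather than stopping at the first entry with a matching capability id. A simplified model of that behaviour (the structures and callbacks below are stand-ins, not the kernel's):

/* Why duplicate capability entries need a full table scan: the same
 * capability id can appear with different matches() callbacks (e.g. per-MIDR
 * errata), so stopping at the first id match is not enough. */
#include <stdbool.h>
#include <stdio.h>

struct cpu_cap {
        const char *desc;
        int capability;
        bool (*matches)(void);
};

static bool never(void)  { return false; }
static bool always(void) { return true; }

#define CAP_E1003 1

static const struct cpu_cap caps[] = {
        { "Falkor erratum 1003", CAP_E1003, never },    /* MIDR does not match */
        { "Kryo erratum 1003",   CAP_E1003, always },   /* this entry matches */
        { NULL, 0, NULL },
};

static bool this_cpu_has_cap(const struct cpu_cap *table, int cap)
{
        const struct cpu_cap *c;

        for (c = table; c->desc; c++)
                if (c->capability == cap && c->matches && c->matches())
                        return true;
        return false;
}

int main(void)
{
        printf("has E1003 workaround: %d\n", this_cpu_has_cap(caps, CAP_E1003));
        return 0;
}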
Cc: Mark Rutland Cc: Will Deacon Acked-by: Marc Zyngier Signed-off-by: Suzuki K Poulose Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/cpufeature.c | 44 ++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 378661ce1f8b87..c4e842f45b7c3f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1047,6 +1047,26 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) cap_set_elf_hwcap(hwcaps); } +/* + * Check if the current CPU has a given feature capability. + * Should be called from non-preemptible context. + */ +static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, + unsigned int cap) +{ + const struct arm64_cpu_capabilities *caps; + + if (WARN_ON(preemptible())) + return false; + + for (caps = cap_array; caps->desc; caps++) + if (caps->capability == cap && + caps->matches && + caps->matches(caps, SCOPE_LOCAL_CPU)) + return true; + return false; +} + void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, const char *info) { @@ -1125,8 +1145,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) } static void -verify_local_cpu_features(const struct arm64_cpu_capabilities *caps) +verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list) { + const struct arm64_cpu_capabilities *caps = caps_list; for (; caps->matches; caps++) { if (!cpus_have_cap(caps->capability)) continue; @@ -1134,7 +1155,7 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps) * If the new CPU misses an advertised feature, we cannot proceed * further, park the cpu. */ - if (!caps->matches(caps, SCOPE_LOCAL_CPU)) { + if (!__this_cpu_has_cap(caps_list, caps->capability)) { pr_crit("CPU%d: missing feature: %s\n", smp_processor_id(), caps->desc); cpu_die_early(); @@ -1195,25 +1216,6 @@ static void __init mark_const_caps_ready(void) static_branch_enable(&arm64_const_caps_ready); } -/* - * Check if the current CPU has a given feature capability. - * Should be called from non-preemptible context. - */ -static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, - unsigned int cap) -{ - const struct arm64_cpu_capabilities *caps; - - if (WARN_ON(preemptible())) - return false; - - for (caps = cap_array; caps->desc; caps++) - if (caps->capability == cap && caps->matches) - return caps->matches(caps, SCOPE_LOCAL_CPU); - - return false; -} - extern const struct arm64_cpu_capabilities arm64_errata[]; bool this_cpu_has_cap(unsigned int cap) From 0b2995145ea4fb9f5268f51c13d25213c039486e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 1 Dec 2017 17:33:48 +0000 Subject: [PATCH 1829/3326] arm64: mm: Introduce TTBR_ASID_MASK for getting at the ASID in the TTBR Commit b519538dfefc upstream. There are now a handful of open-coded masks to extract the ASID from a TTBR value, so introduce a TTBR_ASID_MASK and use that instead. 
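For example, copying the active ASID from one TTBR value into another with the named mask rather than open-coding the shift (the TTBR values below are invented):

/* Example of using the named mask: splice the active ASID from a TTBR0 value
 * into a TTBR1 value instead of open-coding 0xffffUL << 48. */
#include <stdint.h>
#include <stdio.h>

#define TTBR_ASID_MASK  (0xffffULL << 48)

int main(void)
{
        uint64_t ttbr0 = (0x0042ULL << 48) | 0x40001000ULL;     /* ASID 0x42 + pgd */
        uint64_t ttbr1 = 0x40002000ULL;                         /* no ASID yet */

        ttbr1 &= ~TTBR_ASID_MASK;
        ttbr1 |= ttbr0 & TTBR_ASID_MASK;        /* copy the active ASID across */

        printf("ttbr1 = 0x%016llx\n", (unsigned long long)ttbr1);
        return 0;
}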
Suggested-by: Mark Rutland Reviewed-by: Mark Rutland Tested-by: Laura Abbott Tested-by: Shanker Donthineni Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/asm-uaccess.h | 3 ++- arch/arm64/include/asm/mmu.h | 1 + arch/arm64/include/asm/uaccess.h | 4 ++-- arch/arm64/kernel/entry.S | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 21b8cf304028b0..f4f234b6155e9b 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -17,7 +18,7 @@ msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 isb sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE - bic \tmp1, \tmp1, #(0xffff << 48) + bic \tmp1, \tmp1, #TTBR_ASID_MASK msr ttbr1_el1, \tmp1 // set reserved ASID isb .endm diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index da6f12e4071418..6f7bdb89817ffe 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -18,6 +18,7 @@ #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */ #define USER_ASID_FLAG (UL(1) << 48) +#define TTBR_ASID_MASK (UL(0xffff) << 48) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 750a3b76a01c34..6eadf55ebaf042 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -112,7 +112,7 @@ static inline void __uaccess_ttbr0_disable(void) write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1); isb(); /* Set reserved ASID */ - ttbr &= ~(0xffffUL << 48); + ttbr &= ~TTBR_ASID_MASK; write_sysreg(ttbr, ttbr1_el1); isb(); } @@ -131,7 +131,7 @@ static inline void __uaccess_ttbr0_enable(void) /* Restore active ASID */ ttbr1 = read_sysreg(ttbr1_el1); - ttbr1 |= ttbr0 & (0xffffUL << 48); + ttbr1 |= ttbr0 & TTBR_ASID_MASK; write_sysreg(ttbr1, ttbr1_el1); isb(); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index b32ba0332ccb42..67b6895fb62891 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -205,7 +205,7 @@ alternative_else_nop_endif .if \el != 0 mrs x21, ttbr1_el1 - tst x21, #0xffff << 48 // Check for the reserved ASID + tst x21, #TTBR_ASID_MASK // Check for the reserved ASID orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR b.eq 1f // TTBR0 access already disabled and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR From fedf5a743cf2e024c5b557abfb1a45f3c06b3c81 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 10 Jan 2018 13:18:30 +0000 Subject: [PATCH 1830/3326] arm64: kpti: Fix the interaction between ASID switching and software PAN Commit 6b88a32c7af6 upstream. With ARM64_SW_TTBR0_PAN enabled, the exception entry code checks the active ASID to decide whether user access was enabled (non-zero ASID) when the exception was taken. On return from exception, if user access was previously disabled, it re-instates TTBR0_EL1 from the per-thread saved value (updated in switch_mm() or efi_set_pgd()). Commit 7655abb95386 ("arm64: mm: Move ASID from TTBR0 to TTBR1") makes a TTBR0_EL1 + ASID switching non-atomic. Subsequently, commit 27a921e75711 ("arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN") changes the __uaccess_ttbr0_disable() function and asm macro to first write the reserved TTBR0_EL1 followed by the ASID=0 update in TTBR1_EL1. 
If an exception occurs between these two, the exception return code will re-instate a valid TTBR0_EL1. Similar scenario can happen in cpu_switch_mm() between setting the reserved TTBR0_EL1 and the ASID update in cpu_do_switch_mm(). This patch reverts the entry.S check for ASID == 0 to TTBR0_EL1 and disables the interrupts around the TTBR0_EL1 and ASID switching code in __uaccess_ttbr0_disable(). It also ensures that, when returning from the EFI runtime services, efi_set_pgd() doesn't leave a non-zero ASID in TTBR1_EL1 by using uaccess_ttbr0_{enable,disable}. The accesses to current_thread_info()->ttbr0 are updated to use READ_ONCE/WRITE_ONCE. As a safety measure, __uaccess_ttbr0_enable() always masks out any existing non-zero ASID TTBR1_EL1 before writing in the new ASID. Fixes: 27a921e75711 ("arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN") Acked-by: Will Deacon Reported-by: Ard Biesheuvel Tested-by: Ard Biesheuvel Reviewed-by: James Morse Tested-by: James Morse Co-developed-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/asm-uaccess.h | 12 +++++++----- arch/arm64/include/asm/efi.h | 12 +++++++----- arch/arm64/include/asm/mmu_context.h | 3 ++- arch/arm64/include/asm/uaccess.h | 9 ++++++--- arch/arm64/kernel/entry.S | 2 +- arch/arm64/lib/clear_user.S | 2 +- arch/arm64/lib/copy_from_user.S | 2 +- arch/arm64/lib/copy_in_user.S | 2 +- arch/arm64/lib/copy_to_user.S | 2 +- arch/arm64/mm/cache.S | 2 +- arch/arm64/mm/proc.S | 3 +++ arch/arm64/xen/hypercall.S | 2 +- 12 files changed, 32 insertions(+), 21 deletions(-) diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index f4f234b6155e9b..dd49c3567f20bd 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -14,11 +14,11 @@ #ifdef CONFIG_ARM64_SW_TTBR0_PAN .macro __uaccess_ttbr0_disable, tmp1 mrs \tmp1, ttbr1_el1 // swapper_pg_dir + bic \tmp1, \tmp1, #TTBR_ASID_MASK add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 isb sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE - bic \tmp1, \tmp1, #TTBR_ASID_MASK msr ttbr1_el1, \tmp1 // set reserved ASID isb .endm @@ -35,9 +35,11 @@ isb .endm - .macro uaccess_ttbr0_disable, tmp1 + .macro uaccess_ttbr0_disable, tmp1, tmp2 alternative_if_not ARM64_HAS_PAN + save_and_disable_irq \tmp2 // avoid preemption __uaccess_ttbr0_disable \tmp1 + restore_irq \tmp2 alternative_else_nop_endif .endm @@ -49,7 +51,7 @@ alternative_if_not ARM64_HAS_PAN alternative_else_nop_endif .endm #else - .macro uaccess_ttbr0_disable, tmp1 + .macro uaccess_ttbr0_disable, tmp1, tmp2 .endm .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 @@ -59,8 +61,8 @@ alternative_else_nop_endif /* * These macros are no-ops when UAO is present. */ - .macro uaccess_disable_not_uao, tmp1 - uaccess_ttbr0_disable \tmp1 + .macro uaccess_disable_not_uao, tmp1, tmp2 + uaccess_ttbr0_disable \tmp1, \tmp2 alternative_if ARM64_ALT_PAN_NOT_UAO SET_PSTATE_PAN(1) alternative_else_nop_endif diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index c4cd5081d78bc4..8389050328bba2 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -121,19 +121,21 @@ static inline void efi_set_pgd(struct mm_struct *mm) if (mm != current->active_mm) { /* * Update the current thread's saved ttbr0 since it is - * restored as part of a return from exception. 
Set - * the hardware TTBR0_EL1 using cpu_switch_mm() - * directly to enable potential errata workarounds. + * restored as part of a return from exception. Enable + * access to the valid TTBR0_EL1 and invoke the errata + * workaround directly since there is no return from + * exception when invoking the EFI run-time services. */ update_saved_ttbr0(current, mm); - cpu_switch_mm(mm->pgd, mm); + uaccess_ttbr0_enable(); + post_ttbr_update_workaround(); } else { /* * Defer the switch to the current thread's TTBR0_EL1 * until uaccess_enable(). Restore the current * thread's saved ttbr0 corresponding to its active_mm */ - cpu_set_reserved_ttbr0(); + uaccess_ttbr0_disable(); update_saved_ttbr0(current, current->active_mm); } } diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index da29766a181ca7..779d7a2ec5eccb 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -175,7 +175,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk, else ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; - task_thread_info(tsk)->ttbr0 = ttbr; + WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr); } #else static inline void update_saved_ttbr0(struct task_struct *tsk, @@ -230,6 +230,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, #define activate_mm(prev,next) switch_mm(prev, next, current) void verify_cpu_asid_bits(void); +void post_ttbr_update_workaround(void); #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 6eadf55ebaf042..335dfe02a846fe 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -105,16 +105,18 @@ static inline void set_fs(mm_segment_t fs) #ifdef CONFIG_ARM64_SW_TTBR0_PAN static inline void __uaccess_ttbr0_disable(void) { - unsigned long ttbr; + unsigned long flags, ttbr; + local_irq_save(flags); ttbr = read_sysreg(ttbr1_el1); + ttbr &= ~TTBR_ASID_MASK; /* reserved_ttbr0 placed at the end of swapper_pg_dir */ write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1); isb(); /* Set reserved ASID */ - ttbr &= ~TTBR_ASID_MASK; write_sysreg(ttbr, ttbr1_el1); isb(); + local_irq_restore(flags); } static inline void __uaccess_ttbr0_enable(void) @@ -127,10 +129,11 @@ static inline void __uaccess_ttbr0_enable(void) * roll-over and an update of 'ttbr0'. 
*/ local_irq_save(flags); - ttbr0 = current_thread_info()->ttbr0; + ttbr0 = READ_ONCE(current_thread_info()->ttbr0); /* Restore active ASID */ ttbr1 = read_sysreg(ttbr1_el1); + ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */ ttbr1 |= ttbr0 & TTBR_ASID_MASK; write_sysreg(ttbr1, ttbr1_el1); isb(); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 67b6895fb62891..8d7112aa8afe50 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -204,7 +204,7 @@ alternative_if ARM64_HAS_PAN alternative_else_nop_endif .if \el != 0 - mrs x21, ttbr1_el1 + mrs x21, ttbr0_el1 tst x21, #TTBR_ASID_MASK // Check for the reserved ASID orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR b.eq 1f // TTBR0 access already disabled diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 8f9c4641e706b2..3d69a8d41fa5e5 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -50,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 b.mi 5f uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 - uaccess_disable_not_uao x2 + uaccess_disable_not_uao x2, x3 ret ENDPROC(__clear_user) diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 69d86a80f3e2a3..20305d48504675 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -67,7 +67,7 @@ ENTRY(__arch_copy_from_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 // Nothing to copy ret ENDPROC(__arch_copy_from_user) diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index e442b531252ad8..fbb090f431a5c0 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -68,7 +68,7 @@ ENTRY(raw_copy_in_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(raw_copy_in_user) diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 318f15d5c336cc..fda6172d6b8881 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -66,7 +66,7 @@ ENTRY(__arch_copy_to_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(__arch_copy_to_user) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 6cd20a8c0952a1..91464e7f77cc33 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU isb mov x0, #0 1: - uaccess_ttbr0_disable x1 + uaccess_ttbr0_disable x1, x2 ret 9: mov x0, #-EFAULT diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 447537c1699dd8..2d406b4b522f84 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -140,6 +140,9 @@ ENDPROC(cpu_do_resume) ENTRY(cpu_do_switch_mm) mrs x2, ttbr1_el1 mmid x1, x1 // get mm->context.id +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + bfi x0, x1, #48, #16 // set the ASID field in TTBR0 +#endif bfi x2, x1, #48, #16 // set the ASID msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) isb diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index acdbd2c9e899c1..c5f05c4a4d0088 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -107,6 +107,6 @@ ENTRY(privcmd_call) /* * Disable userspace access from kernel once the hyp call completed. 
*/ - uaccess_ttbr0_disable x6 + uaccess_ttbr0_disable x6, x7 ret ENDPROC(privcmd_call); From 7aca19ea5a45e575de769578d5d78ab7a540ebcc Mon Sep 17 00:00:00 2001 From: Jayachandran C Date: Sun, 7 Jan 2018 22:53:35 -0800 Subject: [PATCH 1831/3326] arm64: cputype: Add MIDR values for Cavium ThunderX2 CPUs Commit 0d90718871fe upstream. Add the older Broadcom ID as well as the new Cavium ID for ThunderX2 CPUs. Signed-off-by: Jayachandran C Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/cputype.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index da036fe685732c..157afb9f7a3516 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -87,6 +87,7 @@ #define CAVIUM_CPU_PART_THUNDERX 0x0A1 #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2 #define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3 +#define CAVIUM_CPU_PART_THUNDERX2 0x0AF #define BRCM_CPU_PART_VULCAN 0x516 @@ -100,6 +101,8 @@ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) +#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2) +#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN) #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) From c98c8c23585643cbf255415b02cb32c95baac82e Mon Sep 17 00:00:00 2001 From: Jayachandran C Date: Fri, 19 Jan 2018 04:22:48 -0800 Subject: [PATCH 1832/3326] arm64: Turn on KPTI only on CPUs that need it Commit 0ba2e29c7fc1 upstream. Whitelist Broadcom Vulcan/Cavium ThunderX2 processors in unmap_kernel_at_el0(). These CPUs are not vulnerable to CVE-2017-5754 and do not need KPTI when KASLR is off. Acked-by: Will Deacon Signed-off-by: Jayachandran C Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/cpufeature.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c4e842f45b7c3f..46b1507ea60557 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -816,6 +816,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) return true; + /* Don't force KPTI for CPUs that are not vulnerable */ + switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) { + case MIDR_CAVIUM_THUNDERX2: + case MIDR_BRCM_VULCAN: + return false; + } + /* Defer to CPU feature registers */ return !cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV3_SHIFT); From 69288201803aecffd7f17ffa9c0c5043ffb8367c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Jan 2018 11:59:53 +0000 Subject: [PATCH 1833/3326] arm64: kpti: Make use of nG dependent on arm64_kernel_unmapped_at_el0() Commit 41acec624087 upstream. 
To allow systems which do not require kpti to continue running with global kernel mappings (which appears to be a requirement for Cavium ThunderX due to a CPU erratum), make the use of nG in the kernel page tables dependent on arm64_kernel_unmapped_at_el0(), which is resolved at runtime. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/kernel-pgtable.h | 12 ++-------- arch/arm64/include/asm/pgtable-prot.h | 30 ++++++++++++------------- 2 files changed, 16 insertions(+), 26 deletions(-) diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 77a27af013710d..7803343e5881fb 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -78,16 +78,8 @@ /* * Initial memory map attributes. */ -#define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) -#define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) - -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -#define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG) -#define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG) -#else -#define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS -#define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS -#endif +#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) +#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) #if ARM64_SWAPPER_USES_SECTION_MAPS #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 22a926825e3fb8..2db84df5eb4224 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -37,13 +37,11 @@ #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -#define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG) -#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG) -#else -#define PROT_DEFAULT _PROT_DEFAULT -#define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT -#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ +#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) +#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? 
PMD_SECT_NG : 0) + +#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) +#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) @@ -55,22 +53,22 @@ #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) -#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) -#define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG) +#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) +#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT -#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) -#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) -#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) +#define PAGE_KERNEL __pgprot(PROT_NORMAL) +#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY) +#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY) +#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN) +#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT) #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) -#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) -#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) +#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) +#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) From ee28fed5ccc66fbdef4119451aa6754501705a7e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Jan 2018 11:59:54 +0000 Subject: [PATCH 1834/3326] arm64: mm: Permit transitioning from Global to Non-Global without BBM Commit 4e6020565596 upstream. Break-before-make is not needed when transitioning from Global to Non-Global mappings, provided that the contiguous hint is not being used. 
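The new check in pgattr_change_is_safe() below is a single XOR-and-test. A minimal userspace model of the rule (bit positions here are for illustration only; the kernel takes them from its pgtable headers):

  #include <stdbool.h>
  #include <stdint.h>

  #define PTE_NG   (1ULL << 11)   /* nG bit; position shown for illustration */
  #define PTE_CONT (1ULL << 52)   /* contiguous hint; position shown for illustration */

  /*
   * Model of the relaxed rule: if the only bit that differs between the
   * old and new descriptors is nG, and the new descriptor sets it, the
   * change may be made live without break-before-make -- unless either
   * descriptor uses the contiguous hint.
   */
  static bool safe_global_to_nonglobal(uint64_t old, uint64_t new)
  {
          if ((old | new) & PTE_CONT)
                  return false;

          return ((old ^ new) == PTE_NG) && (new & PTE_NG);
  }
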
Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/mm/mmu.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 606c3654186ffa..fa20124c19d57a 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -117,6 +117,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new) if ((old | new) & PTE_CONT) return false; + /* Transitioning from Global to Non-Global is safe */ + if (((old ^ new) == PTE_NG) && (new & PTE_NG)) + return true; + return ((old ^ new) & ~mask) == 0; } From 2feb36ebe4503551927d9c798cd454b7f01bd442 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 6 Feb 2018 22:22:50 +0000 Subject: [PATCH 1835/3326] arm64: kpti: Add ->enable callback to remap swapper using nG mappings Commit f992b4dfd58b upstream. Defaulting to global mappings for kernel space is generally good for performance and appears to be necessary for Cavium ThunderX. If we subsequently decide that we need to enable kpti, then we need to rewrite our existing page table entries to be non-global. This is fiddly, and made worse by the possible use of contiguous mappings, which require a strict break-before-make sequence. Since the enable callback runs on each online CPU from stop_machine context, we can have all CPUs enter the idmap, where secondaries can wait for the primary CPU to rewrite swapper with its MMU off. It's all fairly horrible, but at least it only runs once. Tested-by: Marc Zyngier Reviewed-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/assembler.h | 4 + arch/arm64/kernel/cpufeature.c | 25 ++++ arch/arm64/mm/proc.S | 201 ++++++++++++++++++++++++++++- 3 files changed, 223 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index a1e5c1eb3c85a6..c8ec722cc85538 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -486,4 +486,8 @@ alternative_else_nop_endif #endif .endm + .macro pte_to_phys, phys, pte + and \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT) + .endm + #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 46b1507ea60557..194dd5dbf3288a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -828,6 +828,30 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, ID_AA64PFR0_CSV3_SHIFT); } +static int kpti_install_ng_mappings(void *__unused) +{ + typedef void (kpti_remap_fn)(int, int, phys_addr_t); + extern kpti_remap_fn idmap_kpti_install_ng_mappings; + kpti_remap_fn *remap_fn; + + static bool kpti_applied = false; + int cpu = smp_processor_id(); + + if (kpti_applied) + return 0; + + remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); + + cpu_install_idmap(); + remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir)); + cpu_uninstall_idmap(); + + if (!cpu) + kpti_applied = true; + + return 0; +} + static int __init parse_kpti(char *str) { bool enabled; @@ -934,6 +958,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_UNMAP_KERNEL_AT_EL0, .def_scope = SCOPE_SYSTEM, .matches = unmap_kernel_at_el0, + .enable = kpti_install_ng_mappings, }, #endif { diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 2d406b4b522f84..68eabe878dad1c 100644 --- 
a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -153,6 +153,16 @@ ENTRY(cpu_do_switch_mm) ENDPROC(cpu_do_switch_mm) .pushsection ".idmap.text", "ax" + +.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 + adrp \tmp1, empty_zero_page + msr ttbr1_el1, \tmp2 + isb + tlbi vmalle1 + dsb nsh + isb +.endm + /* * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd) * @@ -163,13 +173,7 @@ ENTRY(idmap_cpu_replace_ttbr1) mrs x2, daif msr daifset, #0xf - adrp x1, empty_zero_page - msr ttbr1_el1, x1 - isb - - tlbi vmalle1 - dsb nsh - isb + __idmap_cpu_set_reserved_ttbr1 x1, x3 msr ttbr1_el1, x0 isb @@ -180,6 +184,189 @@ ENTRY(idmap_cpu_replace_ttbr1) ENDPROC(idmap_cpu_replace_ttbr1) .popsection +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + .pushsection ".idmap.text", "ax" + + .macro __idmap_kpti_get_pgtable_ent, type + dc cvac, cur_\()\type\()p // Ensure any existing dirty + dmb sy // lines are written back before + ldr \type, [cur_\()\type\()p] // loading the entry + tbz \type, #0, next_\()\type // Skip invalid entries + .endm + + .macro __idmap_kpti_put_pgtable_ent_ng, type + orr \type, \type, #PTE_NG // Same bit for blocks and pages + str \type, [cur_\()\type\()p] // Update the entry and ensure it + dc civac, cur_\()\type\()p // is visible to all CPUs. + .endm + +/* + * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper) + * + * Called exactly once from stop_machine context by each CPU found during boot. + */ +__idmap_kpti_flag: + .long 1 +ENTRY(idmap_kpti_install_ng_mappings) + cpu .req w0 + num_cpus .req w1 + swapper_pa .req x2 + swapper_ttb .req x3 + flag_ptr .req x4 + cur_pgdp .req x5 + end_pgdp .req x6 + pgd .req x7 + cur_pudp .req x8 + end_pudp .req x9 + pud .req x10 + cur_pmdp .req x11 + end_pmdp .req x12 + pmd .req x13 + cur_ptep .req x14 + end_ptep .req x15 + pte .req x16 + + mrs swapper_ttb, ttbr1_el1 + adr flag_ptr, __idmap_kpti_flag + + cbnz cpu, __idmap_kpti_secondary + + /* We're the boot CPU. Wait for the others to catch up */ + sevl +1: wfe + ldaxr w18, [flag_ptr] + eor w18, w18, num_cpus + cbnz w18, 1b + + /* We need to walk swapper, so turn off the MMU. */ + mrs x18, sctlr_el1 + bic x18, x18, #SCTLR_ELx_M + msr sctlr_el1, x18 + isb + + /* Everybody is enjoying the idmap, so we can rewrite swapper. 
*/ + /* PGD */ + mov cur_pgdp, swapper_pa + add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) +do_pgd: __idmap_kpti_get_pgtable_ent pgd + tbnz pgd, #1, walk_puds + __idmap_kpti_put_pgtable_ent_ng pgd +next_pgd: + add cur_pgdp, cur_pgdp, #8 + cmp cur_pgdp, end_pgdp + b.ne do_pgd + + /* Publish the updated tables and nuke all the TLBs */ + dsb sy + tlbi vmalle1is + dsb ish + isb + + /* We're done: fire up the MMU again */ + mrs x18, sctlr_el1 + orr x18, x18, #SCTLR_ELx_M + msr sctlr_el1, x18 + isb + + /* Set the flag to zero to indicate that we're all done */ + str wzr, [flag_ptr] + ret + + /* PUD */ +walk_puds: + .if CONFIG_PGTABLE_LEVELS > 3 + pte_to_phys cur_pudp, pgd + add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) +do_pud: __idmap_kpti_get_pgtable_ent pud + tbnz pud, #1, walk_pmds + __idmap_kpti_put_pgtable_ent_ng pud +next_pud: + add cur_pudp, cur_pudp, 8 + cmp cur_pudp, end_pudp + b.ne do_pud + b next_pgd + .else /* CONFIG_PGTABLE_LEVELS <= 3 */ + mov pud, pgd + b walk_pmds +next_pud: + b next_pgd + .endif + + /* PMD */ +walk_pmds: + .if CONFIG_PGTABLE_LEVELS > 2 + pte_to_phys cur_pmdp, pud + add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) +do_pmd: __idmap_kpti_get_pgtable_ent pmd + tbnz pmd, #1, walk_ptes + __idmap_kpti_put_pgtable_ent_ng pmd +next_pmd: + add cur_pmdp, cur_pmdp, #8 + cmp cur_pmdp, end_pmdp + b.ne do_pmd + b next_pud + .else /* CONFIG_PGTABLE_LEVELS <= 2 */ + mov pmd, pud + b walk_ptes +next_pmd: + b next_pud + .endif + + /* PTE */ +walk_ptes: + pte_to_phys cur_ptep, pmd + add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) +do_pte: __idmap_kpti_get_pgtable_ent pte + __idmap_kpti_put_pgtable_ent_ng pte +next_pte: + add cur_ptep, cur_ptep, #8 + cmp cur_ptep, end_ptep + b.ne do_pte + b next_pmd + + /* Secondary CPUs end up here */ +__idmap_kpti_secondary: + /* Uninstall swapper before surgery begins */ + __idmap_cpu_set_reserved_ttbr1 x18, x17 + + /* Increment the flag to let the boot CPU we're ready */ +1: ldxr w18, [flag_ptr] + add w18, w18, #1 + stxr w17, w18, [flag_ptr] + cbnz w17, 1b + + /* Wait for the boot CPU to finish messing around with swapper */ + sevl +1: wfe + ldxr w18, [flag_ptr] + cbnz w18, 1b + + /* All done, act like nothing happened */ + msr ttbr1_el1, swapper_ttb + isb + ret + + .unreq cpu + .unreq num_cpus + .unreq swapper_pa + .unreq swapper_ttb + .unreq flag_ptr + .unreq cur_pgdp + .unreq end_pgdp + .unreq pgd + .unreq cur_pudp + .unreq end_pudp + .unreq pud + .unreq cur_pmdp + .unreq end_pmdp + .unreq pmd + .unreq cur_ptep + .unreq end_ptep + .unreq pte +ENDPROC(idmap_kpti_install_ng_mappings) + .popsection +#endif + /* * __cpu_setup * From e39247ca17146e8baae3b0703fe5f27298a60189 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 29 Jan 2018 11:59:56 +0000 Subject: [PATCH 1836/3326] arm64: Force KPTI to be disabled on Cavium ThunderX Commit 6dc52b15c4a4 upstream. Cavium ThunderX's erratum 27456 results in a corruption of icache entries that are loaded from memory that is mapped as non-global (i.e. ASID-tagged). As KPTI is based on memory being mapped non-global, let's prevent it from kicking in if this erratum is detected. 
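Taken together with the earlier whitelist and command-line handling, the decision in unmap_kernel_at_el0() now layers the erratum check on top of everything else. A hedged userspace model of that ordering (kernel helpers reduced to plain booleans, names invented for the sketch):

  #include <stdbool.h>

  static int kpti_forced;         /* 0: not forced, >0: forced on, <0: forced off */

  static bool need_kpti(bool has_cavium_27456, bool forced_on_cmdline,
                        bool forced_off_cmdline, bool kaslr_enabled,
                        bool cpu_whitelisted, bool csv3_not_vulnerable)
  {
          if (has_cavium_27456)           /* nG mappings corrupt the I-cache */
                  kpti_forced = -1;
          else if (forced_on_cmdline)
                  kpti_forced = 1;
          else if (forced_off_cmdline)
                  kpti_forced = -1;

          if (kpti_forced)                /* erratum or kpti= on the command line */
                  return kpti_forced > 0;

          if (kaslr_enabled)              /* KASLR wants the kernel hidden */
                  return true;

          if (cpu_whitelisted)            /* e.g. ThunderX2, Vulcan */
                  return false;

          return !csv3_not_vulnerable;    /* fall back to ID_AA64PFR0_EL1.CSV3 */
  }
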
Signed-off-by: Marc Zyngier [will: Update comment] Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/cpufeature.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 194dd5dbf3288a..451b319323c1da 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -803,12 +803,23 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, int __unused) { + char const *str = "command line option"; u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); - /* Forced on command line? */ + /* + * For reasons that aren't entirely clear, enabling KPTI on Cavium + * ThunderX leads to apparent I-cache corruption of kernel text, which + * ends as well as you might imagine. Don't even try. + */ + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) { + str = "ARM64_WORKAROUND_CAVIUM_27456"; + __kpti_forced = -1; + } + + /* Forced? */ if (__kpti_forced) { - pr_info_once("kernel page table isolation forced %s by command line option\n", - __kpti_forced > 0 ? "ON" : "OFF"); + pr_info_once("kernel page table isolation forced %s by %s\n", + __kpti_forced > 0 ? "ON" : "OFF", str); return __kpti_forced > 0; } From 8703f27d7c5db70ffd793a813160f28e810cc1bc Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Jan 2018 11:59:58 +0000 Subject: [PATCH 1837/3326] arm64: entry: Reword comment about post_ttbr_update_workaround Commit f167211a93ac upstream. We don't fully understand the Cavium ThunderX erratum, but it appears that mapping the kernel as nG can lead to horrible consequences such as attempting to execute userspace from kernel context. Since kpti isn't enabled for these CPUs anyway, simplify the comment justifying the lack of post_ttbr_update_workaround in the exception trampoline. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/entry.S | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 8d7112aa8afe50..9766039eef31d7 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -962,16 +962,9 @@ alternative_else_nop_endif orr \tmp, \tmp, #USER_ASID_FLAG msr ttbr1_el1, \tmp /* - * We avoid running the post_ttbr_update_workaround here because the - * user and kernel ASIDs don't have conflicting mappings, so any - * "blessing" as described in: - * - * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com - * - * will not hurt correctness. Whilst this may partially defeat the - * point of using split ASIDs in the first place, it avoids - * the hit of invalidating the entire I-cache on every return to - * userspace. + * We avoid running the post_ttbr_update_workaround here because + * it's only needed by Cavium ThunderX, which requires KPTI to be + * disabled. */ .endm From 1449a173a2ee0db4fef6669c1bdc71a5561cb267 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 29 Jan 2018 12:00:00 +0000 Subject: [PATCH 1838/3326] arm64: idmap: Use "awx" flags for .idmap.text .pushsection directives Commit 439e70e27a51 upstream. The identity map is mapped as both writeable and executable by the SWAPPER_MM_MMUFLAGS and this is relied upon by the kpti code to manage a synchronisation flag. 
Update the .pushsection flags to reflect the actual mapping attributes. Reported-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/cpu-reset.S | 2 +- arch/arm64/kernel/head.S | 2 +- arch/arm64/kernel/sleep.S | 2 +- arch/arm64/mm/proc.S | 8 ++++---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 2a752cb2a0f35a..8021b46c97431d 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -16,7 +16,7 @@ #include .text -.pushsection .idmap.text, "ax" +.pushsection .idmap.text, "awx" /* * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 6657ae4f3c7a39..261f3f88364c01 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -371,7 +371,7 @@ ENDPROC(__primary_switched) * end early head section, begin head code that is also used for * hotplug and needs to have the same protections as the text region */ - .section ".idmap.text","ax" + .section ".idmap.text","awx" ENTRY(kimage_vaddr) .quad _text - TEXT_OFFSET diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 10dd16d7902d28..bebec8ef9372af 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter) ret ENDPROC(__cpu_suspend_enter) - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(cpu_resume) bl el2_setup // if in EL2 drop to EL1 cleanly bl __cpu_setup diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 68eabe878dad1c..c25e58bc291005 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -86,7 +86,7 @@ ENDPROC(cpu_do_suspend) * * x0: Address of context pointer */ - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(cpu_do_resume) ldp x2, x3, [x0] ldp x4, x5, [x0, #16] @@ -152,7 +152,7 @@ ENTRY(cpu_do_switch_mm) ret ENDPROC(cpu_do_switch_mm) - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 adrp \tmp1, empty_zero_page @@ -185,7 +185,7 @@ ENDPROC(idmap_cpu_replace_ttbr1) .popsection #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" .macro __idmap_kpti_get_pgtable_ent, type dc cvac, cur_\()\type\()p // Ensure any existing dirty @@ -373,7 +373,7 @@ ENDPROC(idmap_kpti_install_ng_mappings) * Initialise the processor for turning the MMU on. Return in x0 the * value of the SCTLR_EL1 register. */ - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(__cpu_setup) tlbi vmalle1 // Invalidate local TLB dsb nsh From 6afdaf109c34acd9cbc85ef687e3e4e991e5fd89 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:16 +0000 Subject: [PATCH 1839/3326] arm64: barrier: Add CSDB macros to control data-value prediction Commit 669474e772b9 upstream. For CPUs capable of data value prediction, CSDB waits for any outstanding predictions to architecturally resolve before allowing speculative execution to continue. Provide macros to expose it to the arch code. 
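CSDB is encoded in the hint space ("hint #20"), so it executes as a NOP on CPUs that do not implement it. A sketch of the intended kernel-side use, anticipating the array_index_mask_nospec() helper added by the next patch (the array and bound below are made up):

  /*
   * Bounds-checked load whose index cannot be used speculatively out of
   * range: the mask is ~0UL only when idx < nr, and the CSDB issued
   * inside array_index_mask_nospec() keeps predicted data values from
   * flowing past the masking.
   */
  static unsigned long load_element(const unsigned long *array,
                                    unsigned long idx, unsigned long nr)
  {
          if (idx >= nr)
                  return 0;

          idx &= array_index_mask_nospec(idx, nr);
          return array[idx];
  }
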
Reviewed-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/assembler.h | 7 +++++++ arch/arm64/include/asm/barrier.h | 2 ++ 2 files changed, 9 insertions(+) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index c8ec722cc85538..20f5845e529533 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -95,6 +95,13 @@ dmb \opt .endm +/* + * Value prediction barrier + */ + .macro csdb + hint #20 + .endm + /* * NOP sequence */ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 0fe7e43b7fbc26..c68fdc5707edbc 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -31,6 +31,8 @@ #define dmb(opt) asm volatile("dmb " #opt : : : "memory") #define dsb(opt) asm volatile("dsb " #opt : : : "memory") +#define csdb() asm volatile("hint #20" : : : "memory") + #define mb() dsb(sy) #define rmb() dsb(ld) #define wmb() dsb(st) From 0a532ea3ef1481c5af869728abac9179609acba8 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 5 Feb 2018 15:34:17 +0000 Subject: [PATCH 1840/3326] arm64: Implement array_index_mask_nospec() Commit 022620eed3d0 upstream. Provide an optimised, assembly implementation of array_index_mask_nospec() for arm64 so that the compiler is not in a position to transform the code in ways which affect its ability to inhibit speculation (e.g. by introducing conditional branches). This is similar to the sequence used by x86, modulo architectural differences in the carry/borrow flags. Reviewed-by: Mark Rutland Signed-off-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/barrier.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index c68fdc5707edbc..0b0755c961ac13 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -40,6 +40,27 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) +/* + * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz + * and 0 otherwise. + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long idx, + unsigned long sz) +{ + unsigned long mask; + + asm volatile( + " cmp %1, %2\n" + " sbc %0, xzr, xzr\n" + : "=r" (mask) + : "r" (idx), "Ir" (sz) + : "cc"); + + csdb(); + return mask; +} + #define __smp_mb() dmb(ish) #define __smp_rmb() dmb(ishld) #define __smp_wmb() dmb(ishst) From 535357c9d3e94115b87e11a3014ea29c8a0c26fb Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 5 Feb 2018 15:34:18 +0000 Subject: [PATCH 1841/3326] arm64: Make USER_DS an inclusive limit Commit 51369e398d0d upstream. Currently, USER_DS represents an exclusive limit while KERNEL_DS is inclusive. In order to do some clever trickery for speculation-safe masking, we need them both to behave equivalently - there aren't enough bits to make KERNEL_DS exclusive, so we have precisely one option. This also happens to correct a longstanding false negative for a range ending on the very top byte of kernel memory. Mark Rutland points out that we've actually got the semantics of addresses vs. 
segments muddled up in most of the places we need to amend, so shuffle the {USER,KERNEL}_DS definitions around such that we can correct those properly instead of just pasting "-1"s everywhere. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/processor.h | 3 ++ arch/arm64/include/asm/uaccess.h | 45 +++++++++++++++++------------- arch/arm64/kernel/entry.S | 4 +-- arch/arm64/mm/fault.c | 4 +-- 4 files changed, 33 insertions(+), 23 deletions(-) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 7dddca21fc64e2..fda6f5812281c2 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -21,6 +21,9 @@ #define TASK_SIZE_64 (UL(1) << VA_BITS) +#define KERNEL_DS UL(-1) +#define USER_DS (TASK_SIZE_64 - 1) + #ifndef __ASSEMBLY__ /* diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 335dfe02a846fe..87a6a52cee65b0 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -35,10 +35,7 @@ #include #include -#define KERNEL_DS (-1UL) #define get_ds() (KERNEL_DS) - -#define USER_DS TASK_SIZE_64 #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) @@ -66,22 +63,32 @@ static inline void set_fs(mm_segment_t fs) * Returns 1 if the range is valid, 0 otherwise. * * This is equivalent to the following test: - * (u65)addr + (u65)size <= current->addr_limit - * - * This needs 65-bit arithmetic. + * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 */ -#define __range_ok(addr, size) \ -({ \ - unsigned long __addr = (unsigned long)(addr); \ - unsigned long flag, roksum; \ - __chk_user_ptr(addr); \ - asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ - : "=&r" (flag), "=&r" (roksum) \ - : "1" (__addr), "Ir" (size), \ - "r" (current_thread_info()->addr_limit) \ - : "cc"); \ - flag; \ -}) +static inline unsigned long __range_ok(unsigned long addr, unsigned long size) +{ + unsigned long limit = current_thread_info()->addr_limit; + + __chk_user_ptr(addr); + asm volatile( + // A + B <= C + 1 for all A,B,C, in four easy steps: + // 1: X = A + B; X' = X % 2^64 + " adds %0, %0, %2\n" + // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 + " csel %1, xzr, %1, hi\n" + // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X' + // to compensate for the carry flag being set in step 4. For + // X > 2^64, X' merely has to remain nonzero, which it does. + " csinv %0, %0, xzr, cc\n" + // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1 + // comes from the carry in being clear. Otherwise, we are + // testing X' - C == 0, subject to the previous adjustments. 
+ " sbcs xzr, %0, %1\n" + " cset %0, ls\n" + : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc"); + + return addr; +} /* * When dealing with data aborts, watchpoints, or instruction traps we may end @@ -90,7 +97,7 @@ static inline void set_fs(mm_segment_t fs) */ #define untagged_addr(addr) sign_extend64(addr, 55) -#define access_ok(type, addr, size) __range_ok(addr, size) +#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size) #define user_addr_max get_fs #define _ASM_EXTABLE(from, to) \ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9766039eef31d7..206186e1f17f33 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -167,10 +167,10 @@ alternative_else_nop_endif .else add x21, sp, #S_FRAME_SIZE get_thread_info tsk - /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ + /* Save the task's original addr_limit and set USER_DS */ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] str x20, [sp, #S_ORIG_ADDR_LIMIT] - mov x20, #TASK_SIZE_64 + mov x20, #USER_DS str x20, [tsk, #TSK_TI_ADDR_LIMIT] /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ .endif /* \el == 0 */ diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index b64958b23a7fa1..2789f46622864f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -242,7 +242,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs, if (fsc_type == ESR_ELx_FSC_PERM) return true; - if (addr < USER_DS && system_uses_ttbr0_pan()) + if (addr < TASK_SIZE && system_uses_ttbr0_pan()) return fsc_type == ESR_ELx_FSC_FAULT && (regs->pstate & PSR_PAN_BIT); @@ -426,7 +426,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, mm_flags |= FAULT_FLAG_WRITE; } - if (addr < USER_DS && is_permission_fault(esr, regs, addr)) { + if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) { /* regs->orig_addr_limit may be 0 if we entered from EL0 */ if (regs->orig_addr_limit == KERNEL_DS) die("Accessing user space memory with fs=KERNEL_DS", regs, esr); From 2e985d2647a07bad21892811e8eb85f5231a1d4a Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 5 Feb 2018 15:34:19 +0000 Subject: [PATCH 1842/3326] arm64: Use pointer masking to limit uaccess speculation Commit 4d8efc2d5ee4 upstream. Similarly to x86, mitigate speculation past an access_ok() check by masking the pointer against the address limit before use. Even if we don't expect speculative writes per se, it is plausible that a CPU may still speculate at least as far as fetching a cache line for writing, hence we also harden put_user() and clear_user() for peace of mind. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/uaccess.h | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 87a6a52cee65b0..6d0c7179a9908b 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -215,6 +215,26 @@ static inline void uaccess_enable_not_uao(void) __uaccess_enable(ARM64_ALT_PAN_NOT_UAO); } +/* + * Sanitise a uaccess pointer such that it becomes NULL if above the + * current addr_limit. 
+ */ +#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) +static inline void __user *__uaccess_mask_ptr(const void __user *ptr) +{ + void __user *safe_ptr; + + asm volatile( + " bics xzr, %1, %2\n" + " csel %0, %1, xzr, eq\n" + : "=&r" (safe_ptr) + : "r" (ptr), "r" (current_thread_info()->addr_limit) + : "cc"); + + csdb(); + return safe_ptr; +} + /* * The "__xxx" versions of the user access functions do not verify the address * space - it must have been done previously with a separate "access_ok()" @@ -285,7 +305,7 @@ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ - __get_user((x), __p) : \ + __p = uaccess_mask_ptr(__p), __get_user((x), __p) : \ ((x) = 0, -EFAULT); \ }) @@ -349,7 +369,7 @@ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \ - __put_user((x), __p) : \ + __p = uaccess_mask_ptr(__p), __put_user((x), __p) : \ -EFAULT; \ }) @@ -365,7 +385,7 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) - n = __clear_user(to, n); + n = __clear_user(__uaccess_mask_ptr(to), n); return n; } From 7a51d7d2f7f77cea72fe0f73bd40d39852cd0702 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:20 +0000 Subject: [PATCH 1843/3326] arm64: entry: Ensure branch through syscall table is bounded under speculation Commit 6314d90e6493 upstream. In a similar manner to array_index_mask_nospec, this patch introduces an assembly macro (mask_nospec64) which can be used to bound a value under speculation. This macro is then used to ensure that the indirect branch through the syscall table is bounded under speculation, with out-of-range addresses speculating as calls to sys_io_setup (0). Reviewed-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/assembler.h | 11 +++++++++++ arch/arm64/kernel/entry.S | 2 ++ 2 files changed, 13 insertions(+) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 20f5845e529533..6d951a82e656ad 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -102,6 +102,17 @@ hint #20 .endm +/* + * Sanitise a 64-bit bounded index wrt speculation, returning zero if out + * of bounds. + */ + .macro mask_nospec64, idx, limit, tmp + sub \tmp, \idx, \limit + bic \tmp, \tmp, \idx + and \idx, \idx, \tmp, asr #63 + csdb + .endm + /* * NOP sequence */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 206186e1f17f33..f786e8d3d5bee1 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -376,6 +376,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 * x7 is reserved for the system call number in 32-bit mode. 
*/ wsc_nr .req w25 // number of system calls +xsc_nr .req x25 // number of system calls (zero-extended) wscno .req w26 // syscall number xscno .req x26 // syscall number (zero-extended) stbl .req x27 // syscall table pointer @@ -884,6 +885,7 @@ el0_svc_naked: // compat entry point b.ne __sys_trace cmp wscno, wsc_nr // check upper syscall limit b.hs ni_sys + mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number ldr x16, [stbl, xscno, lsl #3] // address in the syscall table blr x16 // call sys_* routine b ret_fast_syscall From 1ccaee9dea60f97e2f64fe17b8c23ff06fe95041 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:21 +0000 Subject: [PATCH 1844/3326] arm64: uaccess: Prevent speculative use of the current addr_limit Commit c2f0ad4fc089 upstream. A mispredicted conditional call to set_fs could result in the wrong addr_limit being forwarded under speculation to a subsequent access_ok check, potentially forming part of a spectre-v1 attack using uaccess routines. This patch prevents this forwarding from taking place, but putting heavy barriers in set_fs after writing the addr_limit. Reviewed-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/uaccess.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 6d0c7179a9908b..2f5dd32f5aef3a 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -42,6 +42,13 @@ static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; + /* + * Prevent a mispredicted conditional call to set_fs from forwarding + * the wrong address limit to access_ok under speculation. + */ + dsb(nsh); + isb(); + /* On user-mode return, check fs is correct */ set_thread_flag(TIF_FSCHECK); From 28d8886d985ccc84e4e8f567c60f491acb1ae51c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:22 +0000 Subject: [PATCH 1845/3326] arm64: uaccess: Don't bother eliding access_ok checks in __{get, put}_user Commit 84624087dd7e upstream. access_ok isn't an expensive operation once the addr_limit for the current thread has been loaded into the cache. Given that the initial access_ok check preceding a sequence of __{get,put}_user operations will take the brunt of the miss, we can make the __* variants identical to the full-fat versions, which brings with it the benefits of address masking. The likely cost in these sequences will be from toggling PAN/UAO, which we can address later by implementing the *_unsafe versions. 
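One practical consequence: code that calls the double-underscore accessors after doing its own access_ok() no longer bypasses pointer masking, since both spellings now check and mask. A hedged fragment (the device structure and handler are invented for illustration):

  /* Hypothetical ioctl path: __get_user() is now the same as get_user(),
   * so the user pointer is checked and masked before the load. */
  static long demo_set_threshold(struct demo_dev *dev, u32 __user *argp)
  {
          u32 val;

          if (__get_user(val, argp))
                  return -EFAULT;

          dev->threshold = val;
          return 0;
  }
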
Reviewed-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/uaccess.h | 54 +++++++++++++++++++------------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 2f5dd32f5aef3a..0687bb084719a4 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -294,28 +294,33 @@ do { \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user(x, ptr) \ +#define __get_user_check(x, ptr, err) \ ({ \ - int __gu_err = 0; \ - __get_user_err((x), (ptr), __gu_err); \ - __gu_err; \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ + __p = uaccess_mask_ptr(__p); \ + __get_user_err((x), __p, (err)); \ + } else { \ + (x) = 0; (err) = -EFAULT; \ + } \ }) #define __get_user_error(x, ptr, err) \ ({ \ - __get_user_err((x), (ptr), (err)); \ + __get_user_check((x), (ptr), (err)); \ (void)0; \ }) -#define get_user(x, ptr) \ +#define __get_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - might_fault(); \ - access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ - __p = uaccess_mask_ptr(__p), __get_user((x), __p) : \ - ((x) = 0, -EFAULT); \ + int __gu_err = 0; \ + __get_user_check((x), (ptr), __gu_err); \ + __gu_err; \ }) +#define get_user __get_user + #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ @@ -358,28 +363,33 @@ do { \ uaccess_disable_not_uao(); \ } while (0) -#define __put_user(x, ptr) \ +#define __put_user_check(x, ptr, err) \ ({ \ - int __pu_err = 0; \ - __put_user_err((x), (ptr), __pu_err); \ - __pu_err; \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ + __p = uaccess_mask_ptr(__p); \ + __put_user_err((x), __p, (err)); \ + } else { \ + (err) = -EFAULT; \ + } \ }) #define __put_user_error(x, ptr, err) \ ({ \ - __put_user_err((x), (ptr), (err)); \ + __put_user_check((x), (ptr), (err)); \ (void)0; \ }) -#define put_user(x, ptr) \ +#define __put_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - might_fault(); \ - access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \ - __p = uaccess_mask_ptr(__p), __put_user((x), __p) : \ - -EFAULT; \ + int __pu_err = 0; \ + __put_user_check((x), (ptr), __pu_err); \ + __pu_err; \ }) +#define put_user __put_user + extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); #define raw_copy_from_user __arch_copy_from_user extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); From ba32050d308a8bcd8ea300daf00dc197523d47f8 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:23 +0000 Subject: [PATCH 1846/3326] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user Commit f71c2ffcb20d upstream. Like we've done for get_user and put_user, ensure that user pointers are masked before invoking the underlying __arch_{clear,copy_*}_user operations. 
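The masking primitive used throughout these hunks, __uaccess_mask_ptr(), was added a few patches earlier; in portable C it behaves roughly as below. This model relies on the address limit being an all-ones mask such as TASK_SIZE_64 - 1, which the USER_DS change above arranged:

  #include <stddef.h>
  #include <stdint.h>

  /*
   * Rough model: a pointer above the current address limit collapses to
   * NULL, so even a mispredicted access_ok() cannot steer a speculative
   * user access at kernel addresses. The kernel version is a BICS/CSEL
   * pair followed by CSDB.
   */
  static void *mask_user_ptr(const void *ptr, uintptr_t addr_limit)
  {
          return ((uintptr_t)ptr & ~addr_limit) ? NULL : (void *)(uintptr_t)ptr;
  }
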
Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/uaccess.h | 29 ++++++++++++++++++++++------- arch/arm64/kernel/arm64ksyms.c | 4 ++-- arch/arm64/lib/clear_user.S | 6 +++--- arch/arm64/lib/copy_in_user.S | 5 +++-- 4 files changed, 30 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 0687bb084719a4..fad8c1b2ca3ef8 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -391,20 +391,35 @@ do { \ #define put_user __put_user extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); -#define raw_copy_from_user __arch_copy_from_user +#define raw_copy_from_user(to, from, n) \ +({ \ + __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ +}) + extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); -#define raw_copy_to_user __arch_copy_to_user -extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); +#define raw_copy_to_user(to, from, n) \ +({ \ + __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \ +}) + +extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); +#define raw_copy_in_user(to, from, n) \ +({ \ + __arch_copy_in_user(__uaccess_mask_ptr(to), \ + __uaccess_mask_ptr(from), (n)); \ +}) + #define INLINE_COPY_TO_USER #define INLINE_COPY_FROM_USER -static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) +extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); +static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) - n = __clear_user(__uaccess_mask_ptr(to), n); + n = __arch_clear_user(__uaccess_mask_ptr(to), n); return n; } +#define clear_user __clear_user extern long strncpy_from_user(char *dest, const char __user *src, long count); @@ -418,7 +433,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __ static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) { kasan_check_write(dst, size); - return __copy_user_flushcache(dst, src, size); + return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size); } #endif diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index 67368c7329c03e..66be504edb6cf5 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page); /* user mem (segment) */ EXPORT_SYMBOL(__arch_copy_from_user); EXPORT_SYMBOL(__arch_copy_to_user); -EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(raw_copy_in_user); +EXPORT_SYMBOL(__arch_clear_user); +EXPORT_SYMBOL(__arch_copy_in_user); /* physical memory */ EXPORT_SYMBOL(memstart_addr); diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 3d69a8d41fa5e5..21ba0b29621b2f 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -21,7 +21,7 @@ .text -/* Prototype: int __clear_user(void *addr, size_t sz) +/* Prototype: int __arch_clear_user(void *addr, size_t sz) * Purpose : clear some user memory * Params : addr - user memory address to clear * : sz - number of bytes to clear @@ -29,7 +29,7 @@ * * Alignment 
fixed up by hardware. */ -ENTRY(__clear_user) +ENTRY(__arch_clear_user) uaccess_enable_not_uao x2, x3, x4 mov x2, x1 // save the size for fixup return subs x1, x1, #8 @@ -52,7 +52,7 @@ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 uaccess_disable_not_uao x2, x3 ret -ENDPROC(__clear_user) +ENDPROC(__arch_clear_user) .section .fixup,"ax" .align 2 diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index fbb090f431a5c0..54b75deb1d1606 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -64,14 +64,15 @@ .endm end .req x5 -ENTRY(raw_copy_in_user) + +ENTRY(__arch_copy_in_user) uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" uaccess_disable_not_uao x3, x4 mov x0, #0 ret -ENDPROC(raw_copy_in_user) +ENDPROC(__arch_copy_in_user) .section .fixup,"ax" .align 2 From d9ef050f28953658ab1621a65c4090600e30bfde Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 15:34:24 +0000 Subject: [PATCH 1847/3326] arm64: futex: Mask __user pointers prior to dereference Commit 91b2d3442f6a upstream. The arm64 futex code has some explicit dereferencing of user pointers where performing atomic operations in response to a futex command. This patch uses masking to limit any speculative futex operations to within the user address space. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/futex.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 5bb2fd4674e7c5..07fe2479d3105d 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -48,9 +48,10 @@ do { \ } while (0) static inline int -arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) { int oldval = 0, ret, tmp; + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); pagefault_disable(); @@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, u32 oldval, u32 newval) { int ret = 0; u32 val, tmp; + u32 __user *uaddr; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32))) return -EFAULT; + uaddr = __uaccess_mask_ptr(_uaddr); uaccess_enable(); asm volatile("// futex_atomic_cmpxchg_inatomic\n" " prfm pstl1strm, %2\n" From da1f67921d2ff82ac3dfbf193dc4596da569a5c6 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 15 Jan 2018 19:38:54 +0000 Subject: [PATCH 1848/3326] arm64: cpufeature: __this_cpu_has_cap() shouldn't stop early Commit edf298cfce47 upstream. this_cpu_has_cap() tests caps->desc not caps->matches, so it stops walking the list when it finds a 'silent' feature, instead of walking to the end of the list. Prior to v4.6's 644c2ae198412 ("arm64: cpufeature: Test 'matches' pointer to find the end of the list") we always tested desc to find the end of a capability list. This was changed for dubious things like PAN_NOT_UAO. v4.7's e3661b128e53e ("arm64: Allow a capability to be checked on single CPU") added this_cpu_has_cap() using the old desc style test. 
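The bug is purely one of list termination: "silent" capabilities have a NULL ->desc but a valid ->matches, so testing desc stops the walk too early. A reduced model of the corrected loop (structure trimmed to the two relevant fields plus the capability number):

  #include <stdbool.h>
  #include <stddef.h>

  struct cap_entry {
          const char *desc;                               /* NULL for silent entries */
          bool (*matches)(const struct cap_entry *);      /* NULL only on the terminator */
          int capability;
  };

  static bool model_this_cpu_has_cap(const struct cap_entry *caps, int cap)
  {
          for (; caps->matches; caps++)           /* walk to the real terminator */
                  if (caps->capability == cap && caps->matches(caps))
                          return true;

          return false;
  }
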
CC: Suzuki K Poulose Reviewed-by: Suzuki K Poulose Acked-by: Marc Zyngier Signed-off-by: James Morse Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/cpufeature.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 451b319323c1da..9249cb7289925d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1102,9 +1102,8 @@ static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, if (WARN_ON(preemptible())) return false; - for (caps = cap_array; caps->desc; caps++) + for (caps = cap_array; caps->matches; caps++) if (caps->capability == cap && - caps->matches && caps->matches(caps, SCOPE_LOCAL_CPU)) return true; return false; From 9da836a476fe5f90348ca5edfb5af68c2ebb558b Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 17 Jan 2018 17:42:20 +0000 Subject: [PATCH 1849/3326] arm64: Run enable method for errata work arounds on late CPUs Commit 55b35d070c25 upstream. When a CPU is brought up after we have finalised the system wide capabilities (i.e, features and errata), we make sure the new CPU doesn't need a new errata work around which has not been detected already. However we don't run enable() method on the new CPU for the errata work arounds already detected. This could cause the new CPU running without potential work arounds. It is upto the "enable()" method to decide if this CPU should do something about the errata. Fixes: commit 6a6efbb45b7d95c84 ("arm64: Verify CPU errata work arounds on hotplugged CPU") Cc: Will Deacon Cc: Mark Rutland Cc: Andre Przywara Cc: Dave Martin Signed-off-by: Suzuki K Poulose Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/cpu_errata.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index e4c78630a730cd..624f8a1180291c 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -221,15 +221,18 @@ void verify_local_cpu_errata_workarounds(void) { const struct arm64_cpu_capabilities *caps = arm64_errata; - for (; caps->matches; caps++) - if (!cpus_have_cap(caps->capability) && - caps->matches(caps, SCOPE_LOCAL_CPU)) { + for (; caps->matches; caps++) { + if (cpus_have_cap(caps->capability)) { + if (caps->enable) + caps->enable((void *)caps); + } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) { pr_crit("CPU%d: Requires work around for %s, not detected" " at boot time\n", smp_processor_id(), caps->desc ? : "an erratum"); cpu_die_early(); } + } } void update_cpu_errata_workarounds(void) From be53742befea9329b034f143e7e932cac54d3609 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jan 2018 21:37:25 +0000 Subject: [PATCH 1850/3326] arm64: cpufeature: Pass capability structure to ->enable callback Commit 0a0d111d40fd upstream. In order to invoke the CPU capability ->matches callback from the ->enable callback for applying local-CPU workarounds, we need a handle on the capability structure. This patch passes a pointer to the capability structure to the ->enable callback. 
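With the capability structure threaded through, an ->enable() handler can recover its own entry and, for example, re-run ->matches() on the local CPU. A hedged sketch (the workaround body is a placeholder, not a real kernel helper):

  /* Sketch of an ->enable() callback now that 'data' carries the
   * arm64_cpu_capabilities entry it belongs to. */
  static int sketch_enable_workaround(void *data)
  {
          const struct arm64_cpu_capabilities *cap = data;

          if (cap->matches(cap, SCOPE_LOCAL_CPU))
                  apply_local_workaround();       /* hypothetical helper */

          return 0;
  }
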
Reviewed-by: Suzuki K Poulose Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/cpufeature.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 9249cb7289925d..c284b855315f6e 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1144,7 +1144,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) * uses an IPI, giving us a PSTATE that disappears when * we return. */ - stop_machine(caps->enable, NULL, cpu_online_mask); + stop_machine(caps->enable, (void *)caps, cpu_online_mask); } } } @@ -1203,7 +1203,7 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list) cpu_die_early(); } if (caps->enable) - caps->enable(NULL); + caps->enable((void *)caps); } } From f91f190708b25df0a5675800bcabae13bf2aa882 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jan 2018 21:45:41 +0000 Subject: [PATCH 1851/3326] drivers/firmware: Expose psci_get_version through psci_ops structure Commit d68e3ba5303f upstream. Entry into recent versions of ARM Trusted Firmware will invalidate the CPU branch predictor state in order to protect against aliasing attacks. This patch exposes the PSCI "VERSION" function via psci_ops, so that it can be invoked outside of the PSCI driver where necessary. Acked-by: Lorenzo Pieralisi Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/psci.c | 2 ++ include/linux/psci.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index d687ca3d5049b8..8b25d31e84016b 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -496,6 +496,8 @@ static void __init psci_init_migrate(void) static void __init psci_0_2_set_functions(void) { pr_info("Using standard PSCI v0.2 function IDs\n"); + psci_ops.get_version = psci_get_version; + psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_FN_NATIVE(0_2, CPU_SUSPEND); psci_ops.cpu_suspend = psci_cpu_suspend; diff --git a/include/linux/psci.h b/include/linux/psci.h index bdea1cb5e1db14..6306ab10af1821 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -26,6 +26,7 @@ int psci_cpu_init_idle(unsigned int cpu); int psci_cpu_suspend_enter(unsigned long index); struct psci_operations { + u32 (*get_version)(void); int (*cpu_suspend)(u32 state, unsigned long entry_point); int (*cpu_off)(u32 state); int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); From c10e4aa77814063ac459fab673a5a392b7334b42 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 2 Jan 2018 18:19:39 +0000 Subject: [PATCH 1852/3326] arm64: Move post_ttbr_update_workaround to C code Commit 95e3de3590e3 upstream. We will soon need to invoke a CPU-specific function pointer after changing page tables, so move post_ttbr_update_workaround out into C code to make this possible. 
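The gain from moving the hook into C is that it can grow an indirect call. A hedged sketch of the shape this enables (the per-CPU callback here is hypothetical; the real consumer arrives with the branch-predictor hardening patch below, while the ALTERNATIVE() body matches the hunk that follows):

  /* Sketch only: the erratum workaround stays as patched-in assembly,
   * while the surrounding C function is free to dispatch further
   * CPU-specific work through a function pointer. */
  asmlinkage void post_ttbr_update_workaround(void)
  {
          asm(ALTERNATIVE("nop; nop; nop",
                          "ic iallu; dsb nsh; isb",
                          ARM64_WORKAROUND_CAVIUM_27456,
                          CONFIG_CAVIUM_ERRATUM_27456));

          if (hypothetical_percpu_hook)           /* placeholder for later patches */
                  hypothetical_percpu_hook();
  }
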
Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/assembler.h | 13 ------------- arch/arm64/kernel/entry.S | 2 +- arch/arm64/mm/context.c | 9 +++++++++ arch/arm64/mm/proc.S | 3 +-- 4 files changed, 11 insertions(+), 16 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 6d951a82e656ad..463619dcadd4a9 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -481,19 +481,6 @@ alternative_endif mrs \rd, sp_el0 .endm -/* - * Errata workaround post TTBRx_EL1 update. - */ - .macro post_ttbr_update_workaround -#ifdef CONFIG_CAVIUM_ERRATUM_27456 -alternative_if ARM64_WORKAROUND_CAVIUM_27456 - ic iallu - dsb nsh - isb -alternative_else_nop_endif -#endif - .endm - /** * Errata workaround prior to disable MMU. Insert an ISB immediately prior * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index f786e8d3d5bee1..185c87a53fe3e7 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -275,7 +275,7 @@ alternative_else_nop_endif * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache * corruption). */ - post_ttbr_update_workaround + bl post_ttbr_update_workaround .endif 1: .if \el != 0 diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index db28958d9e4f7c..23498d032c8208 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -235,6 +235,15 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) cpu_switch_mm(mm->pgd, mm); } +/* Errata workaround post TTBRx_EL1 update. */ +asmlinkage void post_ttbr_update_workaround(void) +{ + asm(ALTERNATIVE("nop; nop; nop", + "ic iallu; dsb nsh; isb", + ARM64_WORKAROUND_CAVIUM_27456, + CONFIG_CAVIUM_ERRATUM_27456)); +} + static int asids_init(void) { asid_bits = get_cpu_asid_bits(); diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index c25e58bc291005..27058f3fd1320d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -148,8 +148,7 @@ ENTRY(cpu_do_switch_mm) isb msr ttbr0_el1, x0 // now update TTBR0 isb - post_ttbr_update_workaround - ret + b post_ttbr_update_workaround // Back to C code... ENDPROC(cpu_do_switch_mm) .pushsection ".idmap.text", "awx" From 5bee81c980297f3f5486539881ab4241c5f0dea3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 3 Jan 2018 11:17:58 +0000 Subject: [PATCH 1853/3326] arm64: Add skeleton to harden the branch predictor against aliasing attacks Commit 0f15adbb2861 upstream. Aliasing attacks against CPU branch predictors can allow an attacker to redirect speculative control flow on some CPUs and potentially divulge information from one context to another. This patch adds initial skeleton code behind a new Kconfig option to enable implementation-specific mitigations against these attacks for CPUs that are affected. 
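Combined with the psci_ops.get_version export from the previous patch, one concrete (but here hypothetical) hardening callback is simply a trap into firmware, since recent ARM Trusted Firmware invalidates the branch predictor state on entry:

  #include <linux/psci.h>

  /* Hypothetical per-CPU hardening callback: calling PSCI VERSION is a
   * cheap way to enter the firmware, which clears predictor state. */
  static void psci_version_bp_hardening(void)
  {
          if (psci_ops.get_version)
                  psci_ops.get_version();
  }
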
Co-developed-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/Kconfig | 17 ++++++++ arch/arm64/include/asm/cpucaps.h | 3 +- arch/arm64/include/asm/mmu.h | 37 ++++++++++++++++ arch/arm64/include/asm/sysreg.h | 1 + arch/arm64/kernel/Makefile | 4 ++ arch/arm64/kernel/bpi.S | 55 ++++++++++++++++++++++++ arch/arm64/kernel/cpu_errata.c | 74 ++++++++++++++++++++++++++++++++ arch/arm64/kernel/cpufeature.c | 1 + arch/arm64/kernel/entry.S | 8 ++-- arch/arm64/mm/context.c | 2 + arch/arm64/mm/fault.c | 17 ++++++++ 11 files changed, 215 insertions(+), 4 deletions(-) create mode 100644 arch/arm64/kernel/bpi.S diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0b6b59d93cf895..c2abb4e88ff2fa 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -818,6 +818,23 @@ config UNMAP_KERNEL_AT_EL0 If unsure, say Y. +config HARDEN_BRANCH_PREDICTOR + bool "Harden the branch predictor against aliasing attacks" if EXPERT + default y + help + Speculation attacks against some high-performance processors rely on + being able to manipulate the branch predictor for a victim context by + executing aliasing branches in the attacker context. Such attacks + can be partially mitigated against by clearing internal branch + predictor state and limiting the prediction logic in some situations. + + This config option will take CPU-specific actions to harden the + branch predictor against aliasing attacks and may rely on specific + instruction sequences or control bits being set by the system + firmware. + + If unsure, say Y. + menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 6835a48b31d4a4..8498eb4ccfc252 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -41,7 +41,8 @@ #define ARM64_WORKAROUND_CAVIUM_30115 20 #define ARM64_HAS_DCPOP 21 #define ARM64_UNMAP_KERNEL_AT_EL0 23 +#define ARM64_HARDEN_BRANCH_PREDICTOR 24 -#define ARM64_NCAPS 24 +#define ARM64_NCAPS 25 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 6f7bdb89817ffe..6dd83d75b82ab8 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -41,6 +41,43 @@ static inline bool arm64_kernel_unmapped_at_el0(void) cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); } +typedef void (*bp_hardening_cb_t)(void); + +struct bp_hardening_data { + int hyp_vectors_slot; + bp_hardening_cb_t fn; +}; + +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[]; + +DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); + +static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) +{ + return this_cpu_ptr(&bp_hardening_data); +} + +static inline void arm64_apply_bp_hardening(void) +{ + struct bp_hardening_data *d; + + if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) + return; + + d = arm64_get_bp_hardening_data(); + if (d->fn) + d->fn(); +} +#else +static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) +{ + return NULL; +} + +static inline void arm64_apply_bp_hardening(void) { } +#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ + extern void paging_init(void); extern void bootmem_init(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); diff --git a/arch/arm64/include/asm/sysreg.h 
b/arch/arm64/include/asm/sysreg.h index 1efbe29c8b248e..ede80d47d0effd 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -333,6 +333,7 @@ /* id_aa64pfr0 */ #define ID_AA64PFR0_CSV3_SHIFT 60 +#define ID_AA64PFR0_CSV2_SHIFT 56 #define ID_AA64PFR0_GIC_SHIFT 24 #define ID_AA64PFR0_ASIMD_SHIFT 20 #define ID_AA64PFR0_FP_SHIFT 16 diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 2f5ff2a65db3f4..def8d5623fd19c 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -55,6 +55,10 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +ifeq ($(CONFIG_KVM),y) +arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o +endif + obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) head-y := head.o diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S new file mode 100644 index 00000000000000..06a931eb26737a --- /dev/null +++ b/arch/arm64/kernel/bpi.S @@ -0,0 +1,55 @@ +/* + * Contains CPU specific branch predictor invalidation sequences + * + * Copyright (C) 2018 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include + +.macro ventry target + .rept 31 + nop + .endr + b \target +.endm + +.macro vectors target + ventry \target + 0x000 + ventry \target + 0x080 + ventry \target + 0x100 + ventry \target + 0x180 + + ventry \target + 0x200 + ventry \target + 0x280 + ventry \target + 0x300 + ventry \target + 0x380 + + ventry \target + 0x400 + ventry \target + 0x480 + ventry \target + 0x500 + ventry \target + 0x580 + + ventry \target + 0x600 + ventry \target + 0x680 + ventry \target + 0x700 + ventry \target + 0x780 +.endm + + .align 11 +ENTRY(__bp_harden_hyp_vecs_start) + .rept 4 + vectors __kvm_hyp_vector + .endr +ENTRY(__bp_harden_hyp_vecs_end) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 624f8a1180291c..3f2fee9d4590ef 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -60,6 +60,80 @@ static int cpu_enable_trap_ctr_access(void *__unused) return 0; } +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +#include +#include + +DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); + +#ifdef CONFIG_KVM +static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K); + int i; + + for (i = 0; i < SZ_2K; i += 0x80) + memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start); + + flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); +} + +static void __install_bp_hardening_cb(bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + static int last_slot = -1; + static DEFINE_SPINLOCK(bp_lock); + int cpu, slot = -1; + + spin_lock(&bp_lock); + for_each_possible_cpu(cpu) { + if (per_cpu(bp_hardening_data.fn, cpu) == fn) { + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); + 
break; + } + } + + if (slot == -1) { + last_slot++; + BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start) + / SZ_2K) <= last_slot); + slot = last_slot; + __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); + } + + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); + __this_cpu_write(bp_hardening_data.fn, fn); + spin_unlock(&bp_lock); +} +#else +static void __install_bp_hardening_cb(bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + __this_cpu_write(bp_hardening_data.fn, fn); +} +#endif /* CONFIG_KVM */ + +static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, + bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + u64 pfr0; + + if (!entry->matches(entry, SCOPE_LOCAL_CPU)) + return; + + pfr0 = read_cpuid(ID_AA64PFR0_EL1); + if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) + return; + + __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); +} +#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ + #define MIDR_RANGE(model, min, max) \ .def_scope = SCOPE_LOCAL_CPU, \ .matches = is_affected_midr_range, \ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c284b855315f6e..582142ae92e17e 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -126,6 +126,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 185c87a53fe3e7..a035a981b5579a 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -724,13 +724,15 @@ el0_ia: * Instruction abort handling */ mrs x26, far_el1 - // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_dbg +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp - bl do_mem_abort + bl do_el0_ia_bp_hardening b ret_to_user el0_fpsimd_acc: /* diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 23498d032c8208..abc56dc31ae06e 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -242,6 +242,8 @@ asmlinkage void post_ttbr_update_workaround(void) "ic iallu; dsb nsh; isb", ARM64_WORKAROUND_CAVIUM_27456, CONFIG_CAVIUM_ERRATUM_27456)); + + arm64_apply_bp_hardening(); } static int asids_init(void) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2789f46622864f..4985bd1175ef67 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -751,6 +751,23 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, arm64_notify_die("", regs, &info, esr); } +asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, + unsigned int esr, + struct pt_regs *regs) +{ + /* + * We've taken an instruction abort from userspace and not yet + * re-enabled IRQs. If the address is a kernel address, apply + * BP hardening prior to enabling IRQs and pre-emption. 
+ */ + if (addr > TASK_SIZE) + arm64_apply_bp_hardening(); + + local_irq_enable(); + do_mem_abort(addr, esr, regs); +} + + /* * Handle stack alignment exceptions. */ From 9107ac4ea3da68af722d1b6820f90cf0c119b134 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 19 Jan 2018 15:42:09 +0000 Subject: [PATCH 1854/3326] arm64: Move BP hardening to check_and_switch_context Commit a8e4c0a919ae upstream. We call arm64_apply_bp_hardening() from post_ttbr_update_workaround, which has the unexpected consequence of being triggered on every exception return to userspace when ARM64_SW_TTBR0_PAN is selected, even if no context switch actually occured. This is a bit suboptimal, and it would be more logical to only invalidate the branch predictor when we actually switch to a different mm. In order to solve this, move the call to arm64_apply_bp_hardening() into check_and_switch_context(), where we're guaranteed to pick a different mm context. Acked-by: Will Deacon Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/mm/context.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index abc56dc31ae06e..9284788733d606 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -227,6 +227,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); switch_mm_fastpath: + + arm64_apply_bp_hardening(); + /* * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when * emulating PAN. @@ -242,8 +245,6 @@ asmlinkage void post_ttbr_update_workaround(void) "ic iallu; dsb nsh; isb", ARM64_WORKAROUND_CAVIUM_27456, CONFIG_CAVIUM_ERRATUM_27456)); - - arm64_apply_bp_hardening(); } static int asids_init(void) From aab3306701f10c5dc35d8da74431cde6249baf0b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 3 Jan 2018 16:38:35 +0000 Subject: [PATCH 1855/3326] arm64: KVM: Use per-CPU vector when BP hardening is enabled Commit 6840bdd73d07 upstream. Now that we have per-CPU vectors, let's plug then in the KVM/arm64 code. Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/kvm_mmu.h | 10 +++++++++ arch/arm64/include/asm/kvm_mmu.h | 38 ++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/switch.c | 2 +- virt/kvm/arm/arm.c | 8 ++++++- 4 files changed, 56 insertions(+), 2 deletions(-) diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index fa6f2174276bdd..eb46fc81a440c3 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_bits(void) return 8; } +static inline void *kvm_get_hyp_vector(void) +{ + return kvm_ksym_ref(__kvm_hyp_vector); +} + +static inline int kvm_map_vectors(void) +{ + return 0; +} + #endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 672c8684d5c2a7..2d6d4bd9de52b4 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -309,5 +309,43 @@ static inline unsigned int kvm_get_vmid_bits(void) return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 
16 : 8; } +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +#include + +static inline void *kvm_get_hyp_vector(void) +{ + struct bp_hardening_data *data = arm64_get_bp_hardening_data(); + void *vect = kvm_ksym_ref(__kvm_hyp_vector); + + if (data->fn) { + vect = __bp_harden_hyp_vecs_start + + data->hyp_vectors_slot * SZ_2K; + + if (!has_vhe()) + vect = lm_alias(vect); + } + + return vect; +} + +static inline int kvm_map_vectors(void) +{ + return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start), + kvm_ksym_ref(__bp_harden_hyp_vecs_end), + PAGE_HYP_EXEC); +} + +#else +static inline void *kvm_get_hyp_vector(void) +{ + return kvm_ksym_ref(__kvm_hyp_vector); +} + +static inline int kvm_map_vectors(void) +{ + return 0; +} +#endif + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 945e79c641c4a2..9b2a76d783eb8e 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -51,7 +51,7 @@ static void __hyp_text __activate_traps_vhe(void) val &= ~CPACR_EL1_FPEN; write_sysreg(val, cpacr_el1); - write_sysreg(__kvm_hyp_vector, vbar_el1); + write_sysreg(kvm_get_hyp_vector(), vbar_el1); } static void __hyp_text __activate_traps_nvhe(void) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 9a07ee94a2306b..fc27a152881176 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1139,7 +1139,7 @@ static void cpu_init_hyp_mode(void *dummy) pgd_ptr = kvm_mmu_get_httbr(); stack_page = __this_cpu_read(kvm_arm_hyp_stack_page); hyp_stack_ptr = stack_page + PAGE_SIZE; - vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector); + vector_ptr = (unsigned long)kvm_get_hyp_vector(); __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr); __cpu_init_stage2(); @@ -1384,6 +1384,12 @@ static int init_hyp_mode(void) goto out_err; } + err = kvm_map_vectors(); + if (err) { + kvm_err("Cannot map vectors\n"); + goto out_err; + } + /* * Map the Hyp stack pages */ From 6b47a8256a56c261ab008863a548f245fdcc8b11 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 2 Feb 2018 17:31:39 +0000 Subject: [PATCH 1856/3326] arm64: entry: Apply BP hardening for high-priority synchronous exceptions Commit 5dfc6ed27710 upstream. Software-step and PC alignment fault exceptions have higher priority than instruction abort exceptions, so apply the BP hardening hooks there too if the user PC appears to reside in kernel space. 
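The check itself is small; here is a C-level sketch of the predicate used in the hunks that follow (helper name invented, arm64-internal headers assumed):

#include <asm/memory.h>         /* TASK_SIZE */
#include <asm/mmu.h>            /* arm64_apply_bp_hardening() */
#include <asm/ptrace.h>         /* user_mode(), instruction_pointer() */

static void maybe_harden_bp_sketch(struct pt_regs *regs)
{
        /*
         * An exception taken from EL0 whose PC is above TASK_SIZE can only
         * result from a (possibly speculative) branch to a kernel address,
         * so treat it as an attempt to train the branch predictor.
         */
        if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
                arm64_apply_bp_hardening();
}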
Reported-by: Dan Hettena Reviewed-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/entry.S | 6 ++++-- arch/arm64/mm/fault.c | 9 +++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index a035a981b5579a..ac7df45b296b88 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -759,8 +759,10 @@ el0_sp_pc: * Stack or PC alignment exception handling */ mrs x26, far_el1 - // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_dbg +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif ct_user_exit mov x0, x26 mov x1, x25 diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 4985bd1175ef67..58992467ebb9d3 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -778,6 +778,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, struct siginfo info; struct task_struct *tsk = current; + if (user_mode(regs)) { + if (instruction_pointer(regs) > TASK_SIZE) + arm64_apply_bp_hardening(); + local_irq_enable(); + } + if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS)) pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n", tsk->comm, task_pid_nr(tsk), @@ -837,6 +843,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, if (interrupts_enabled(regs)) trace_hardirqs_off(); + if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE) + arm64_apply_bp_hardening(); + if (!inf->fn(addr, esr, regs)) { rv = 1; } else { From 48c3538c35780838da19773b615ede148e6af2b0 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 2 Feb 2018 17:31:40 +0000 Subject: [PATCH 1857/3326] arm64: entry: Apply BP hardening for suspicious interrupts from EL0 Commit 30d88c0e3ace upstream. It is possible to take an IRQ from EL0 following a branch to a kernel address in such a way that the IRQ is prioritised over the instruction abort. Whilst an attacker would need to get the stars to align here, it might be sufficient with enough calibration so perform BP hardening in the rare case that we see a kernel address in the ELR when handling an IRQ from EL0. 
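The entry.S hunk below tests a single bit rather than comparing against TASK_SIZE, because at that point only the saved ELR is to hand. A C rendering of that test, for clarity only: on arm64, bit 55 of a virtual address selects TTBR1, i.e. a kernel mapping.

#include <linux/bitops.h>
#include <linux/types.h>

/* Equivalent of "tbz x22, #55, 1f", with x22 holding the saved ELR_EL1. */
static inline bool elr_looks_like_kernel_addr(u64 elr)
{
        return elr & BIT_ULL(55);
}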
Reported-by: Dan Hettena Reviewed-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/entry.S | 5 +++++ arch/arm64/mm/fault.c | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index ac7df45b296b88..93958d1341bbd7 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -821,6 +821,11 @@ el0_irq_naked: #endif ct_user_exit +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + tbz x22, #55, 1f + bl do_el0_irq_bp_hardening +1: +#endif irq_handler #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 58992467ebb9d3..5edb706aacb0df 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -751,6 +751,12 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, arm64_notify_die("", regs, &info, esr); } +asmlinkage void __exception do_el0_irq_bp_hardening(void) +{ + /* PC has already been checked in entry.S */ + arm64_apply_bp_hardening(); +} + asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr, struct pt_regs *regs) From 3317097b2b4affdd0b600ef0eb558a3e82964dc6 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 3 Jan 2018 11:19:34 +0000 Subject: [PATCH 1858/3326] arm64: cputype: Add missing MIDR values for Cortex-A72 and Cortex-A75 Commit a65d219fe5dc upstream. Hook up MIDR values for the Cortex-A72 and Cortex-A75 CPUs, since they will soon need MIDR matches for hardening the branch predictor. Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/cputype.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 157afb9f7a3516..be7bd19c87ec23 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -79,8 +79,10 @@ #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 #define ARM_CPU_PART_CORTEX_A57 0xD07 +#define ARM_CPU_PART_CORTEX_A72 0xD08 #define ARM_CPU_PART_CORTEX_A53 0xD03 #define ARM_CPU_PART_CORTEX_A73 0xD09 +#define ARM_CPU_PART_CORTEX_A75 0xD0A #define APM_CPU_PART_POTENZA 0x000 @@ -97,7 +99,9 @@ #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) +#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) +#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) From 48993dfa1af8c719576a18c0e2ca1d611297e34e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 3 Jan 2018 12:46:21 +0000 Subject: [PATCH 1859/3326] arm64: Implement branch predictor hardening for affected Cortex-A CPUs Commit aa6acde65e03 upstream. Cortex-A57, A72, A73 and A75 are susceptible to branch predictor aliasing and can theoretically be attacked by malicious code. This patch implements a PSCI-based mitigation for these CPUs when available. 
The call into firmware will invalidate the branch predictor state, preventing any malicious entries from affecting other victim contexts. Co-developed-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/bpi.S | 24 +++++++++++++++++++ arch/arm64/kernel/cpu_errata.c | 42 ++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S index 06a931eb26737a..dec95bd82e319e 100644 --- a/arch/arm64/kernel/bpi.S +++ b/arch/arm64/kernel/bpi.S @@ -53,3 +53,27 @@ ENTRY(__bp_harden_hyp_vecs_start) vectors __kvm_hyp_vector .endr ENTRY(__bp_harden_hyp_vecs_end) +ENTRY(__psci_hyp_bp_inval_start) + sub sp, sp, #(8 * 18) + stp x16, x17, [sp, #(16 * 0)] + stp x14, x15, [sp, #(16 * 1)] + stp x12, x13, [sp, #(16 * 2)] + stp x10, x11, [sp, #(16 * 3)] + stp x8, x9, [sp, #(16 * 4)] + stp x6, x7, [sp, #(16 * 5)] + stp x4, x5, [sp, #(16 * 6)] + stp x2, x3, [sp, #(16 * 7)] + stp x0, x1, [sp, #(16 * 8)] + mov x0, #0x84000000 + smc #0 + ldp x16, x17, [sp, #(16 * 0)] + ldp x14, x15, [sp, #(16 * 1)] + ldp x12, x13, [sp, #(16 * 2)] + ldp x10, x11, [sp, #(16 * 3)] + ldp x8, x9, [sp, #(16 * 4)] + ldp x6, x7, [sp, #(16 * 5)] + ldp x4, x5, [sp, #(16 * 6)] + ldp x2, x3, [sp, #(16 * 7)] + ldp x0, x1, [sp, #(16 * 8)] + add sp, sp, #(8 * 18) +ENTRY(__psci_hyp_bp_inval_end) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 3f2fee9d4590ef..e09027094f318b 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -67,6 +67,8 @@ static int cpu_enable_trap_ctr_access(void *__unused) DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); #ifdef CONFIG_KVM +extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[]; + static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, const char *hyp_vecs_end) { @@ -108,6 +110,9 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, spin_unlock(&bp_lock); } #else +#define __psci_hyp_bp_inval_start NULL +#define __psci_hyp_bp_inval_end NULL + static void __install_bp_hardening_cb(bp_hardening_cb_t fn, const char *hyp_vecs_start, const char *hyp_vecs_end) @@ -132,6 +137,21 @@ static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); } + +#include + +static int enable_psci_bp_hardening(void *data) +{ + const struct arm64_cpu_capabilities *entry = data; + + if (psci_ops.get_version) + install_bp_hardening_cb(entry, + (bp_hardening_cb_t)psci_ops.get_version, + __psci_hyp_bp_inval_start, + __psci_hyp_bp_inval_end); + + return 0; +} #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ #define MIDR_RANGE(model, min, max) \ @@ -281,6 +301,28 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_WORKAROUND_858921, MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), }, +#endif +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + .enable = enable_psci_bp_hardening, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + .enable = enable_psci_bp_hardening, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + .enable = enable_psci_bp_hardening, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), + .enable = enable_psci_bp_hardening, + }, #endif { } From 
9b26a45c34e40575e53287365be7571b9cda21ab Mon Sep 17 00:00:00 2001 From: Shanker Donthineni Date: Fri, 5 Jan 2018 14:28:59 -0600 Subject: [PATCH 1860/3326] arm64: Implement branch predictor hardening for Falkor Commit ec82b567a74f upstream. Falkor is susceptible to branch predictor aliasing and can theoretically be attacked by malicious code. This patch implements a mitigation for these attacks, preventing any malicious entries from affecting other victim contexts. Signed-off-by: Shanker Donthineni [will: fix label name when !CONFIG_KVM and remove references to MIDR_FALKOR] Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/cpucaps.h | 3 ++- arch/arm64/include/asm/kvm_asm.h | 2 ++ arch/arm64/kernel/bpi.S | 8 +++++++ arch/arm64/kernel/cpu_errata.c | 40 ++++++++++++++++++++++++++++++-- arch/arm64/kvm/hyp/entry.S | 12 ++++++++++ arch/arm64/kvm/hyp/switch.c | 8 +++++++ 6 files changed, 70 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 8498eb4ccfc252..2e7b236bc59620 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -42,7 +42,8 @@ #define ARM64_HAS_DCPOP 21 #define ARM64_UNMAP_KERNEL_AT_EL0 23 #define ARM64_HARDEN_BRANCH_PREDICTOR 24 +#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 -#define ARM64_NCAPS 25 +#define ARM64_NCAPS 26 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 26a64d0f9ab919..a7ef5a0519115f 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -66,6 +66,8 @@ extern u32 __kvm_get_mdcr_el2(void); extern u32 __init_stage2_translation(void); +extern void __qcom_hyp_sanitize_btac_predictors(void); + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S index dec95bd82e319e..76225c2611eac5 100644 --- a/arch/arm64/kernel/bpi.S +++ b/arch/arm64/kernel/bpi.S @@ -77,3 +77,11 @@ ENTRY(__psci_hyp_bp_inval_start) ldp x0, x1, [sp, #(16 * 8)] add sp, sp, #(8 * 18) ENTRY(__psci_hyp_bp_inval_end) + +ENTRY(__qcom_hyp_sanitize_link_stack_start) + stp x29, x30, [sp, #-16]! + .rept 16 + bl . 
+ 4 + .endr + ldp x29, x30, [sp], #16 +ENTRY(__qcom_hyp_sanitize_link_stack_end) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index e09027094f318b..54e41dfe41f669 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -68,6 +68,8 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); #ifdef CONFIG_KVM extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[]; +extern char __qcom_hyp_sanitize_link_stack_start[]; +extern char __qcom_hyp_sanitize_link_stack_end[]; static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, const char *hyp_vecs_end) @@ -110,8 +112,10 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, spin_unlock(&bp_lock); } #else -#define __psci_hyp_bp_inval_start NULL -#define __psci_hyp_bp_inval_end NULL +#define __psci_hyp_bp_inval_start NULL +#define __psci_hyp_bp_inval_end NULL +#define __qcom_hyp_sanitize_link_stack_start NULL +#define __qcom_hyp_sanitize_link_stack_end NULL static void __install_bp_hardening_cb(bp_hardening_cb_t fn, const char *hyp_vecs_start, @@ -152,6 +156,29 @@ static int enable_psci_bp_hardening(void *data) return 0; } + +static void qcom_link_stack_sanitization(void) +{ + u64 tmp; + + asm volatile("mov %0, x30 \n" + ".rept 16 \n" + "bl . + 4 \n" + ".endr \n" + "mov x30, %0 \n" + : "=&r" (tmp)); +} + +static int qcom_enable_link_stack_sanitization(void *data) +{ + const struct arm64_cpu_capabilities *entry = data; + + install_bp_hardening_cb(entry, qcom_link_stack_sanitization, + __qcom_hyp_sanitize_link_stack_start, + __qcom_hyp_sanitize_link_stack_end); + + return 0; +} #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ #define MIDR_RANGE(model, min, max) \ @@ -323,6 +350,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), .enable = enable_psci_bp_hardening, }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), + .enable = qcom_enable_link_stack_sanitization, + }, + { + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), + }, #endif { } diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 12ee62d6d41080..9c45c6af1f5828 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -196,3 +196,15 @@ alternative_endif eret ENDPROC(__fpsimd_guest_restore) + +ENTRY(__qcom_hyp_sanitize_btac_predictors) + /** + * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700) + * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls + * b15-b0: contains SiP functionID + */ + movz x0, #0x1700 + movk x0, #0xc200, lsl #16 + smc #0 + ret +ENDPROC(__qcom_hyp_sanitize_btac_predictors) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 9b2a76d783eb8e..0d3d3f1e348c0e 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -364,6 +364,14 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) /* 0 falls through to be handled out of EL2 */ } + if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) { + u32 midr = read_cpuid_id(); + + /* Apply BTAC predictors mitigation to all Falkor chips */ + if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1) + __qcom_hyp_sanitize_btac_predictors(); + } + fp_enabled = __fpsimd_enabled(); __sysreg_save_guest_state(guest_ctxt); From 402aeac58753ad5c3582eb8175a336f8e1fb62f4 Mon Sep 17 00:00:00 2001 From: Jayachandran C Date: Fri, 19 Jan 2018 04:22:47 -0800 Subject: [PATCH 1861/3326] arm64: Branch predictor hardening 
for Cavium ThunderX2 Commit f3d795d9b360 upstream. Use PSCI based mitigation for speculative execution attacks targeting the branch predictor. We use the same mechanism as the one used for Cortex-A CPUs, we expect the PSCI version call to have a side effect of clearing the BTBs. Acked-by: Will Deacon Signed-off-by: Jayachandran C Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/cpu_errata.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 54e41dfe41f669..ed688188223175 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -359,6 +359,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + .enable = enable_psci_bp_hardening, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), + .enable = enable_psci_bp_hardening, + }, #endif { } From 0b3512fa7b0a4f1f187a5e38112c5bebaea87fc1 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:07 +0000 Subject: [PATCH 1862/3326] arm64: KVM: Increment PC after handling an SMC trap Commit f5115e8869e1 upstream. When handling an SMC trap, the "preferred return address" is set to that of the SMC, and not the next PC (which is a departure from the behaviour of an SMC that isn't trapped). Increment PC in the handler, as the guest is otherwise forever stuck... Cc: stable@vger.kernel.org Fixes: acfb3b883f6d ("arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls") Reviewed-by: Christoffer Dall Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kvm/handle_exit.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 380261e258ef8a..7747f937c25042 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -53,7 +53,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { + /* + * "If an SMC instruction executed at Non-secure EL1 is + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a + * Trap exception, not a Secure Monitor Call exception [...]" + * + * We need to advance the PC after the trap, as it would + * otherwise return to the same address... + */ vcpu_set_reg(vcpu, 0, ~0UL); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } From 591862b560003518dfc369d44d1e177d96f7104c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:08 +0000 Subject: [PATCH 1863/3326] arm/arm64: KVM: Consolidate the PSCI include files Commit 1a2fb94e6a77 upstream. As we're about to update the PSCI support, and because I'm lazy, let's move the PSCI include file to include/kvm so that both ARM architectures can find it. 
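For consumers the consolidation is just an include-path change, sketched below: each architecture previously carried its own copy under asm/, and both now pull the single shared header.

/* before this patch (per-arch copies): */
#include <asm/kvm_psci.h>
/* after this patch (shared between arch/arm and arch/arm64): */
#include <kvm/arm_psci.h>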
Acked-by: Christoffer Dall Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/kvm_psci.h | 27 ------------------- arch/arm/kvm/handle_exit.c | 2 +- arch/arm64/kvm/handle_exit.c | 3 ++- .../asm/kvm_psci.h => include/kvm/arm_psci.h | 6 ++--- virt/kvm/arm/arm.c | 2 +- virt/kvm/arm/psci.c | 3 ++- 6 files changed, 9 insertions(+), 34 deletions(-) delete mode 100644 arch/arm/include/asm/kvm_psci.h rename arch/arm64/include/asm/kvm_psci.h => include/kvm/arm_psci.h (89%) diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h deleted file mode 100644 index 6bda945d31fa8e..00000000000000 --- a/arch/arm/include/asm/kvm_psci.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM_KVM_PSCI_H__ -#define __ARM_KVM_PSCI_H__ - -#define KVM_ARM_PSCI_0_1 1 -#define KVM_ARM_PSCI_0_2 2 - -int kvm_psci_version(struct kvm_vcpu *vcpu); -int kvm_psci_call(struct kvm_vcpu *vcpu); - -#endif /* __ARM_KVM_PSCI_H__ */ diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index cf8bf6bf87c4b8..61fb27cdd9c1dc 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include "trace.h" diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 7747f937c25042..ba2ade75d97dd4 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -22,12 +22,13 @@ #include #include +#include + #include #include #include #include #include -#include #define CREATE_TRACE_POINTS #include "trace.h" diff --git a/arch/arm64/include/asm/kvm_psci.h b/include/kvm/arm_psci.h similarity index 89% rename from arch/arm64/include/asm/kvm_psci.h rename to include/kvm/arm_psci.h index bc39e557c56c83..2042bb909474a0 100644 --- a/arch/arm64/include/asm/kvm_psci.h +++ b/include/kvm/arm_psci.h @@ -15,8 +15,8 @@ * along with this program. If not, see . 
*/ -#ifndef __ARM64_KVM_PSCI_H__ -#define __ARM64_KVM_PSCI_H__ +#ifndef __KVM_ARM_PSCI_H__ +#define __KVM_ARM_PSCI_H__ #define KVM_ARM_PSCI_0_1 1 #define KVM_ARM_PSCI_0_2 2 @@ -24,4 +24,4 @@ int kvm_psci_version(struct kvm_vcpu *vcpu); int kvm_psci_call(struct kvm_vcpu *vcpu); -#endif /* __ARM64_KVM_PSCI_H__ */ +#endif /* __KVM_ARM_PSCI_H__ */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index fc27a152881176..385ba604a9b20b 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -29,6 +29,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include "trace.h" @@ -44,7 +45,6 @@ #include #include #include -#include #include #ifdef REQUIRES_VIRT diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index f1e363bab5e896..b322e46fd14212 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -21,9 +21,10 @@ #include #include -#include #include +#include + #include /* From 4efa1a863a1218d4b884a67178735950aab85c2e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:09 +0000 Subject: [PATCH 1864/3326] arm/arm64: KVM: Add PSCI_VERSION helper Commit d0a144f12a7c upstream. As we're about to trigger a PSCI version explosion, it doesn't hurt to introduce a PSCI_VERSION helper that is going to be used everywhere. Reviewed-by: Christoffer Dall Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- include/kvm/arm_psci.h | 6 ++++-- include/uapi/linux/psci.h | 3 +++ virt/kvm/arm/psci.c | 4 +--- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 2042bb909474a0..5659343580a3e0 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -18,8 +18,10 @@ #ifndef __KVM_ARM_PSCI_H__ #define __KVM_ARM_PSCI_H__ -#define KVM_ARM_PSCI_0_1 1 -#define KVM_ARM_PSCI_0_2 2 +#include + +#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) +#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) int kvm_psci_version(struct kvm_vcpu *vcpu); int kvm_psci_call(struct kvm_vcpu *vcpu); diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h index 760e52a9640feb..b3bcabe380da8c 100644 --- a/include/uapi/linux/psci.h +++ b/include/uapi/linux/psci.h @@ -88,6 +88,9 @@ (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT) #define PSCI_VERSION_MINOR(ver) \ ((ver) & PSCI_VERSION_MINOR_MASK) +#define PSCI_VERSION(maj, min) \ + ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \ + ((min) & PSCI_VERSION_MINOR_MASK)) /* PSCI features decoding (>=1.0) */ #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1 diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index b322e46fd14212..999f94d6bb9824 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -25,8 +25,6 @@ #include -#include - /* * This is an implementation of the Power State Coordination Interface * as described in ARM document number ARM DEN 0022A. @@ -222,7 +220,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) * Bits[31:16] = Major Version = 0 * Bits[15:0] = Minor Version = 2 */ - val = 2; + val = KVM_ARM_PSCI_0_2; break; case PSCI_0_2_FN_CPU_SUSPEND: case PSCI_0_2_FN64_CPU_SUSPEND: From ce15f32d48840bbaf49bf5b9ba540befb59548cb Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:10 +0000 Subject: [PATCH 1865/3326] arm/arm64: KVM: Add smccc accessors to PSCI code Commit 84684fecd7ea upstream. 
Instead of open coding the accesses to the various registers, let's add explicit SMCCC accessors. Reviewed-by: Christoffer Dall Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- virt/kvm/arm/psci.c | 52 ++++++++++++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 10 deletions(-) diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 999f94d6bb9824..c41553d3511038 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -32,6 +32,38 @@ #define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1) +static u32 smccc_get_function(struct kvm_vcpu *vcpu) +{ + return vcpu_get_reg(vcpu, 0); +} + +static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu) +{ + return vcpu_get_reg(vcpu, 1); +} + +static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu) +{ + return vcpu_get_reg(vcpu, 2); +} + +static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu) +{ + return vcpu_get_reg(vcpu, 3); +} + +static void smccc_set_retval(struct kvm_vcpu *vcpu, + unsigned long a0, + unsigned long a1, + unsigned long a2, + unsigned long a3) +{ + vcpu_set_reg(vcpu, 0, a0); + vcpu_set_reg(vcpu, 1, a1); + vcpu_set_reg(vcpu, 2, a2); + vcpu_set_reg(vcpu, 3, a3); +} + static unsigned long psci_affinity_mask(unsigned long affinity_level) { if (affinity_level <= 3) @@ -77,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) unsigned long context_id; phys_addr_t target_pc; - cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK; + cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; if (vcpu_mode_is_32bit(source_vcpu)) cpu_id &= ~((u32) 0); @@ -96,8 +128,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) return PSCI_RET_INVALID_PARAMS; } - target_pc = vcpu_get_reg(source_vcpu, 2); - context_id = vcpu_get_reg(source_vcpu, 3); + target_pc = smccc_get_arg2(source_vcpu); + context_id = smccc_get_arg3(source_vcpu); kvm_reset_vcpu(vcpu); @@ -116,7 +148,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) * NOTE: We always update r0 (or x0) because for PSCI v0.1 * the general puspose registers are undefined upon CPU_ON. 
*/ - vcpu_set_reg(vcpu, 0, context_id); + smccc_set_retval(vcpu, context_id, 0, 0, 0); vcpu->arch.power_off = false; smp_mb(); /* Make sure the above is visible */ @@ -136,8 +168,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *tmp; - target_affinity = vcpu_get_reg(vcpu, 1); - lowest_affinity_level = vcpu_get_reg(vcpu, 2); + target_affinity = smccc_get_arg1(vcpu); + lowest_affinity_level = smccc_get_arg2(vcpu); /* Determine target affinity mask */ target_affinity_mask = psci_affinity_mask(lowest_affinity_level); @@ -210,7 +242,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu) static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); + u32 psci_fn = smccc_get_function(vcpu); unsigned long val; int ret = 1; @@ -277,14 +309,14 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) break; } - vcpu_set_reg(vcpu, 0, val); + smccc_set_retval(vcpu, val, 0, 0, 0); return ret; } static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); + u32 psci_fn = smccc_get_function(vcpu); unsigned long val; switch (psci_fn) { @@ -302,7 +334,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) break; } - vcpu_set_reg(vcpu, 0, val); + smccc_set_retval(vcpu, val, 0, 0, 0); return 1; } From 4ba100aa94a0f8c44de187c46fee4917ce1e06aa Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:11 +0000 Subject: [PATCH 1866/3326] arm/arm64: KVM: Implement PSCI 1.0 support Commit 58e0b2239a4d upstream. PSCI 1.0 can be trivially implemented by providing the FEATURES call on top of PSCI 0.2 and returning 1.0 as the PSCI version. We happily ignore everything else, as they are either optional or are clarifications that do not require any additional change. PSCI 1.0 is now the default until we decide to add a userspace selection API. 
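From the guest's point of view, the practical difference is the FEATURES call. A sketch of how a guest could probe for a PSCI function before using it; the helper name is invented, the conduit is assumed to be HVC, and arm_smccc_hvc() is used purely as a generic conduit call.

#include <linux/arm-smccc.h>
#include <uapi/linux/psci.h>

/* Hypothetical guest-side probe: true if the hypervisor/firmware
 * implements the given PSCI function ID. */
static bool guest_psci_fn_supported(u32 fn)
{
        struct arm_smccc_res res;

        arm_smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, fn, 0, 0, 0, 0, 0, 0, &res);

        /* PSCI_RET_NOT_SUPPORTED (-1) comes back as a negative 32-bit value. */
        return (int)res.a0 >= 0;
}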
Reviewed-by: Christoffer Dall Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- include/kvm/arm_psci.h | 3 +++ virt/kvm/arm/psci.c | 45 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 5659343580a3e0..32360432cff507 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -22,6 +22,9 @@ #define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) #define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) +#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) + +#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 int kvm_psci_version(struct kvm_vcpu *vcpu); int kvm_psci_call(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index c41553d3511038..3e7c63e15f04f0 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -234,7 +234,7 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu) int kvm_psci_version(struct kvm_vcpu *vcpu) { if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) - return KVM_ARM_PSCI_0_2; + return KVM_ARM_PSCI_LATEST; return KVM_ARM_PSCI_0_1; } @@ -313,6 +313,47 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) return ret; } +static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu) +{ + u32 psci_fn = smccc_get_function(vcpu); + u32 feature; + unsigned long val; + int ret = 1; + + switch(psci_fn) { + case PSCI_0_2_FN_PSCI_VERSION: + val = KVM_ARM_PSCI_1_0; + break; + case PSCI_1_0_FN_PSCI_FEATURES: + feature = smccc_get_arg1(vcpu); + switch(feature) { + case PSCI_0_2_FN_PSCI_VERSION: + case PSCI_0_2_FN_CPU_SUSPEND: + case PSCI_0_2_FN64_CPU_SUSPEND: + case PSCI_0_2_FN_CPU_OFF: + case PSCI_0_2_FN_CPU_ON: + case PSCI_0_2_FN64_CPU_ON: + case PSCI_0_2_FN_AFFINITY_INFO: + case PSCI_0_2_FN64_AFFINITY_INFO: + case PSCI_0_2_FN_MIGRATE_INFO_TYPE: + case PSCI_0_2_FN_SYSTEM_OFF: + case PSCI_0_2_FN_SYSTEM_RESET: + case PSCI_1_0_FN_PSCI_FEATURES: + val = 0; + break; + default: + val = PSCI_RET_NOT_SUPPORTED; + break; + } + break; + default: + return kvm_psci_0_2_call(vcpu); + } + + smccc_set_retval(vcpu, val, 0, 0, 0); + return ret; +} + static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; @@ -355,6 +396,8 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) int kvm_psci_call(struct kvm_vcpu *vcpu) { switch (kvm_psci_version(vcpu)) { + case KVM_ARM_PSCI_1_0: + return kvm_psci_1_0_call(vcpu); case KVM_ARM_PSCI_0_2: return kvm_psci_0_2_call(vcpu); case KVM_ARM_PSCI_0_1: From 45e2061147c329fc08f81a7e2a551b92bcc2a6a3 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:12 +0000 Subject: [PATCH 1867/3326] arm/arm64: KVM: Advertise SMCCC v1.1 Commit 09e6be12effd upstream. The new SMC Calling Convention (v1.1) allows for a reduced overhead when calling into the firmware, and provides a new feature discovery mechanism. Make it visible to KVM guests. 
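Using the constants added to arm-smccc.h in the hunk below, a guest can now discover the calling-convention version before relying on v1.1 semantics. A hedged guest-side sketch (invented helper, HVC conduit assumed); an implementation that predates the call returns NOT_SUPPORTED, which is treated as v1.0.

#include <linux/arm-smccc.h>

static u32 guest_smccc_version(void)
{
        struct arm_smccc_res res;

        arm_smccc_hvc(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res);

        if ((int)res.a0 < 0)    /* unknown function => pre-1.1 firmware */
                return ARM_SMCCC_VERSION_1_0;

        return res.a0;
}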
Tested-by: Ard Biesheuvel Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm/kvm/handle_exit.c | 2 +- arch/arm64/kvm/handle_exit.c | 2 +- include/kvm/arm_psci.h | 2 +- include/linux/arm-smccc.h | 13 +++++++++++++ virt/kvm/arm/psci.c | 24 +++++++++++++++++++++++- 5 files changed, 39 insertions(+), 4 deletions(-) diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 61fb27cdd9c1dc..76be5701f9d6ab 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -36,7 +36,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_vcpu_hvc_get_imm(vcpu)); vcpu->stat.hvc_exit_stat++; - ret = kvm_psci_call(vcpu); + ret = kvm_hvc_call_handler(vcpu); if (ret < 0) { kvm_inject_undefined(vcpu); return 1; diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index ba2ade75d97dd4..ab48c5ed394306 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -43,7 +43,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_vcpu_hvc_get_imm(vcpu)); vcpu->stat.hvc_exit_stat++; - ret = kvm_psci_call(vcpu); + ret = kvm_hvc_call_handler(vcpu); if (ret < 0) { vcpu_set_reg(vcpu, 0, ~0UL); return 1; diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 32360432cff507..ed1dd8088f1cd7 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -27,6 +27,6 @@ #define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 int kvm_psci_version(struct kvm_vcpu *vcpu); -int kvm_psci_call(struct kvm_vcpu *vcpu); +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); #endif /* __KVM_ARM_PSCI_H__ */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 4c5bca38c6533e..dc68aa5a7261c2 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -60,6 +60,19 @@ #define ARM_SMCCC_QUIRK_NONE 0 #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ +#define ARM_SMCCC_VERSION_1_0 0x10000 +#define ARM_SMCCC_VERSION_1_1 0x10001 + +#define ARM_SMCCC_VERSION_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0) + +#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 1) + #ifndef __ASSEMBLY__ #include diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 3e7c63e15f04f0..46a98fee3ef50a 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -15,6 +15,7 @@ * along with this program. If not, see . 
*/ +#include #include #include #include @@ -339,6 +340,7 @@ static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu) case PSCI_0_2_FN_SYSTEM_OFF: case PSCI_0_2_FN_SYSTEM_RESET: case PSCI_1_0_FN_PSCI_FEATURES: + case ARM_SMCCC_VERSION_FUNC_ID: val = 0; break; default: @@ -393,7 +395,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) * Errors: * -EINVAL: Unrecognized PSCI function */ -int kvm_psci_call(struct kvm_vcpu *vcpu) +static int kvm_psci_call(struct kvm_vcpu *vcpu) { switch (kvm_psci_version(vcpu)) { case KVM_ARM_PSCI_1_0: @@ -406,3 +408,23 @@ int kvm_psci_call(struct kvm_vcpu *vcpu) return -EINVAL; }; } + +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) +{ + u32 func_id = smccc_get_function(vcpu); + u32 val = PSCI_RET_NOT_SUPPORTED; + + switch (func_id) { + case ARM_SMCCC_VERSION_FUNC_ID: + val = ARM_SMCCC_VERSION_1_1; + break; + case ARM_SMCCC_ARCH_FEATURES_FUNC_ID: + /* Nothing supported yet */ + break; + default: + return kvm_psci_call(vcpu); + } + + smccc_set_retval(vcpu, val, 0, 0, 0); + return 1; +} From 98be7165d9f71f5df9802548b7d8ab2cf6ccb211 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 3 Jan 2018 16:38:37 +0000 Subject: [PATCH 1868/3326] arm64: KVM: Make PSCI_VERSION a fast path Commit 90348689d500 upstream. For those CPUs that require PSCI to perform a BP invalidation, going all the way to the PSCI code for not much is a waste of precious cycles. Let's terminate that call as early as possible. Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kvm/hyp/switch.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 0d3d3f1e348c0e..67f0ee24a406f9 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -322,6 +323,18 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu)) goto again; + if (exit_code == ARM_EXCEPTION_TRAP && + (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 || + kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) && + vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) { + u64 val = PSCI_RET_NOT_SUPPORTED; + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) + val = 2; + + vcpu_set_reg(vcpu, 0, val); + goto again; + } + if (static_branch_unlikely(&vgic_v2_cpuif_trap) && exit_code == ARM_EXCEPTION_TRAP) { bool valid; From 2cfe8929f6247dbee8def7e94f334909fe5ac084 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:13 +0000 Subject: [PATCH 1869/3326] arm/arm64: KVM: Turn kvm_psci_version into a static inline Commit a4097b351118 upstream. We're about to need kvm_psci_version in HYP too. So let's turn it into a static inline, and pass the kvm structure as a second parameter (so that HYP can do a kern_hyp_va on it). 
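The reason for the extra parameter, shown as a sketch rather than the real call sites: code running at EL2 cannot dereference a kernel linear-map pointer such as vcpu->kvm directly, so the HYP caller hands over an address it has already run through kern_hyp_va(). The function name below is invented for illustration, and a real EL2 function would also be marked __hyp_text.

#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>        /* kern_hyp_va() */
#include <kvm/arm_psci.h>       /* kvm_psci_version() */

static int psci_version_from_hyp_sketch(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);       /* EL2-visible alias */

        return kvm_psci_version(vcpu, kvm);
}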
Tested-by: Ard Biesheuvel Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kvm/hyp/switch.c | 20 ++++++++++++-------- include/kvm/arm_psci.h | 21 ++++++++++++++++++++- virt/kvm/arm/psci.c | 12 ++---------- 3 files changed, 34 insertions(+), 19 deletions(-) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 67f0ee24a406f9..6568656147c0f6 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -19,6 +19,8 @@ #include #include +#include + #include #include #include @@ -325,14 +327,16 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) if (exit_code == ARM_EXCEPTION_TRAP && (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 || - kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) && - vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) { - u64 val = PSCI_RET_NOT_SUPPORTED; - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) - val = 2; - - vcpu_set_reg(vcpu, 0, val); - goto again; + kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32)) { + u32 val = vcpu_get_reg(vcpu, 0); + + if (val == PSCI_0_2_FN_PSCI_VERSION) { + val = kvm_psci_version(vcpu, kern_hyp_va(vcpu->kvm)); + if (unlikely(val == KVM_ARM_PSCI_0_1)) + val = PSCI_RET_NOT_SUPPORTED; + vcpu_set_reg(vcpu, 0, val); + goto again; + } } if (static_branch_unlikely(&vgic_v2_cpuif_trap) && diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index ed1dd8088f1cd7..e518e4e3dfb50a 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -18,6 +18,7 @@ #ifndef __KVM_ARM_PSCI_H__ #define __KVM_ARM_PSCI_H__ +#include #include #define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) @@ -26,7 +27,25 @@ #define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 -int kvm_psci_version(struct kvm_vcpu *vcpu); +/* + * We need the KVM pointer independently from the vcpu as we can call + * this from HYP, and need to apply kern_hyp_va on it... + */ +static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) +{ + /* + * Our PSCI implementation stays the same across versions from + * v0.2 onward, only adding the few mandatory functions (such + * as FEATURES with 1.0) that are required by newer + * revisions. It is thus safe to return the latest. 
+ */ + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) + return KVM_ARM_PSCI_LATEST; + + return KVM_ARM_PSCI_0_1; +} + + int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); #endif /* __KVM_ARM_PSCI_H__ */ diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 46a98fee3ef50a..e105c1153794b8 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -123,7 +123,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) if (!vcpu) return PSCI_RET_INVALID_PARAMS; if (!vcpu->arch.power_off) { - if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) + if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1) return PSCI_RET_ALREADY_ON; else return PSCI_RET_INVALID_PARAMS; @@ -232,14 +232,6 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu) kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET); } -int kvm_psci_version(struct kvm_vcpu *vcpu) -{ - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) - return KVM_ARM_PSCI_LATEST; - - return KVM_ARM_PSCI_0_1; -} - static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; @@ -397,7 +389,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) */ static int kvm_psci_call(struct kvm_vcpu *vcpu) { - switch (kvm_psci_version(vcpu)) { + switch (kvm_psci_version(vcpu, vcpu->kvm)) { case KVM_ARM_PSCI_1_0: return kvm_psci_1_0_call(vcpu); case KVM_ARM_PSCI_0_2: From e47273d086236d06c3c2851fa8599971086b2a73 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:14 +0000 Subject: [PATCH 1870/3326] arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support Commit 6167ec5c9145 upstream. A new feature of SMCCC 1.1 is that it offers firmware-based CPU workarounds. In particular, SMCCC_ARCH_WORKAROUND_1 provides BP hardening for CVE-2017-5715. If the host has some mitigation for this issue, report that we deal with it using SMCCC_ARCH_WORKAROUND_1, as we apply the host workaround on every guest exit. Tested-by: Ard Biesheuvel Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/kvm_host.h | 6 ++++++ arch/arm64/include/asm/kvm_host.h | 5 +++++ include/linux/arm-smccc.h | 5 +++++ virt/kvm/arm/psci.c | 9 ++++++++- 4 files changed, 24 insertions(+), 1 deletion(-) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 4a879f6ff13bea..31fbb9285f62a8 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -293,4 +293,10 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +static inline bool kvm_arm_harden_branch_predictor(void) +{ + /* No way to detect it yet, pretend it is not there. 
*/ + return false; +} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e923b58606e2bf..8ad208cb866c15 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -384,4 +384,9 @@ static inline void __cpu_init_stage2(void) "PARange is %d bits, unsupported configuration!", parange); } +static inline bool kvm_arm_harden_branch_predictor(void) +{ + return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR); +} + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index dc68aa5a7261c2..e1ef944ef1da9b 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -73,6 +73,11 @@ ARM_SMCCC_SMC_32, \ 0, 1) +#define ARM_SMCCC_ARCH_WORKAROUND_1 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x8000) + #ifndef __ASSEMBLY__ #include diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index e105c1153794b8..6919352cbf15e8 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -405,13 +405,20 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) { u32 func_id = smccc_get_function(vcpu); u32 val = PSCI_RET_NOT_SUPPORTED; + u32 feature; switch (func_id) { case ARM_SMCCC_VERSION_FUNC_ID: val = ARM_SMCCC_VERSION_1_1; break; case ARM_SMCCC_ARCH_FEATURES_FUNC_ID: - /* Nothing supported yet */ + feature = smccc_get_arg1(vcpu); + switch(feature) { + case ARM_SMCCC_ARCH_WORKAROUND_1: + if (kvm_arm_harden_branch_predictor()) + val = 0; + break; + } break; default: return kvm_psci_call(vcpu); From 6db26ad1dc4685843ab3c4d655b51777e04d131e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:15 +0000 Subject: [PATCH 1871/3326] arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling Commit f72af90c3783 upstream. We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible. So let's intercept it as early as we can by testing for the function call number as soon as we've identified a HVC call coming from the guest. Tested-by: Ard Biesheuvel Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kvm/hyp/hyp-entry.S | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 5170ce1021da05..f49b53331d2811 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -15,6 +15,7 @@ * along with this program. If not, see . */ +#include #include #include @@ -64,10 +65,11 @@ alternative_endif lsr x0, x1, #ESR_ELx_EC_SHIFT cmp x0, #ESR_ELx_EC_HVC64 + ccmp x0, #ESR_ELx_EC_HVC32, #4, ne b.ne el1_trap - mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest - cbnz x1, el1_trap // called HVC + mrs x1, vttbr_el2 // If vttbr is valid, the guest + cbnz x1, el1_hvc_guest // called HVC /* Here, we're pretty sure the host called HVC. */ ldp x0, x1, [sp], #16 @@ -100,6 +102,20 @@ alternative_endif eret +el1_hvc_guest: + /* + * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. + * The workaround has already been applied on the host, + * so let's quickly get back to the guest. We don't bother + * restoring x1, as it can be clobbered anyway. 
+ */ + ldr x1, [sp] // Guest's x0 + eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1 + cbnz w1, el1_trap + mov x0, x1 + add sp, sp, #16 + eret + el1_trap: /* * x0: ESR_EC From 906a9f396cc8005807f6741e881da0ad317c4091 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:16 +0000 Subject: [PATCH 1872/3326] firmware/psci: Expose PSCI conduit Commit 09a8d6d48499 upstream. In order to call into the firmware to apply workarounds, it is useful to find out whether we're using HVC or SMC. Let's expose this through the psci_ops. Acked-by: Lorenzo Pieralisi Reviewed-by: Robin Murphy Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/psci.c | 28 +++++++++++++++++++++++----- include/linux/psci.h | 7 +++++++ 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index 8b25d31e84016b..e9493da2b111e6 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -59,7 +59,9 @@ bool psci_tos_resident_on(int cpu) return cpu == resident_cpu; } -struct psci_operations psci_ops; +struct psci_operations psci_ops = { + .conduit = PSCI_CONDUIT_NONE, +}; typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); @@ -210,6 +212,22 @@ static unsigned long psci_migrate_info_up_cpu(void) 0, 0, 0); } +static void set_conduit(enum psci_conduit conduit) +{ + switch (conduit) { + case PSCI_CONDUIT_HVC: + invoke_psci_fn = __invoke_psci_fn_hvc; + break; + case PSCI_CONDUIT_SMC: + invoke_psci_fn = __invoke_psci_fn_smc; + break; + default: + WARN(1, "Unexpected PSCI conduit %d\n", conduit); + } + + psci_ops.conduit = conduit; +} + static int get_set_conduit_method(struct device_node *np) { const char *method; @@ -222,9 +240,9 @@ static int get_set_conduit_method(struct device_node *np) } if (!strcmp("hvc", method)) { - invoke_psci_fn = __invoke_psci_fn_hvc; + set_conduit(PSCI_CONDUIT_HVC); } else if (!strcmp("smc", method)) { - invoke_psci_fn = __invoke_psci_fn_smc; + set_conduit(PSCI_CONDUIT_SMC); } else { pr_warn("invalid \"method\" property: %s\n", method); return -EINVAL; @@ -654,9 +672,9 @@ int __init psci_acpi_init(void) pr_info("probing for conduit method from ACPI.\n"); if (acpi_psci_use_hvc()) - invoke_psci_fn = __invoke_psci_fn_hvc; + set_conduit(PSCI_CONDUIT_HVC); else - invoke_psci_fn = __invoke_psci_fn_smc; + set_conduit(PSCI_CONDUIT_SMC); return psci_probe(); } diff --git a/include/linux/psci.h b/include/linux/psci.h index 6306ab10af1821..66ff54787d3e95 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -25,6 +25,12 @@ bool psci_tos_resident_on(int cpu); int psci_cpu_init_idle(unsigned int cpu); int psci_cpu_suspend_enter(unsigned long index); +enum psci_conduit { + PSCI_CONDUIT_NONE, + PSCI_CONDUIT_SMC, + PSCI_CONDUIT_HVC, +}; + struct psci_operations { u32 (*get_version)(void); int (*cpu_suspend)(u32 state, unsigned long entry_point); @@ -34,6 +40,7 @@ struct psci_operations { int (*affinity_info)(unsigned long target_affinity, unsigned long lowest_affinity_level); int (*migrate_info_type)(void); + enum psci_conduit conduit; }; extern struct psci_operations psci_ops; From 908ad7a1484d78228bc88d242121574f86eb35e8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:17 +0000 Subject: [PATCH 1873/3326] firmware/psci: Expose SMCCC version through psci_ops Commit e78eef554a91 upstream. 
Since PSCI 1.0 allows the SMCCC version to be (indirectly) probed, let's do that at boot time, and expose the version of the calling convention as part of the psci_ops structure. Acked-by: Lorenzo Pieralisi Reviewed-by: Robin Murphy Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/psci.c | 27 +++++++++++++++++++++++++++ include/linux/psci.h | 6 ++++++ 2 files changed, 33 insertions(+) diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index e9493da2b111e6..c80ec1d0327441 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -61,6 +61,7 @@ bool psci_tos_resident_on(int cpu) struct psci_operations psci_ops = { .conduit = PSCI_CONDUIT_NONE, + .smccc_version = SMCCC_VERSION_1_0, }; typedef unsigned long (psci_fn)(unsigned long, unsigned long, @@ -511,6 +512,31 @@ static void __init psci_init_migrate(void) pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); } +static void __init psci_init_smccc(void) +{ + u32 ver = ARM_SMCCC_VERSION_1_0; + int feature; + + feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); + + if (feature != PSCI_RET_NOT_SUPPORTED) { + u32 ret; + ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); + if (ret == ARM_SMCCC_VERSION_1_1) { + psci_ops.smccc_version = SMCCC_VERSION_1_1; + ver = ret; + } + } + + /* + * Conveniently, the SMCCC and PSCI versions are encoded the + * same way. No, this isn't accidental. + */ + pr_info("SMC Calling Convention v%d.%d\n", + PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); + +} + static void __init psci_0_2_set_functions(void) { pr_info("Using standard PSCI v0.2 function IDs\n"); @@ -559,6 +585,7 @@ static int __init psci_probe(void) psci_init_migrate(); if (PSCI_VERSION_MAJOR(ver) >= 1) { + psci_init_smccc(); psci_init_cpu_suspend(); psci_init_system_suspend(); } diff --git a/include/linux/psci.h b/include/linux/psci.h index 66ff54787d3e95..347077cf19c692 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -31,6 +31,11 @@ enum psci_conduit { PSCI_CONDUIT_HVC, }; +enum smccc_version { + SMCCC_VERSION_1_0, + SMCCC_VERSION_1_1, +}; + struct psci_operations { u32 (*get_version)(void); int (*cpu_suspend)(u32 state, unsigned long entry_point); @@ -41,6 +46,7 @@ struct psci_operations { unsigned long lowest_affinity_level); int (*migrate_info_type)(void); enum psci_conduit conduit; + enum smccc_version smccc_version; }; extern struct psci_operations psci_ops; From 37dc3e6c117eced753d6ce6cce85535cec3ad013 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:18 +0000 Subject: [PATCH 1874/3326] arm/arm64: smccc: Make function identifiers an unsigned quantity Commit ded4c39e93f3 upstream. Function identifiers are a 32bit, unsigned quantity. But we never tell so to the compiler, resulting in the following: 4ac: b26187e0 mov x0, #0xffffffff80000001 We thus rely on the firmware narrowing it for us, which is not always a reasonable expectation. 
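For illustration only (not part of the patch), here is a stand-alone C sketch of the failure mode; the 0x80000001 constant merely mimics the shape of a fast-call identifier with bit 31 set:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Bit 31 is set in a fast-call identifier; 0x80000001 only
             * illustrates the shape of such a value. */
            int32_t  id_signed   = (int32_t)0x80000001;  /* negative on two's-complement targets */
            uint32_t id_unsigned = 0x80000001U;          /* what the _AC(1,U) change provides */

            uint64_t reg_bad  = id_signed;   /* sign-extends into the upper half */
            uint64_t reg_good = id_unsigned; /* zero-extends, stays a 32-bit value */

            printf("signed ID in a 64-bit slot:   %#llx\n", (unsigned long long)reg_bad);
            printf("unsigned ID in a 64-bit slot: %#llx\n", (unsigned long long)reg_good);
            return 0;
    }

The first value printed matches the 0xffffffff80000001 immediate quoted above.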
Cc: stable@vger.kernel.org Reported-by: Ard Biesheuvel Acked-by: Ard Biesheuvel Reviewed-by: Robin Murphy Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- include/linux/arm-smccc.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index e1ef944ef1da9b..dd44d8458c04e4 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -14,14 +14,16 @@ #ifndef __LINUX_ARM_SMCCC_H #define __LINUX_ARM_SMCCC_H +#include + /* * This file provides common defines for ARM SMC Calling Convention as * specified in * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html */ -#define ARM_SMCCC_STD_CALL 0 -#define ARM_SMCCC_FAST_CALL 1 +#define ARM_SMCCC_STD_CALL _AC(0,U) +#define ARM_SMCCC_FAST_CALL _AC(1,U) #define ARM_SMCCC_TYPE_SHIFT 31 #define ARM_SMCCC_SMC_32 0 From ac63fdb4a2b229bdd7ad8449a88791ad5da5f572 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:19 +0000 Subject: [PATCH 1875/3326] arm/arm64: smccc: Implement SMCCC v1.1 inline primitive Commit f2d3b2e8759a upstream. One of the major improvement of SMCCC v1.1 is that it only clobbers the first 4 registers, both on 32 and 64bit. This means that it becomes very easy to provide an inline version of the SMC call primitive, and avoid performing a function call to stash the registers that would otherwise be clobbered by SMCCC v1.0. Reviewed-by: Robin Murphy Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- include/linux/arm-smccc.h | 141 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index dd44d8458c04e4..a031897fca76b2 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -150,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) +/* SMCCC v1.1 implementation madness follows */ +#ifdef CONFIG_ARM64 + +#define SMCCC_SMC_INST "smc #0" +#define SMCCC_HVC_INST "hvc #0" + +#elif defined(CONFIG_ARM) +#include +#include + +#define SMCCC_SMC_INST __SMC(0) +#define SMCCC_HVC_INST __HVC(0) + +#endif + +#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x + +#define __count_args(...) 
\ + ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) + +#define __constraint_write_0 \ + "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_1 \ + "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_2 \ + "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) +#define __constraint_write_3 \ + "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) +#define __constraint_write_4 __constraint_write_3 +#define __constraint_write_5 __constraint_write_4 +#define __constraint_write_6 __constraint_write_5 +#define __constraint_write_7 __constraint_write_6 + +#define __constraint_read_0 +#define __constraint_read_1 +#define __constraint_read_2 +#define __constraint_read_3 +#define __constraint_read_4 "r" (r4) +#define __constraint_read_5 __constraint_read_4, "r" (r5) +#define __constraint_read_6 __constraint_read_5, "r" (r6) +#define __constraint_read_7 __constraint_read_6, "r" (r7) + +#define __declare_arg_0(a0, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register unsigned long r1 asm("r1"); \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3") + +#define __declare_arg_1(a0, a1, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register typeof(a1) r1 asm("r1") = a1; \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3") + +#define __declare_arg_2(a0, a1, a2, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register typeof(a1) r1 asm("r1") = a1; \ + register typeof(a2) r2 asm("r2") = a2; \ + register unsigned long r3 asm("r3") + +#define __declare_arg_3(a0, a1, a2, a3, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register typeof(a1) r1 asm("r1") = a1; \ + register typeof(a2) r2 asm("r2") = a2; \ + register typeof(a3) r3 asm("r3") = a3 + +#define __declare_arg_4(a0, a1, a2, a3, a4, res) \ + __declare_arg_3(a0, a1, a2, a3, res); \ + register typeof(a4) r4 asm("r4") = a4 + +#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ + __declare_arg_4(a0, a1, a2, a3, a4, res); \ + register typeof(a5) r5 asm("r5") = a5 + +#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ + __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ + register typeof(a6) r6 asm("r6") = a6 + +#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ + __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ + register typeof(a7) r7 asm("r7") = a7 + +#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) +#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) + +#define ___constraints(count) \ + : __constraint_write_ ## count \ + : __constraint_read_ ## count \ + : "memory" +#define __constraints(count) ___constraints(count) + +/* + * We have an output list that is not necessarily used, and GCC feels + * entitled to optimise the whole sequence away. "volatile" is what + * makes it stick. + */ +#define __arm_smccc_1_1(inst, ...) \ + do { \ + __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ + asm volatile(inst "\n" \ + __constraints(__count_args(__VA_ARGS__))); \ + if (___res) \ + *___res = (typeof(*___res)){r0, r1, r2, r3}; \ + } while (0) + +/* + * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. 
+ * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make SMC calls following SMC Calling Convention v1.1. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction if not NULL. + */ +#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__) + +/* + * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. + * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make HVC calls following SMC Calling Convention v1.1. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the HVC instruction. The return values are updated with the content + * from register 0 to 3 on return from the HVC instruction if not NULL. + */ +#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) + #endif /*__ASSEMBLY__*/ #endif /*__LINUX_ARM_SMCCC_H*/ From dbca45b996550f1ab646011f48bede5b9c2e2ea9 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:20 +0000 Subject: [PATCH 1876/3326] arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support Commit b092201e0020 upstream. Add the detection and runtime code for ARM_SMCCC_ARCH_WORKAROUND_1. It is lovely. Really. Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/bpi.S | 20 ++++++++++ arch/arm64/kernel/cpu_errata.c | 68 +++++++++++++++++++++++++++++++++- 2 files changed, 87 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S index 76225c2611eac5..fdeed629f2c6dd 100644 --- a/arch/arm64/kernel/bpi.S +++ b/arch/arm64/kernel/bpi.S @@ -17,6 +17,7 @@ */ #include +#include .macro ventry target .rept 31 @@ -85,3 +86,22 @@ ENTRY(__qcom_hyp_sanitize_link_stack_start) .endr ldp x29, x30, [sp], #16 ENTRY(__qcom_hyp_sanitize_link_stack_end) + +.macro smccc_workaround_1 inst + sub sp, sp, #(8 * 4) + stp x2, x3, [sp, #(8 * 0)] + stp x0, x1, [sp, #(8 * 2)] + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 + \inst #0 + ldp x2, x3, [sp, #(8 * 0)] + ldp x0, x1, [sp, #(8 * 2)] + add sp, sp, #(8 * 4) +.endm + +ENTRY(__smccc_workaround_1_smc_start) + smccc_workaround_1 smc +ENTRY(__smccc_workaround_1_smc_end) + +ENTRY(__smccc_workaround_1_hvc_start) + smccc_workaround_1 hvc +ENTRY(__smccc_workaround_1_hvc_end) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index ed688188223175..9e77809a3b2307 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -70,6 +70,10 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[]; extern char __qcom_hyp_sanitize_link_stack_start[]; extern char __qcom_hyp_sanitize_link_stack_end[]; +extern char __smccc_workaround_1_smc_start[]; +extern char __smccc_workaround_1_smc_end[]; +extern char __smccc_workaround_1_hvc_start[]; +extern char __smccc_workaround_1_hvc_end[]; static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, const char *hyp_vecs_end) @@ -116,6 +120,10 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, #define 
__psci_hyp_bp_inval_end NULL #define __qcom_hyp_sanitize_link_stack_start NULL #define __qcom_hyp_sanitize_link_stack_end NULL +#define __smccc_workaround_1_smc_start NULL +#define __smccc_workaround_1_smc_end NULL +#define __smccc_workaround_1_hvc_start NULL +#define __smccc_workaround_1_hvc_end NULL static void __install_bp_hardening_cb(bp_hardening_cb_t fn, const char *hyp_vecs_start, @@ -142,17 +150,75 @@ static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); } +#include +#include #include +static void call_smc_arch_workaround_1(void) +{ + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static void call_hvc_arch_workaround_1(void) +{ + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) +{ + bp_hardening_cb_t cb; + void *smccc_start, *smccc_end; + struct arm_smccc_res res; + + if (!entry->matches(entry, SCOPE_LOCAL_CPU)) + return false; + + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) + return false; + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if (res.a0) + return false; + cb = call_hvc_arch_workaround_1; + smccc_start = __smccc_workaround_1_hvc_start; + smccc_end = __smccc_workaround_1_hvc_end; + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if (res.a0) + return false; + cb = call_smc_arch_workaround_1; + smccc_start = __smccc_workaround_1_smc_start; + smccc_end = __smccc_workaround_1_smc_end; + break; + + default: + return false; + } + + install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); + + return true; +} + static int enable_psci_bp_hardening(void *data) { const struct arm64_cpu_capabilities *entry = data; - if (psci_ops.get_version) + if (psci_ops.get_version) { + if (check_smccc_arch_workaround_1(entry)) + return 0; + install_bp_hardening_cb(entry, (bp_hardening_cb_t)psci_ops.get_version, __psci_hyp_bp_inval_start, __psci_hyp_bp_inval_end); + } return 0; } From c584c903bae9a1ec6e881847713df9c1b8b87df0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:21 +0000 Subject: [PATCH 1877/3326] arm64: Kill PSCI_GET_VERSION as a variant-2 workaround Commit 3a0a397ff5ff upstream. Now that we've standardised on SMCCC v1.1 to perform the branch prediction invalidation, let's drop the previous band-aid. If vendors haven't updated their firmware to do SMCCC 1.1, they haven't updated PSCI either, so we don't loose anything. 
Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/bpi.S | 24 ------------------ arch/arm64/kernel/cpu_errata.c | 45 ++++++++++------------------------ arch/arm64/kvm/hyp/switch.c | 14 ----------- 3 files changed, 13 insertions(+), 70 deletions(-) diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S index fdeed629f2c6dd..e5de33513b5d77 100644 --- a/arch/arm64/kernel/bpi.S +++ b/arch/arm64/kernel/bpi.S @@ -54,30 +54,6 @@ ENTRY(__bp_harden_hyp_vecs_start) vectors __kvm_hyp_vector .endr ENTRY(__bp_harden_hyp_vecs_end) -ENTRY(__psci_hyp_bp_inval_start) - sub sp, sp, #(8 * 18) - stp x16, x17, [sp, #(16 * 0)] - stp x14, x15, [sp, #(16 * 1)] - stp x12, x13, [sp, #(16 * 2)] - stp x10, x11, [sp, #(16 * 3)] - stp x8, x9, [sp, #(16 * 4)] - stp x6, x7, [sp, #(16 * 5)] - stp x4, x5, [sp, #(16 * 6)] - stp x2, x3, [sp, #(16 * 7)] - stp x0, x1, [sp, #(16 * 8)] - mov x0, #0x84000000 - smc #0 - ldp x16, x17, [sp, #(16 * 0)] - ldp x14, x15, [sp, #(16 * 1)] - ldp x12, x13, [sp, #(16 * 2)] - ldp x10, x11, [sp, #(16 * 3)] - ldp x8, x9, [sp, #(16 * 4)] - ldp x6, x7, [sp, #(16 * 5)] - ldp x4, x5, [sp, #(16 * 6)] - ldp x2, x3, [sp, #(16 * 7)] - ldp x0, x1, [sp, #(16 * 8)] - add sp, sp, #(8 * 18) -ENTRY(__psci_hyp_bp_inval_end) ENTRY(__qcom_hyp_sanitize_link_stack_start) stp x29, x30, [sp, #-16]! diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 9e77809a3b2307..07823595b7f016 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -67,7 +67,6 @@ static int cpu_enable_trap_ctr_access(void *__unused) DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); #ifdef CONFIG_KVM -extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[]; extern char __qcom_hyp_sanitize_link_stack_start[]; extern char __qcom_hyp_sanitize_link_stack_end[]; extern char __smccc_workaround_1_smc_start[]; @@ -116,8 +115,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, spin_unlock(&bp_lock); } #else -#define __psci_hyp_bp_inval_start NULL -#define __psci_hyp_bp_inval_end NULL #define __qcom_hyp_sanitize_link_stack_start NULL #define __qcom_hyp_sanitize_link_stack_end NULL #define __smccc_workaround_1_smc_start NULL @@ -164,24 +161,25 @@ static void call_hvc_arch_workaround_1(void) arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } -static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) +static int enable_smccc_arch_workaround_1(void *data) { + const struct arm64_cpu_capabilities *entry = data; bp_hardening_cb_t cb; void *smccc_start, *smccc_end; struct arm_smccc_res res; if (!entry->matches(entry, SCOPE_LOCAL_CPU)) - return false; + return 0; if (psci_ops.smccc_version == SMCCC_VERSION_1_0) - return false; + return 0; switch (psci_ops.conduit) { case PSCI_CONDUIT_HVC: arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); if (res.a0) - return false; + return 0; cb = call_hvc_arch_workaround_1; smccc_start = __smccc_workaround_1_hvc_start; smccc_end = __smccc_workaround_1_hvc_end; @@ -191,35 +189,18 @@ static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *e arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); if (res.a0) - return false; + return 0; cb = call_smc_arch_workaround_1; smccc_start = __smccc_workaround_1_smc_start; smccc_end = __smccc_workaround_1_smc_end; 
break; default: - return false; + return 0; } install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); - return true; -} - -static int enable_psci_bp_hardening(void *data) -{ - const struct arm64_cpu_capabilities *entry = data; - - if (psci_ops.get_version) { - if (check_smccc_arch_workaround_1(entry)) - return 0; - - install_bp_hardening_cb(entry, - (bp_hardening_cb_t)psci_ops.get_version, - __psci_hyp_bp_inval_start, - __psci_hyp_bp_inval_end); - } - return 0; } @@ -399,22 +380,22 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, @@ -428,12 +409,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), - .enable = enable_psci_bp_hardening, + .enable = enable_smccc_arch_workaround_1, }, #endif { diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 6568656147c0f6..79364d3455c046 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -325,20 +325,6 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu)) goto again; - if (exit_code == ARM_EXCEPTION_TRAP && - (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 || - kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32)) { - u32 val = vcpu_get_reg(vcpu, 0); - - if (val == PSCI_0_2_FN_PSCI_VERSION) { - val = kvm_psci_version(vcpu, kern_hyp_va(vcpu->kvm)); - if (unlikely(val == KVM_ARM_PSCI_0_1)) - val = PSCI_RET_NOT_SUPPORTED; - vcpu_set_reg(vcpu, 0, val); - goto again; - } - } - if (static_branch_unlikely(&vgic_v2_cpuif_trap) && exit_code == ARM_EXCEPTION_TRAP) { bool valid; From 0763f0418bb53f4724a6d711bf3b735a71ce3d24 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Oct 2017 15:54:10 +0200 Subject: [PATCH 1878/3326] mtd: cfi: convert inline functions to macros commit 9e343e87d2c4c707ef8fae2844864d4dde3a2d13 upstream. The map_word_() functions, dating back to linux-2.6.8, try to perform bitwise operations on a 'map_word' structure. This may have worked with compilers that were current then (gcc-3.4 or earlier), but end up being rather inefficient on any version I could try now (gcc-4.4 or higher). Specifically we hit a problem analyzed in gcc PR81715 where we fail to reuse the stack space for local variables. This can be seen immediately in the stack consumption for cfi_staa_erase_varsize() and other functions that (with CONFIG_KASAN) can be up to 2200 bytes. Changing the inline functions into macros brings this down to 1280 bytes. 
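For readers unfamiliar with the construct, the transformation can be sketched outside the kernel as follows (stand-alone C, invented names, relying on the GCC/clang statement-expression extension the kernel already uses):

    #include <stdio.h>

    #define NWORDS 4

    struct word_vec { unsigned long x[NWORDS]; };   /* stand-in for map_word */

    /* Original style: an inline helper returning a structure by value. */
    static inline struct word_vec vec_and_inline(struct word_vec a, struct word_vec b)
    {
            struct word_vec r;
            int i;

            for (i = 0; i < NWORDS; i++)
                    r.x[i] = a.x[i] & b.x[i];
            return r;
    }

    /* Macro style: a statement expression expanded at the call site, so no
     * per-call temporary structure has to survive as a distinct stack
     * object in the caller on the affected compiler versions. */
    #define vec_and_macro(a, b)                                 \
    ({                                                          \
            struct word_vec __r;                                \
            int __i;                                            \
            for (__i = 0; __i < NWORDS; __i++)                  \
                    __r.x[__i] = (a).x[__i] & (b).x[__i];       \
            __r;                                                \
    })

    int main(void)
    {
            struct word_vec a = { { 0xff, 0xf0, 0x0f, 0x3c } };
            struct word_vec b = { { 0x0f, 0xff, 0xf0, 0x0f } };
            struct word_vec r1 = vec_and_inline(a, b);
            struct word_vec r2 = vec_and_macro(a, b);

            printf("inline: %lx, macro: %lx\n", r1.x[0], r2.x[0]);
            return 0;
    }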
Without KASAN, the same problem exists, but the stack consumption is lower to start with, my patch shrinks it from 920 to 496 bytes on with arm-linux-gnueabi-gcc-5.4, and saves around 1KB in .text size for cfi_cmdset_0020.c, as it avoids copying map_word structures for each call to one of these helpers. With the latest gcc-8 snapshot, the problem is fixed in upstream gcc, but nobody uses that yet, so we should still work around it in mainline kernels and probably backport the workaround to stable kernels as well. We had a couple of other functions that suffered from the same gcc bug, and all of those had a simpler workaround involving dummy variables in the inline function. Unfortunately that did not work here, the macro hack was the best I could come up with. It would also be helpful to have someone to a little performance testing on the patch, to see how much it helps in terms of CPU utilitzation. Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 Signed-off-by: Arnd Bergmann Acked-by: Richard Weinberger Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- include/linux/mtd/map.h | 130 +++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 69 deletions(-) diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 3aa56e3104bb74..b5b43f94f31162 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd); #define INVALIDATE_CACHED_RANGE(map, from, size) \ do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) - -static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2) -{ - int i; - - for (i = 0; i < map_words(map); i++) { - if (val1.x[i] != val2.x[i]) - return 0; - } - - return 1; -} - -static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2) -{ - map_word r; - int i; - - for (i = 0; i < map_words(map); i++) - r.x[i] = val1.x[i] & val2.x[i]; - - return r; -} - -static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2) -{ - map_word r; - int i; - - for (i = 0; i < map_words(map); i++) - r.x[i] = val1.x[i] & ~val2.x[i]; - - return r; -} - -static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2) -{ - map_word r; - int i; - - for (i = 0; i < map_words(map); i++) - r.x[i] = val1.x[i] | val2.x[i]; - - return r; -} - -static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3) -{ - int i; - - for (i = 0; i < map_words(map); i++) { - if ((val1.x[i] & val2.x[i]) != val3.x[i]) - return 0; - } - - return 1; -} - -static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2) -{ - int i; - - for (i = 0; i < map_words(map); i++) { - if (val1.x[i] & val2.x[i]) - return 1; - } - - return 0; -} +#define map_word_equal(map, val1, val2) \ +({ \ + int i, ret = 1; \ + for (i = 0; i < map_words(map); i++) \ + if ((val1).x[i] != (val2).x[i]) { \ + ret = 0; \ + break; \ + } \ + ret; \ +}) + +#define map_word_and(map, val1, val2) \ +({ \ + map_word r; \ + int i; \ + for (i = 0; i < map_words(map); i++) \ + r.x[i] = (val1).x[i] & (val2).x[i]; \ + r; \ +}) + +#define map_word_clr(map, val1, val2) \ +({ \ + map_word r; \ + int i; \ + for (i = 0; i < map_words(map); i++) \ + r.x[i] = (val1).x[i] & ~(val2).x[i]; \ + r; \ +}) + +#define map_word_or(map, val1, val2) \ +({ \ + map_word r; \ + int i; \ + for (i = 0; i < map_words(map); i++) \ + r.x[i] = (val1).x[i] | (val2).x[i]; \ + r; \ +}) + 
+#define map_word_andequal(map, val1, val2, val3) \ +({ \ + int i, ret = 1; \ + for (i = 0; i < map_words(map); i++) { \ + if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \ + ret = 0; \ + break; \ + } \ + } \ + ret; \ +}) + +#define map_word_bitsset(map, val1, val2) \ +({ \ + int i, ret = 0; \ + for (i = 0; i < map_words(map); i++) { \ + if ((val1).x[i] & (val2).x[i]) { \ + ret = 1; \ + break; \ + } \ + } \ + ret; \ +}) static inline map_word map_word_load(struct map_info *map, const void *ptr) { From 4527b0887bc5ae69513215f91233c27808cd1f04 Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Mon, 8 Jan 2018 15:36:48 -0500 Subject: [PATCH 1879/3326] mtd: nand: brcmnand: Disable prefetch by default commit f953f0f89663c39f08f4baaa8a4a881401b65654 upstream. Brcm nand controller prefetch feature needs to be disabled by default. Enabling affects performance on random reads as well as dma reads. Signed-off-by: Kamal Dasu Fixes: 27c5b17cd1b1 ("mtd: nand: add NAND driver "library" for Broadcom STB NAND controller") Acked-by: Florian Fainelli Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/brcmnand/brcmnand.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index e0eb51d8c01299..edf24c148fa604 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -2193,16 +2193,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) if (ctrl->nand_version >= 0x0702) tmp |= ACC_CONTROL_RD_ERASED; tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; - if (ctrl->features & BRCMNAND_HAS_PREFETCH) { - /* - * FIXME: Flash DMA + prefetch may see spurious erased-page ECC - * errors - */ - if (has_flash_dma(ctrl)) - tmp &= ~ACC_CONTROL_PREFETCH; - else - tmp |= ACC_CONTROL_PREFETCH; - } + if (ctrl->features & BRCMNAND_HAS_PREFETCH) + tmp &= ~ACC_CONTROL_PREFETCH; + nand_writereg(ctrl, offs, tmp); return 0; From 16d11602c83b45f448355e02dd97a0d855016ef3 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 12 Jan 2018 10:13:36 +0100 Subject: [PATCH 1880/3326] mtd: nand: Fix nand_do_read_oob() return value commit 87e89ce8d0d14f573c068c61bec2117751fb5103 upstream. Starting from commit 041e4575f034 ("mtd: nand: handle ECC errors in OOB"), nand_do_read_oob() (from the NAND core) did return 0 or a negative error, and the MTD layer expected it. However, the trend for the NAND layer is now to return an error or a positive number of bitflips. Deciding which status to return to the user belongs to the MTD layer. Commit e47f68587b82 ("mtd: check for max_bitflips in mtd_read_oob()") brought this logic to the mtd_read_oob() function while the return value coming from nand_do_read_oob() (called by the ->_read_oob() hook) was left unchanged. 
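The convention being restored can be sketched outside the kernel like this (stand-alone C, invented names and threshold): the low-level reader reports the worst number of corrected bitflips it saw, or a negative error, and only the layer above decides what that number means:

    #include <stdio.h>

    #define NPAGES            4
    #define BITFLIP_THRESHOLD 3     /* invented, plays the role of mtd->bitflip_threshold */

    /* Pretend per-page read: corrected bitflips (>= 0) or a negative error. */
    static int read_one_page(int page)
    {
            static const int flips[NPAGES] = { 0, 2, 1, 0 };
            return flips[page];
    }

    /* What the fixed nand_do_read_oob() does: aggregate, don't decide. */
    static int do_read_oob(int first, int count)
    {
            int max_bitflips = 0;
            int page, ret;

            for (page = first; page < first + count; page++) {
                    ret = read_one_page(page);
                    if (ret < 0)
                            return ret;             /* hard error */
                    if (ret > max_bitflips)
                            max_bitflips = ret;     /* max_t(unsigned int, ...) in the driver */
            }
            return max_bitflips;
    }

    int main(void)
    {
            int ret = do_read_oob(0, NPAGES);

            if (ret < 0)
                    printf("read failed: %d\n", ret);
            else if (ret >= BITFLIP_THRESHOLD)
                    printf("data ok but %d bitflips corrected: consider scrubbing\n", ret);
            else
                    printf("data ok, worst page had %d corrected bitflips\n", ret);
            return 0;
    }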
Fixes: e47f68587b82 ("mtd: check for max_bitflips in mtd_read_oob()") Signed-off-by: Miquel Raynal Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/nand_base.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 3f1d806e590a11..a0b0302aea14a9 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2201,6 +2201,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome); static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { + unsigned int max_bitflips = 0; int page, realpage, chipnr; struct nand_chip *chip = mtd_to_nand(mtd); struct mtd_ecc_stats stats; @@ -2258,6 +2259,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, nand_wait_ready(mtd); } + max_bitflips = max_t(unsigned int, max_bitflips, ret); + readlen -= len; if (!readlen) break; @@ -2283,7 +2286,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, if (mtd->ecc_stats.failed - stats.failed) return -EBADMSG; - return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; + return max_bitflips; } /** From 7eb52a6889e90bcb1280cf2d93f8a80602191d5c Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Wed, 24 Jan 2018 23:49:31 +0100 Subject: [PATCH 1881/3326] mtd: nand: sunxi: Fix ECC strength choice commit f4c6cd1a7f2275d5bc0e494b21fff26f8dde80f0 upstream. When the requested ECC strength does not exactly match the strengths supported by the ECC engine, the driver is selecting the closest strength meeting the 'selected_strength > requested_strength' constraint. Fix the fact that, in this particular case, ecc->strength value was not updated to match the 'selected_strength'. For instance, one can encounter this issue when no ECC requirement is filled in the device tree while the NAND chip minimum requirement is not a strength/step_size combo natively supported by the ECC engine. Fixes: 1fef62c1423b ("mtd: nand: add sunxi NAND flash controller support") Suggested-by: Boris Brezillon Signed-off-by: Miquel Raynal Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/sunxi_nand.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index 82244be3e76684..95897482158211 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -1853,8 +1853,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, /* Add ECC info retrieval from DT */ for (i = 0; i < ARRAY_SIZE(strengths); i++) { - if (ecc->strength <= strengths[i]) + if (ecc->strength <= strengths[i]) { + /* + * Update ecc->strength value with the actual strength + * that will be used by the ECC engine. + */ + ecc->strength = strengths[i]; break; + } } if (i >= ARRAY_SIZE(strengths)) { From c94c39ac85f11698f3f0babb515f79656df3917e Mon Sep 17 00:00:00 2001 From: Clay McClure Date: Thu, 21 Sep 2017 19:01:34 -0700 Subject: [PATCH 1882/3326] ubi: Fix race condition between ubi volume creation and udev commit a51a0c8d213594bc094cb8e54aad0cb6d7f7b9a6 upstream. Similar to commit 714fb87e8bc0 ("ubi: Fix race condition between ubi device creation and udev"), we should make the volume active before registering it. 
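The ordering rule can be sketched in plain C (stand-alone, invented names): publication into the lookup structures comes before the step that lets other contexts go looking for the object, and the error path retracts it again:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_VOLS 8

    static pthread_mutex_t volumes_lock = PTHREAD_MUTEX_INITIALIZER;
    static const char *volumes[MAX_VOLS];       /* NULL means "no such volume" */

    /* Stand-in for cdev/sysfs registration: once this returns, user space
     * may immediately try to open the volume. */
    static int announce_volume(int id)
    {
            printf("volume %d announced\n", id);
            return (id < MAX_VOLS) ? 0 : -1;
    }

    static int create_volume(int id, const char *name)
    {
            int err;

            /* 1. Make the volume findable first... */
            pthread_mutex_lock(&volumes_lock);
            volumes[id] = name;
            pthread_mutex_unlock(&volumes_lock);

            /* 2. ...then let the rest of the world know about it. */
            err = announce_volume(id);
            if (err) {
                    /* The error path must undo the publication. */
                    pthread_mutex_lock(&volumes_lock);
                    volumes[id] = NULL;
                    pthread_mutex_unlock(&volumes_lock);
            }
            return err;
    }

    int main(void)
    {
            if (!create_volume(2, "data"))
                    printf("lookup after create: %s\n",
                           volumes[2] ? volumes[2] : "(missing)");
            return 0;
    }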
Signed-off-by: Clay McClure Signed-off-by: Richard Weinberger Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/ubi/vmt.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 85237cf661f924..3fd8d7ff7a02ff 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) vol->last_eb_bytes = vol->usable_leb_size; } + /* Make volume "available" before it becomes accessible via sysfs */ + spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = vol; + ubi->vol_count += 1; + spin_unlock(&ubi->volumes_lock); + /* Register character device for the volume */ cdev_init(&vol->cdev, &ubi_vol_cdev_operations); vol->cdev.owner = THIS_MODULE; @@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) if (err) goto out_sysfs; - spin_lock(&ubi->volumes_lock); - ubi->volumes[vol_id] = vol; - ubi->vol_count += 1; - spin_unlock(&ubi->volumes_lock); - ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); self_check_volumes(ubi); return err; @@ -315,6 +316,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) */ cdev_device_del(&vol->cdev, &vol->dev); out_mapping: + spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = NULL; + ubi->vol_count -= 1; + spin_unlock(&ubi->volumes_lock); ubi_eba_destroy_table(eba_tbl); out_acc: spin_lock(&ubi->volumes_lock); From 5793f39de7f64938c23cc062b1e170505789b0b0 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 5 Dec 2017 16:01:20 +0100 Subject: [PATCH 1883/3326] ubi: fastmap: Erase outdated anchor PEBs during attach commit f78e5623f45bab2b726eec29dc5cefbbab2d0b1c upstream. The fastmap update code might erase the current fastmap anchor PEB in case it doesn't find any new free PEB. When a power cut happens in this situation we must not have any outdated fastmap anchor PEB on the device, because that would be used to attach during next boot. The easiest way to make that sure is to erase all outdated fastmap anchor PEBs synchronously during attach. Signed-off-by: Sascha Hauer Reviewed-by: Richard Weinberger Fixes: dbb7d2a88d2a ("UBI: Add fastmap core") Signed-off-by: Richard Weinberger Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/ubi/wl.c | 77 ++++++++++++++++++++++++++++++++------------ 1 file changed, 57 insertions(+), 20 deletions(-) diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index b5b8cd6f481c54..668b46202507ce 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1528,6 +1528,46 @@ static void shutdown_work(struct ubi_device *ubi) } } +/** + * erase_aeb - erase a PEB given in UBI attach info PEB + * @ubi: UBI device description object + * @aeb: UBI attach info PEB + * @sync: If true, erase synchronously. Otherwise schedule for erasure + */ +static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync) +{ + struct ubi_wl_entry *e; + int err; + + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); + if (!e) + return -ENOMEM; + + e->pnum = aeb->pnum; + e->ec = aeb->ec; + ubi->lookuptbl[e->pnum] = e; + + if (sync) { + err = sync_erase(ubi, e, false); + if (err) + goto out_free; + + wl_tree_add(e, &ubi->free); + ubi->free_count++; + } else { + err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false); + if (err) + goto out_free; + } + + return 0; + +out_free: + wl_entry_destroy(ubi, e); + + return err; +} + /** * ubi_wl_init - initialize the WL sub-system using attaching information. 
* @ubi: UBI device description object @@ -1566,18 +1606,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { cond_resched(); - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); - if (!e) + err = erase_aeb(ubi, aeb, false); + if (err) goto out_free; - e->pnum = aeb->pnum; - e->ec = aeb->ec; - ubi->lookuptbl[e->pnum] = e; - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { - wl_entry_destroy(ubi, e); - goto out_free; - } - found_pebs++; } @@ -1635,6 +1667,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) ubi_assert(!ubi->lookuptbl[e->pnum]); ubi->lookuptbl[e->pnum] = e; } else { + bool sync = false; + /* * Usually old Fastmap PEBs are scheduled for erasure * and we don't have to care about them but if we face @@ -1644,18 +1678,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (ubi->lookuptbl[aeb->pnum]) continue; - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); - if (!e) - goto out_free; + /* + * The fastmap update code might not find a free PEB for + * writing the fastmap anchor to and then reuses the + * current fastmap anchor PEB. When this PEB gets erased + * and a power cut happens before it is written again we + * must make sure that the fastmap attach code doesn't + * find any outdated fastmap anchors, hence we erase the + * outdated fastmap anchor PEBs synchronously here. + */ + if (aeb->vol_id == UBI_FM_SB_VOLUME_ID) + sync = true; - e->pnum = aeb->pnum; - e->ec = aeb->ec; - ubi_assert(!ubi->lookuptbl[e->pnum]); - ubi->lookuptbl[e->pnum] = e; - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { - wl_entry_destroy(ubi, e); + err = erase_aeb(ubi, aeb, sync); + if (err) goto out_free; - } } found_pebs++; From e9a35f8b73f10c605fba4548957c8317228848c7 Mon Sep 17 00:00:00 2001 From: Bradley Bolen Date: Thu, 18 Jan 2018 08:55:20 -0500 Subject: [PATCH 1884/3326] ubi: block: Fix locking for idr_alloc/idr_remove commit 7f29ae9f977bcdc3654e68bc36d170223c52fd48 upstream. This fixes a race with idr_alloc where gd->first_minor can be set to the same value for two simultaneous calls to ubiblock_create. Each instance calls device_add_disk with the same first_minor. device_add_disk calls bdi_register_owner which generates several warnings. WARNING: CPU: 1 PID: 179 at kernel-source/fs/sysfs/dir.c:31 sysfs_warn_dup+0x68/0x88 sysfs: cannot create duplicate filename '/devices/virtual/bdi/252:2' WARNING: CPU: 1 PID: 179 at kernel-source/lib/kobject.c:240 kobject_add_internal+0x1ec/0x2f8 kobject_add_internal failed for 252:2 with -EEXIST, don't try to register things with the same name in the same directory WARNING: CPU: 1 PID: 179 at kernel-source/fs/sysfs/dir.c:31 sysfs_warn_dup+0x68/0x88 sysfs: cannot create duplicate filename '/dev/block/252:2' However, device_add_disk does not error out when bdi_register_owner returns an error. Control continues until reaching blk_register_queue. It then BUGs. kernel BUG at kernel-source/fs/sysfs/group.c:113! 
[] (internal_create_group) from [] (sysfs_create_group+0x20/0x24) [] (sysfs_create_group) from [] (blk_trace_init_sysfs+0x18/0x20) [] (blk_trace_init_sysfs) from [] (blk_register_queue+0xd8/0x154) [] (blk_register_queue) from [] (device_add_disk+0x194/0x44c) [] (device_add_disk) from [] (ubiblock_create+0x284/0x2e0) [] (ubiblock_create) from [] (vol_cdev_ioctl+0x450/0x554) [] (vol_cdev_ioctl) from [] (vfs_ioctl+0x30/0x44) [] (vfs_ioctl) from [] (do_vfs_ioctl+0xa0/0x790) [] (do_vfs_ioctl) from [] (SyS_ioctl+0x44/0x68) [] (SyS_ioctl) from [] (ret_fast_syscall+0x0/0x34) Locking idr_alloc/idr_remove removes the race and keeps gd->first_minor unique. Fixes: 2bf50d42f3a4 ("UBI: block: Dynamically allocate minor numbers") Signed-off-by: Bradley Bolen Reviewed-by: Boris Brezillon Signed-off-by: Richard Weinberger Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/ubi/block.c | 42 +++++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index b210fdb31c9831..b1fc28f63882e7 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -99,6 +99,8 @@ struct ubiblock { /* Linked list of all ubiblock instances */ static LIST_HEAD(ubiblock_devices); +static DEFINE_IDR(ubiblock_minor_idr); +/* Protects ubiblock_devices and ubiblock_minor_idr */ static DEFINE_MUTEX(devices_mutex); static int ubiblock_major; @@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = { .init_request = ubiblock_init_request, }; -static DEFINE_IDR(ubiblock_minor_idr); - int ubiblock_create(struct ubi_volume_info *vi) { struct ubiblock *dev; @@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi) /* Check that the volume isn't already handled */ mutex_lock(&devices_mutex); if (find_dev_nolock(vi->ubi_num, vi->vol_id)) { - mutex_unlock(&devices_mutex); - return -EEXIST; + ret = -EEXIST; + goto out_unlock; } - mutex_unlock(&devices_mutex); dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL); - if (!dev) - return -ENOMEM; + if (!dev) { + ret = -ENOMEM; + goto out_unlock; + } mutex_init(&dev->dev_mutex); @@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi) goto out_free_queue; } - mutex_lock(&devices_mutex); list_add_tail(&dev->list, &ubiblock_devices); - mutex_unlock(&devices_mutex); /* Must be the last step: anyone can call file ops from now on */ add_disk(dev->gd); dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", dev->ubi_num, dev->vol_id, vi->name); + mutex_unlock(&devices_mutex); return 0; out_free_queue: @@ -457,6 +457,8 @@ int ubiblock_create(struct ubi_volume_info *vi) put_disk(dev->gd); out_free_dev: kfree(dev); +out_unlock: + mutex_unlock(&devices_mutex); return ret; } @@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev) int ubiblock_remove(struct ubi_volume_info *vi) { struct ubiblock *dev; + int ret; mutex_lock(&devices_mutex); dev = find_dev_nolock(vi->ubi_num, vi->vol_id); if (!dev) { - mutex_unlock(&devices_mutex); - return -ENODEV; + ret = -ENODEV; + goto out_unlock; } /* Found a device, let's lock it so we can check if it's busy */ mutex_lock(&dev->dev_mutex); if (dev->refcnt > 0) { - mutex_unlock(&dev->dev_mutex); - mutex_unlock(&devices_mutex); - return -EBUSY; + ret = -EBUSY; + goto out_unlock_dev; } /* Remove from device list */ list_del(&dev->list); - mutex_unlock(&devices_mutex); - ubiblock_cleanup(dev); mutex_unlock(&dev->dev_mutex); + mutex_unlock(&devices_mutex); + kfree(dev); return 0; + +out_unlock_dev: + 
mutex_unlock(&dev->dev_mutex); +out_unlock: + mutex_unlock(&devices_mutex); + return ret; } static int ubiblock_resize(struct ubi_volume_info *vi) @@ -630,6 +638,7 @@ static void ubiblock_remove_all(void) struct ubiblock *next; struct ubiblock *dev; + mutex_lock(&devices_mutex); list_for_each_entry_safe(dev, next, &ubiblock_devices, list) { /* The module is being forcefully removed */ WARN_ON(dev->desc); @@ -638,6 +647,7 @@ static void ubiblock_remove_all(void) ubiblock_cleanup(dev); kfree(dev); } + mutex_unlock(&devices_mutex); } int __init ubiblock_init(void) From 068e53f9b1921bd6a3ed2340dc5d85792bdde089 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 11 Jan 2018 23:27:00 -0500 Subject: [PATCH 1885/3326] ubifs: free the encrypted symlink target commit 6b46d444146eb8d0b99562795cea8086639d7282 upstream. ubifs_symlink() forgot to free the kmalloc()'ed buffer holding the encrypted symlink target, creating a memory leak. Fix it. (UBIFS could actually encrypt directly into ui->data, removing the temporary buffer, but that is left for the patch that switches to use the symlink helper functions.) Fixes: ca7f85be8d6c ("ubifs: Add support for encrypted symlinks") Signed-off-by: Eric Biggers Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ubifs/dir.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 417fe0b29f23e9..ef820f8031765c 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -1216,10 +1216,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, ostr.len = disk_link.len; err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr); - if (err) { - kfree(sd); + if (err) goto out_inode; - } sd->len = cpu_to_le16(ostr.len); disk_link.name = (char *)sd; @@ -1251,11 +1249,10 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, goto out_cancel; mutex_unlock(&dir_ui->ui_mutex); - ubifs_release_budget(c, &req); insert_inode_hash(inode); d_instantiate(dentry, inode); - fscrypt_free_filename(&nm); - return 0; + err = 0; + goto out_fname; out_cancel: dir->i_size -= sz_change; @@ -1268,6 +1265,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, fscrypt_free_filename(&nm); out_budg: ubifs_release_budget(c, &req); + kfree(sd); return err; } From ace34428faa6be2098c9ac295394c11e65db2263 Mon Sep 17 00:00:00 2001 From: Scott Mayhew Date: Fri, 15 Dec 2017 16:12:32 -0500 Subject: [PATCH 1886/3326] nfs/pnfs: fix nfs_direct_req ref leak when i/o falls back to the mds commit ba4a76f703ab7eb72941fdaac848502073d6e9ee upstream. Currently when falling back to doing I/O through the MDS (via pnfs_{read|write}_through_mds), the client frees the nfs_pgio_header without releasing the reference taken on the dreq via pnfs_generic_pg_{read|write}pages -> nfs_pgheader_init -> nfs_direct_pgio_init. It then takes another reference on the dreq via nfs_generic_pg_pgios -> nfs_pgheader_init -> nfs_direct_pgio_init and as a result the requester will become stuck in inode_dio_wait. Once that happens, other processes accessing the inode will become stuck as well. Ensure that pnfs_read_through_mds() and pnfs_write_through_mds() clean up correctly by calling hdr->completion_ops->completion() instead of calling hdr->release() directly. This can be reproduced (sometimes) by performing "storage failover takeover" commands on NetApp filer while doing direct I/O from a client. 
This can also be reproduced using SystemTap to simulate a failure while doing direct I/O from a client (from Dave Wysochanski ): stap -v -g -e 'probe module("nfs_layout_nfsv41_files").function("nfs4_fl_prepare_ds").return { $return=NULL; exit(); }' Suggested-by: Trond Myklebust Signed-off-by: Scott Mayhew Fixes: 1ca018d28d ("pNFS: Fix a memory leak when attempted pnfs fails") Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/pnfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 3bcd669a315237..5f2f852ef50630 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -2237,7 +2237,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, nfs_pageio_reset_write_mds(desc); mirror->pg_recoalesce = 1; } - hdr->release(hdr); + hdr->completion_ops->completion(hdr); } static enum pnfs_try_status @@ -2360,7 +2360,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, nfs_pageio_reset_read_mds(desc); mirror->pg_recoalesce = 1; } - hdr->release(hdr); + hdr->completion_ops->completion(hdr); } /* From 3ac2d17a6ed38d3e662e56ad79e0ba1a98247830 Mon Sep 17 00:00:00 2001 From: Tigran Mkrtchyan Date: Tue, 16 Jan 2018 22:38:50 +0100 Subject: [PATCH 1887/3326] nfs41: do not return ENOMEM on LAYOUTUNAVAILABLE commit 7ff4cff637aa0bd2abbd81f53b2a6206c50afd95 upstream. A pNFS server may return LAYOUTUNAVAILABLE error on LAYOUTGET for files which don't have any layout. In this situation pnfs_update_layout currently returns NULL. As this NULL is converted into ENOMEM, IO requests fails instead of falling back to MDS. Do not return ENOMEM on LAYOUTUNAVAILABLE and let client retry through MDS. Fixes 8d40b0f14846f. I will suggest to backport this fix to affected stable branches. Signed-off-by: Tigran Mkrtchyan [trondmy: Use IS_ERR_OR_NULL()] Fixes: 8d40b0f14846 ("NFS filelayout:call GETDEVICEINFO after...") Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/filelayout/filelayout.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 508126eb49f957..3db2b74647483a 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -895,9 +895,7 @@ fl_pnfs_update_layout(struct inode *ino, lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode, gfp_flags); - if (!lseg) - lseg = ERR_PTR(-ENOMEM); - if (IS_ERR(lseg)) + if (IS_ERR_OR_NULL(lseg)) goto out; lo = NFS_I(ino)->layout; From 3d03af006438b91dab7c1feb474a5c899554acac Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 18 Dec 2017 14:39:13 -0500 Subject: [PATCH 1888/3326] NFS: Add a cond_resched() to nfs_commit_release_pages() commit 7f1bda447c9bd48b415acedba6b830f61591601f upstream. The commit list can get very large, and so we need a cond_resched() in nfs_commit_release_pages() in order to ensure we don't hog the CPU for excessive periods of time. 
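There is no direct user-space equivalent of cond_resched(), but the shape of the fix (a long list walk that periodically offers the CPU back) can be sketched with sched_yield() as a rough stand-in; all names below are invented, illustration only:

    #include <sched.h>
    #include <stdio.h>

    #define NITEMS       1000000
    #define YIELD_PERIOD 1024       /* invented batching factor */

    static void process_item(long i)
    {
            (void)i;        /* stand-in for releasing one commit request */
    }

    int main(void)
    {
            long i;

            for (i = 0; i < NITEMS; i++) {
                    process_item(i);

                    /* Latency breaker: don't monopolise the CPU for the whole
                     * walk; in the kernel this is a single cond_resched()
                     * call inside the loop over the commit list. */
                    if ((i % YIELD_PERIOD) == 0)
                            sched_yield();
            }
            printf("processed %ld items\n", i);
            return 0;
    }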
Reported-by: Mike Galbraith Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/write.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index de325804941d1f..76da415be39a57 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1836,6 +1836,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); next: nfs_unlock_and_release_request(req); + /* Latency breaker */ + cond_resched(); } nfss = NFS_SERVER(data->inode); if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) From 6421f29eb8af8992fd8a85823b435d44bb055b1c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 6 Jan 2018 09:53:49 -0500 Subject: [PATCH 1889/3326] NFS: Fix nfsstat breakage due to LOOKUPP commit 8634ef5e05311f32d7f2aee06f6b27a8834a3bd6 upstream. The LOOKUPP operation was inserted into the nfs4_procedures array rather than being appended, which put /proc/net/rpc/nfs out of whack, and broke the nfsstat utility. Fix by moving the LOOKUPP operation to the end of the array, and by ensuring that it keeps the same length whether or not NFSV4.1 and NFSv4.2 are compiled in. Fixes: 5b5faaf6df734 ("nfs4: add NFSv4 LOOKUPP handlers") Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/nfs4xdr.c | 64 ++++++++++++++++++++++++++------------------ include/linux/nfs4.h | 12 ++++++--- 2 files changed, 46 insertions(+), 30 deletions(-) diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 14ed9791ec9cf1..549c916d28599f 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -7668,6 +7668,22 @@ nfs4_stat_to_errno(int stat) .p_name = #proc, \ } +#if defined(CONFIG_NFS_V4_1) +#define PROC41(proc, argtype, restype) \ + PROC(proc, argtype, restype) +#else +#define PROC41(proc, argtype, restype) \ + STUB(proc) +#endif + +#if defined(CONFIG_NFS_V4_2) +#define PROC42(proc, argtype, restype) \ + PROC(proc, argtype, restype) +#else +#define PROC42(proc, argtype, restype) \ + STUB(proc) +#endif + const struct rpc_procinfo nfs4_procedures[] = { PROC(READ, enc_read, dec_read), PROC(WRITE, enc_write, dec_write), @@ -7688,7 +7704,6 @@ const struct rpc_procinfo nfs4_procedures[] = { PROC(ACCESS, enc_access, dec_access), PROC(GETATTR, enc_getattr, dec_getattr), PROC(LOOKUP, enc_lookup, dec_lookup), - PROC(LOOKUPP, enc_lookupp, dec_lookupp), PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root), PROC(REMOVE, enc_remove, dec_remove), PROC(RENAME, enc_rename, dec_rename), @@ -7707,33 +7722,30 @@ const struct rpc_procinfo nfs4_procedures[] = { PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner), PROC(SECINFO, enc_secinfo, dec_secinfo), PROC(FSID_PRESENT, enc_fsid_present, dec_fsid_present), -#if defined(CONFIG_NFS_V4_1) - PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id), - PROC(CREATE_SESSION, enc_create_session, dec_create_session), - PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session), - PROC(SEQUENCE, enc_sequence, dec_sequence), - PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time), - PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete), - PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo), - PROC(LAYOUTGET, enc_layoutget, dec_layoutget), - PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit), - PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn), - PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name), - PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid), - PROC(FREE_STATEID, enc_free_stateid, 
dec_free_stateid), + PROC41(EXCHANGE_ID, enc_exchange_id, dec_exchange_id), + PROC41(CREATE_SESSION, enc_create_session, dec_create_session), + PROC41(DESTROY_SESSION, enc_destroy_session, dec_destroy_session), + PROC41(SEQUENCE, enc_sequence, dec_sequence), + PROC41(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time), + PROC41(RECLAIM_COMPLETE,enc_reclaim_complete, dec_reclaim_complete), + PROC41(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo), + PROC41(LAYOUTGET, enc_layoutget, dec_layoutget), + PROC41(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit), + PROC41(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn), + PROC41(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name), + PROC41(TEST_STATEID, enc_test_stateid, dec_test_stateid), + PROC41(FREE_STATEID, enc_free_stateid, dec_free_stateid), STUB(GETDEVICELIST), - PROC(BIND_CONN_TO_SESSION, + PROC41(BIND_CONN_TO_SESSION, enc_bind_conn_to_session, dec_bind_conn_to_session), - PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid), -#endif /* CONFIG_NFS_V4_1 */ -#ifdef CONFIG_NFS_V4_2 - PROC(SEEK, enc_seek, dec_seek), - PROC(ALLOCATE, enc_allocate, dec_allocate), - PROC(DEALLOCATE, enc_deallocate, dec_deallocate), - PROC(LAYOUTSTATS, enc_layoutstats, dec_layoutstats), - PROC(CLONE, enc_clone, dec_clone), - PROC(COPY, enc_copy, dec_copy), -#endif /* CONFIG_NFS_V4_2 */ + PROC41(DESTROY_CLIENTID,enc_destroy_clientid, dec_destroy_clientid), + PROC42(SEEK, enc_seek, dec_seek), + PROC42(ALLOCATE, enc_allocate, dec_allocate), + PROC42(DEALLOCATE, enc_deallocate, dec_deallocate), + PROC42(LAYOUTSTATS, enc_layoutstats, dec_layoutstats), + PROC42(CLONE, enc_clone, dec_clone), + PROC42(COPY, enc_copy, dec_copy), + PROC(LOOKUPP, enc_lookupp, dec_lookupp), }; static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)]; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 47adac640191f8..57ffaa20d5649f 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -457,7 +457,12 @@ enum lock_type4 { #define NFS4_DEBUG 1 -/* Index of predefined Linux client operations */ +/* + * Index of predefined Linux client operations + * + * To ensure that /proc/net/rpc/nfs remains correctly ordered, please + * append only to this enum when adding new client operations. + */ enum { NFSPROC4_CLNT_NULL = 0, /* Unused */ @@ -480,7 +485,6 @@ enum { NFSPROC4_CLNT_ACCESS, NFSPROC4_CLNT_GETATTR, NFSPROC4_CLNT_LOOKUP, - NFSPROC4_CLNT_LOOKUPP, NFSPROC4_CLNT_LOOKUP_ROOT, NFSPROC4_CLNT_REMOVE, NFSPROC4_CLNT_RENAME, @@ -500,7 +504,6 @@ enum { NFSPROC4_CLNT_SECINFO, NFSPROC4_CLNT_FSID_PRESENT, - /* nfs41 */ NFSPROC4_CLNT_EXCHANGE_ID, NFSPROC4_CLNT_CREATE_SESSION, NFSPROC4_CLNT_DESTROY_SESSION, @@ -518,13 +521,14 @@ enum { NFSPROC4_CLNT_BIND_CONN_TO_SESSION, NFSPROC4_CLNT_DESTROY_CLIENTID, - /* nfs42 */ NFSPROC4_CLNT_SEEK, NFSPROC4_CLNT_ALLOCATE, NFSPROC4_CLNT_DEALLOCATE, NFSPROC4_CLNT_LAYOUTSTATS, NFSPROC4_CLNT_CLONE, NFSPROC4_CLNT_COPY, + + NFSPROC4_CLNT_LOOKUPP, }; /* nfs41 types */ From 60017643c21060b600faeae9a58e450804ea288d Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 16 Jan 2018 10:08:00 -0500 Subject: [PATCH 1890/3326] NFS: commit direct writes even if they fail partially commit 1b8d97b0a837beaf48a8449955b52c650a7114b4 upstream. If some of the WRITE calls making up an O_DIRECT write syscall fail, we neglect to commit, even if some of the WRITEs succeed. 
We also depend on the commit code to free the reference count on the nfs_page taken in the "if (request_commit)" case at the end of nfs_direct_write_completion(). The problem was originally noticed because ENOSPC's encountered partway through a write would result in a closed file being sillyrenamed when it should have been unlinked. Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/direct.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index d2972d53746950..8c10b0562e75d2 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -775,10 +775,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) spin_lock(&dreq->lock); - if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { - dreq->flags = 0; + if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) dreq->error = hdr->error; - } if (dreq->error == 0) { nfs_direct_good_bytes(dreq, hdr); if (nfs_write_need_commit(hdr)) { From 1c38ad2f26d8511cde75ce5767313e8c3e789d69 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 19 Jan 2018 15:15:34 -0800 Subject: [PATCH 1891/3326] NFS: reject request for id_legacy key without auxdata commit 49686cbbb3ebafe42e63868222f269d8053ead00 upstream. nfs_idmap_legacy_upcall() is supposed to be called with 'aux' pointing to a 'struct idmap', via the call to request_key_with_auxdata() in nfs_idmap_request_key(). However it can also be reached via the request_key() system call in which case 'aux' will be NULL, causing a NULL pointer dereference in nfs_idmap_prepare_pipe_upcall(), assuming that the key description is valid enough to get that far. Fix this by making nfs_idmap_legacy_upcall() negate the key if no auxdata is provided. As usual, this bug was found by syzkaller. A simple reproducer using the command-line keyctl program is: keyctl request2 id_legacy uid:0 '' @s Fixes: 57e62324e469 ("NFS: Store the legacy idmapper result in the keyring") Reported-by: syzbot+5dfdbcf7b3eb5912abbb@syzkaller.appspotmail.com Signed-off-by: Eric Biggers Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/nfs4idmap.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 30426c1a1bbda2..22dc30a679a036 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -568,9 +568,13 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, struct idmap_msg *im; struct idmap *idmap = (struct idmap *)aux; struct key *key = cons->key; - int ret = -ENOMEM; + int ret = -ENOKEY; + + if (!aux) + goto out1; /* msg and im are freed in idmap_pipe_destroy_msg */ + ret = -ENOMEM; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) goto out1; From f1e31607e73d5f4546ff8d1e40e1153230392188 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 28 Jan 2018 09:29:41 -0500 Subject: [PATCH 1892/3326] NFS: Fix a race between mmap() and O_DIRECT commit e231c6879cfd44e4fffd384bb6dd7d313249a523 upstream. When locking the file in order to do O_DIRECT on it, we must unmap any mmapped ranges on the pagecache so that we can flush out the dirty data. 
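The unmap-then-flush sequence this relies on can be sketched with the generic primitives below; this is an illustrative helper, not the actual NFS code, and it assumes only unmap_mapping_range() and filemap_write_and_wait():

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative only: tear down userspace mappings so their dirty state
 * reaches the pagecache, then write the pagecache back. */
static int demo_sync_mapping(struct address_space *mapping)
{
	if (mapping->nrpages == 0)
		return 0;

	/* Zap every mmapped range of the file. */
	unmap_mapping_range(mapping, 0, 0, 0);

	/* Flush the now-dirty pagecache before O_DIRECT proceeds. */
	return filemap_write_and_wait(mapping);
}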
Fixes: a5864c999de67 ("NFS: Do not serialise O_DIRECT reads and writes") Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/io.c b/fs/nfs/io.c index 20fef85d2bb157..9034b4926909a9 100644 --- a/fs/nfs/io.c +++ b/fs/nfs/io.c @@ -99,7 +99,7 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode) { if (!test_bit(NFS_INO_ODIRECT, &nfsi->flags)) { set_bit(NFS_INO_ODIRECT, &nfsi->flags); - nfs_wb_all(inode); + nfs_sync_mapping(inode->i_mapping); } } From 650d3d8512c6c542d7c11ece049b46d5f2337ce8 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Fri, 19 Jan 2018 09:18:54 +0100 Subject: [PATCH 1893/3326] kernfs: fix regression in kernfs_fop_write caused by wrong type commit ba87977a49913129962af8ac35b0e13e0fa4382d upstream. Commit b7ce40cff0b9 ("kernfs: cache atomic_write_len in kernfs_open_file") changes the type of the local variable 'len' from ssize_t to size_t. This change causes the *ppos value to be updated even when the previous write callback failed. The snippet in question: ... len = ops->write(...); <- return value can be negative ... if (len > 0) <- true here in this case *ppos += len; ... Fixes: b7ce40cff0b9 ("kernfs: cache atomic_write_len in kernfs_open_file") Acked-by: Tejun Heo Signed-off-by: Ivan Vecera Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/kernfs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 9698e51656b10b..d8f49c412f508d 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -275,7 +275,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf, { struct kernfs_open_file *of = kernfs_of(file); const struct kernfs_ops *ops; - size_t len; + ssize_t len; char *buf; if (of->atomic_write_len) { From e985f7c8ac3801ce6b684d1b248f63701190154b Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 6 Dec 2017 16:41:08 +0100 Subject: [PATCH 1894/3326] ahci: Annotate PCI ids for mobile Intel chipsets as such commit ca1b4974bd237f2373b0e980b11957aac3499b56 upstream. Intel uses different SATA PCI ids for the Desktop and Mobile SKUs of their chipsets. For older models, the comment describing which chipset the PCI id is for also indicates when we're dealing with a mobile SKU. Extend the comments for recent chipsets to also indicate mobile SKUs. The information this commit adds comes from Intel's chipset datasheets. This commit is a preparation patch for allowing a different default SATA link power management policy for mobile chipsets.
Signed-off-by: Hans de Goede Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- drivers/ata/ahci.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 9f78bb03bb763c..3095bd291cfa00 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -267,9 +267,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ - { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ + { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH M AHCI */ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ - { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH M RAID */ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ @@ -292,9 +292,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ - { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ + { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT M AHCI */ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ - { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ + { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT M RAID */ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ @@ -303,20 +303,20 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ - { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */ + { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point M AHCI */ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ - { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point M RAID */ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */ - { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */ + { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point M AHCI */ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point M RAID */ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point M RAID */ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point M RAID */ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ { PCI_VDEVICE(INTEL, 
0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ @@ -357,21 +357,21 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ - { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */ + { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series M AHCI */ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */ - { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H M AHCI */ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */ - { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H M RAID */ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ From 53c5963da84337b71abff8b85291cd25a1002c17 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 6 Dec 2017 16:41:09 +0100 Subject: [PATCH 1895/3326] ahci: Add PCI ids for Intel Bay Trail, Cherry Trail and Apollo Lake AHCI commit 998008b779e424bd7513c434d0ab9c1268459009 upstream. Add PCI ids for Intel Bay Trail, Cherry Trail and Apollo Lake AHCI SATA controllers. This commit is a preparation patch for allowing a different default sata link powermanagement policy for mobile chipsets. 
Signed-off-by: Hans de Goede Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- drivers/ata/ahci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 3095bd291cfa00..5336e569386e59 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -385,6 +385,10 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, From 449704d3ba6f45585da8cc033af2e3178d7344a7 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Thu, 11 Jan 2018 15:55:50 +0300 Subject: [PATCH 1896/3326] ahci: Add Intel Cannon Lake PCH-H PCI ID commit f919dde0772a894c693a1eeabc77df69d6a9b937 upstream. Add Intel Cannon Lake PCH-H PCI ID to the list of supported controllers. Signed-off-by: Mika Westerberg Signed-off-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- drivers/ata/ahci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 5336e569386e59..bc013f757d5daa 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -385,6 +385,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */ From b8b32e2e68f0a3dd67edb34710964dc3e67bdf3b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:22 -0800 Subject: [PATCH 1897/3326] crypto: hash - introduce crypto_hash_alg_has_setkey() commit cd6ed77ad5d223dc6299fb58f62e0f5267f7e2ba upstream. Templates that use an shash spawn can use crypto_shash_alg_has_setkey() to determine whether the underlying algorithm requires a key or not. But there was no corresponding function for ahash spawns. Add it. Note that the new function actually has to support both shash and ahash algorithms, since the ahash API can be used with either. 
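A minimal sketch of how a template can consume the new helper when wrapping an ahash, much as the cryptd and mcryptd patches later in this series do; the demo_* names are placeholders, not part of any real API:

#include <crypto/internal/hash.h>

/* Placeholder ->setkey() for the wrapping instance. */
static int demo_template_setkey(struct crypto_ahash *tfm,
				const u8 *key, unsigned int keylen)
{
	return 0;
}

/* Only install ->setkey() when the underlying hash implements one. */
static void demo_init_instance_setkey(struct ahash_instance *inst,
				      struct hash_alg_common *halg)
{
	if (crypto_hash_alg_has_setkey(halg))
		inst->alg.setkey = demo_template_setkey;
}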
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/ahash.c | 11 +++++++++++ include/crypto/internal/hash.h | 2 ++ 2 files changed, 13 insertions(+) diff --git a/crypto/ahash.c b/crypto/ahash.c index 5e8666e6ccaeaa..63f3fd3a788e82 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -655,5 +655,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(ahash_attr_alg); +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +{ + struct crypto_alg *alg = &halg->base; + + if (alg->cra_type != &crypto_ahash_type) + return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); + + return __crypto_ahash_alg(alg)->setkey != NULL; +} +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index c2bae8da642cba..27040a46d50aab 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) return alg->setkey != shash_no_setkey; } +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, struct hash_alg_common *alg, struct crypto_instance *inst); From ed7b0af0ca02877cb4e23546da0d44dd49156bd7 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:23 -0800 Subject: [PATCH 1898/3326] crypto: cryptd - pass through absence of ->setkey() commit 841a3ff329713f796a63356fef6e2f72e4a3f6a3 upstream. When the cryptd template is used to wrap an unkeyed hash algorithm, don't install a ->setkey() method to the cryptd instance. This change is necessary for cryptd to keep working with unkeyed hash algorithms once we start enforcing that ->setkey() is called when present. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/cryptd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 0508c48a45c4fc..d5182e7b2a9cd5 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -913,7 +913,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.finup = cryptd_hash_finup_enqueue; inst->alg.export = cryptd_hash_export; inst->alg.import = cryptd_hash_import; - inst->alg.setkey = cryptd_hash_setkey; + if (crypto_shash_alg_has_setkey(salg)) + inst->alg.setkey = cryptd_hash_setkey; inst->alg.digest = cryptd_hash_digest_enqueue; err = ahash_register_instance(tmpl, inst); From 16210524c424a5df0cdd9b2b6aaee037f995f6ad Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:24 -0800 Subject: [PATCH 1899/3326] crypto: mcryptd - pass through absence of ->setkey() commit fa59b92d299f2787e6bae1ff078ee0982e80211f upstream. When the mcryptd template is used to wrap an unkeyed hash algorithm, don't install a ->setkey() method to the mcryptd instance. This change is necessary for mcryptd to keep working with unkeyed hash algorithms once we start enforcing that ->setkey() is called when present. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/mcryptd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index eca04d3729b37c..6b4dd4b769426a 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -535,7 +535,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.finup = mcryptd_hash_finup_enqueue; inst->alg.export = mcryptd_hash_export; inst->alg.import = mcryptd_hash_import; - inst->alg.setkey = mcryptd_hash_setkey; + if (crypto_hash_alg_has_setkey(halg)) + inst->alg.setkey = mcryptd_hash_setkey; inst->alg.digest = mcryptd_hash_digest_enqueue; err = ahash_register_instance(tmpl, inst); From b806c0cc4c73511be382da411effdbb96f15919a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:25 -0800 Subject: [PATCH 1900/3326] crypto: poly1305 - remove ->setkey() method commit a16e772e664b9a261424107784804cffc8894977 upstream. Since Poly1305 requires a nonce per invocation, the Linux kernel implementations of Poly1305 don't use the crypto API's keying mechanism and instead expect the key and nonce as the first 32 bytes of the data. But ->setkey() is still defined as a stub returning an error code. This prevents Poly1305 from being used through AF_ALG and will also break it completely once we start enforcing that all crypto API users (not just AF_ALG) call ->setkey() if present. Fix it by removing crypto_poly1305_setkey(), leaving ->setkey as NULL. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/poly1305_glue.c | 1 - crypto/poly1305_generic.c | 17 +++++------------ include/crypto/poly1305.h | 2 -- 3 files changed, 5 insertions(+), 15 deletions(-) diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index e32142bc071d93..28c372003e44c1 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c @@ -164,7 +164,6 @@ static struct shash_alg alg = { .init = poly1305_simd_init, .update = poly1305_simd_update, .final = crypto_poly1305_final, - .setkey = crypto_poly1305_setkey, .descsize = sizeof(struct poly1305_simd_desc_ctx), .base = { .cra_name = "poly1305", diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index b1c2d57dc734a3..ba39eb308c791d 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c @@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc) } EXPORT_SYMBOL_GPL(crypto_poly1305_init); -int crypto_poly1305_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - /* Poly1305 requires a unique key for each tag, which implies that - * we can't set it on the tfm that gets accessed by multiple users - * simultaneously. Instead we expect the key as the first 32 bytes in - * the update() call. */ - return -ENOTSUPP; -} -EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); - static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) { /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ @@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) dctx->s[3] = get_unaligned_le32(key + 12); } +/* + * Poly1305 requires a unique key for each tag, which implies that we can't set + * it on the tfm that gets accessed by multiple users simultaneously. Instead we + * expect the key as the first 32 bytes in the update() call. 
+ */ unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen) { @@ -281,7 +275,6 @@ static struct shash_alg poly1305_alg = { .init = crypto_poly1305_init, .update = crypto_poly1305_update, .final = crypto_poly1305_final, - .setkey = crypto_poly1305_setkey, .descsize = sizeof(struct poly1305_desc_ctx), .base = { .cra_name = "poly1305", diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index c65567d01e8e73..f718a19da82f75 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h @@ -31,8 +31,6 @@ struct poly1305_desc_ctx { }; int crypto_poly1305_init(struct shash_desc *desc); -int crypto_poly1305_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen); unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen); int crypto_poly1305_update(struct shash_desc *desc, From 8d906d183bce4b0eb812e5766ee5016f16dc201c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:26 -0800 Subject: [PATCH 1901/3326] crypto: hash - annotate algorithms taking optional key commit a208fa8f33031b9e0aba44c7d1b7e68eb0cbd29e upstream. We need to consistently enforce that keyed hashes cannot be used without setting the key. To do this we need a reliable way to determine whether a given hash algorithm is keyed or not. AF_ALG currently does this by checking for the presence of a ->setkey() method. However, this is actually slightly broken because the CRC-32 algorithms implement ->setkey() but can also be used without a key. (The CRC-32 "key" is not actually a cryptographic key but rather represents the initial state. If not overridden, then a default initial state is used.) Prepare to fix this by introducing a flag CRYPTO_ALG_OPTIONAL_KEY which indicates that the algorithm has a ->setkey() method, but it is not required to be called. Then set it on all the CRC-32 algorithms. The same also applies to the Adler-32 implementation in Lustre. Also, the cryptd and mcryptd templates have to pass through the flag from their underlying algorithm. 
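A rough sketch of what an optional key means for a CRC-32-style shash; everything prefixed demo_ is hypothetical, and only the CRYPTO_ALG_OPTIONAL_KEY flag and the seed-as-key convention come from this patch. The tfm context is zero-initialized by the crypto API, so the default seed is 0 unless ->setkey() overrides it:

#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/module.h>

/* The "key" is not a secret; it merely overrides the default seed. */
static int demo_crc_setkey(struct crypto_shash *tfm, const u8 *key,
			   unsigned int keylen)
{
	u32 *seed = crypto_shash_ctx(tfm);

	if (keylen != sizeof(u32))
		return -EINVAL;
	*seed = get_unaligned_le32(key);
	return 0;
}

static int demo_crc_init(struct shash_desc *desc)
{
	u32 *crcp = shash_desc_ctx(desc);

	*crcp = *(u32 *)crypto_shash_ctx(desc->tfm); /* default or keyed seed */
	return 0;
}

static int demo_crc_update(struct shash_desc *desc, const u8 *data,
			   unsigned int len)
{
	u32 *crcp = shash_desc_ctx(desc);

	*crcp = crc32_le(*crcp, data, len);
	return 0;
}

static int demo_crc_final(struct shash_desc *desc, u8 *out)
{
	put_unaligned_le32(*(u32 *)shash_desc_ctx(desc), out);
	return 0;
}

static struct shash_alg demo_alg = {
	.digestsize	= 4,
	.init		= demo_crc_init,
	.update		= demo_crc_update,
	.final		= demo_crc_final,
	.setkey		= demo_crc_setkey,
	.descsize	= sizeof(u32),
	.base		= {
		.cra_name	= "demo-crc32",
		.cra_driver_name = "demo-crc32-generic",
		.cra_priority	= 100,
		/* Has ->setkey(), but works without it: */
		.cra_flags	= CRYPTO_ALG_OPTIONAL_KEY,
		.cra_blocksize	= 1,
		.cra_ctxsize	= sizeof(u32),
		.cra_module	= THIS_MODULE,
	},
};

static int __init demo_mod_init(void)
{
	return crypto_register_shash(&demo_alg);
}

static void __exit demo_mod_exit(void)
{
	crypto_unregister_shash(&demo_alg);
}

module_init(demo_mod_init);
module_exit(demo_mod_exit);
MODULE_LICENSE("GPL");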
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/arm/crypto/crc32-ce-glue.c | 2 ++ arch/arm64/crypto/crc32-ce-glue.c | 2 ++ arch/powerpc/crypto/crc32c-vpmsum_glue.c | 1 + arch/s390/crypto/crc32-vx.c | 3 +++ arch/sparc/crypto/crc32c_glue.c | 1 + arch/x86/crypto/crc32-pclmul_glue.c | 1 + arch/x86/crypto/crc32c-intel_glue.c | 1 + crypto/crc32_generic.c | 1 + crypto/crc32c_generic.c | 1 + crypto/cryptd.c | 7 +++---- crypto/mcryptd.c | 7 +++---- drivers/crypto/bfin_crc.c | 3 ++- drivers/crypto/stm32/stm32_crc32.c | 2 ++ .../staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c | 1 + include/linux/crypto.h | 6 ++++++ 15 files changed, 30 insertions(+), 9 deletions(-) diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c index 1b0e0e86ee9c20..96e62ec105d061 100644 --- a/arch/arm/crypto/crc32-ce-glue.c +++ b/arch/arm/crypto/crc32-ce-glue.c @@ -188,6 +188,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32", .base.cra_driver_name = "crc32-arm-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, }, { @@ -203,6 +204,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32c", .base.cra_driver_name = "crc32c-arm-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c index 624f4137918ce5..34b4e3d46aab41 100644 --- a/arch/arm64/crypto/crc32-ce-glue.c +++ b/arch/arm64/crypto/crc32-ce-glue.c @@ -185,6 +185,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32", .base.cra_driver_name = "crc32-arm64-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, }, { @@ -200,6 +201,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32c", .base.cra_driver_name = "crc32c-arm64-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index f058e0c3e4d4b4..fd1d6c83f0c02d 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c @@ -141,6 +141,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-vpmsum", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c index 992e630c227b58..6f4985f357c666 100644 --- a/arch/s390/crypto/crc32-vx.c +++ b/arch/s390/crypto/crc32-vx.c @@ -238,6 +238,7 @@ static struct shash_alg crc32_vx_algs[] = { .cra_name = "crc32", .cra_driver_name = "crc32-vx", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CRC32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crc_ctx), .cra_module = THIS_MODULE, @@ -258,6 +259,7 @@ static struct shash_alg crc32_vx_algs[] = { .cra_name = "crc32be", .cra_driver_name = "crc32be-vx", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CRC32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crc_ctx), .cra_module = THIS_MODULE, @@ -278,6 +280,7 @@ static struct shash_alg crc32_vx_algs[] = { .cra_name = "crc32c", .cra_driver_name = 
"crc32c-vx", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CRC32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crc_ctx), .cra_module = THIS_MODULE, diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c index d1064e46efe8be..8aa664638c3c0c 100644 --- a/arch/sparc/crypto/crc32c_glue.c +++ b/arch/sparc/crypto/crc32c_glue.c @@ -133,6 +133,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_alignmask = 7, diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c index 27226df3f7d8aa..c8d9cdacbf1046 100644 --- a/arch/x86/crypto/crc32-pclmul_glue.c +++ b/arch/x86/crypto/crc32-pclmul_glue.c @@ -162,6 +162,7 @@ static struct shash_alg alg = { .cra_name = "crc32", .cra_driver_name = "crc32-pclmul", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index c194d5717ae51e..5773e11610723f 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c @@ -226,6 +226,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-intel", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c index aa2a25fc7482a2..718cbce8d1695f 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32_generic.c @@ -133,6 +133,7 @@ static struct shash_alg alg = { .cra_name = "crc32", .cra_driver_name = "crc32-generic", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 4c0a0e27187694..372320399622df 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -146,6 +146,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-generic", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct chksum_ctx), diff --git a/crypto/cryptd.c b/crypto/cryptd.c index d5182e7b2a9cd5..248f6ba4168806 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -895,10 +895,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto out_free_inst; - type = CRYPTO_ALG_ASYNC; - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) - type |= CRYPTO_ALG_INTERNAL; - inst->alg.halg.base.cra_flags = type; + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | + (alg->cra_flags & (CRYPTO_ALG_INTERNAL | + CRYPTO_ALG_OPTIONAL_KEY)); inst->alg.halg.digestsize = salg->digestsize; inst->alg.halg.statesize = salg->statesize; diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index 6b4dd4b769426a..e0732d979e3b14 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -517,10 +517,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto out_free_inst; - type = CRYPTO_ALG_ASYNC; - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) - type |= CRYPTO_ALG_INTERNAL; - inst->alg.halg.base.cra_flags = type; + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | + 
(alg->cra_flags & (CRYPTO_ALG_INTERNAL | + CRYPTO_ALG_OPTIONAL_KEY)); inst->alg.halg.digestsize = halg->digestsize; inst->alg.halg.statesize = halg->statesize; diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c index a118b9bed669df..bfbf8bf77f0334 100644 --- a/drivers/crypto/bfin_crc.c +++ b/drivers/crypto/bfin_crc.c @@ -494,7 +494,8 @@ static struct ahash_alg algs = { .cra_driver_name = DRIVER_NAME, .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), .cra_alignmask = 3, diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c index 090582baecfe87..8f09b8430893c6 100644 --- a/drivers/crypto/stm32/stm32_crc32.c +++ b/drivers/crypto/stm32/stm32_crc32.c @@ -208,6 +208,7 @@ static struct shash_alg algs[] = { .cra_name = "crc32", .cra_driver_name = DRIVER_NAME, .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct stm32_crc_ctx), @@ -229,6 +230,7 @@ static struct shash_alg algs[] = { .cra_name = "crc32c", .cra_driver_name = DRIVER_NAME, .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct stm32_crc_ctx), diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c index db0572733712be..ab30a0f5129cf7 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c @@ -119,6 +119,7 @@ static struct shash_alg alg = { .cra_name = "adler32", .cra_driver_name = "adler32-zlib", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 84da9978e9516a..958023baa19f0b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -105,6 +105,12 @@ */ #define CRYPTO_ALG_INTERNAL 0x00002000 +/* + * Set if the algorithm has a ->setkey() method but can be used without + * calling it first, i.e. there is a default key. + */ +#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 + /* * Transform masks and values (for crt_flags). */ From 2f00eb27908895b46e807b632b53824d0844012f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:27 -0800 Subject: [PATCH 1902/3326] crypto: hash - prevent using keyed hashes without setting key commit 9fa68f620041be04720d0cbfb1bd3ddfc6310b24 upstream. Currently, almost none of the keyed hash algorithms check whether a key has been set before proceeding. Some algorithms are okay with this and will effectively just use a key of all 0's or some other bogus default. However, others will severely break, as demonstrated using "hmac(sha3-512-generic)", the unkeyed use of which causes a kernel crash via a (potentially exploitable) stack buffer overflow. A while ago, this problem was solved for AF_ALG by pairing each hash transform with a 'has_key' bool. However, there are still other places in the kernel where userspace can specify an arbitrary hash algorithm by name, and the kernel uses it as unkeyed hash without checking whether it is really unkeyed. 
Examples of this include: - KEYCTL_DH_COMPUTE, via the KDF extension - dm-verity - dm-crypt, via the ESSIV support - dm-integrity, via the "internal hash" mode with no key given - drbd (Distributed Replicated Block Device) This bug is especially bad for KEYCTL_DH_COMPUTE as that requires no privileges to call. Fix the bug for all users by adding a flag CRYPTO_TFM_NEED_KEY to the ->crt_flags of each hash transform that indicates whether the transform still needs to be keyed or not. Then, make the hash init, import, and digest functions return -ENOKEY if the key is still needed. The new flag also replaces the 'has_key' bool which algif_hash was previously using, thereby simplifying the algif_hash implementation. Reported-by: syzbot Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- crypto/ahash.c | 22 ++++++++++++++---- crypto/algif_hash.c | 52 +++++++++--------------------------------- crypto/shash.c | 25 ++++++++++++++++---- include/crypto/hash.h | 34 +++++++++++++++++++-------- include/linux/crypto.h | 2 ++ 5 files changed, 75 insertions(+), 60 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 63f3fd3a788e82..f75b5c1f71521c 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -193,11 +193,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { unsigned long alignmask = crypto_ahash_alignmask(tfm); + int err; if ((unsigned long)key & alignmask) - return ahash_setkey_unaligned(tfm, key, keylen); + err = ahash_setkey_unaligned(tfm, key, keylen); + else + err = tfm->setkey(tfm, key, keylen); + + if (err) + return err; - return tfm->setkey(tfm, key, keylen); + crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); @@ -370,7 +377,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { - return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_ahash_op(req, tfm->digest); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); @@ -456,7 +468,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) struct ahash_alg *alg = crypto_ahash_alg(hash); hash->setkey = ahash_nosetkey; - hash->has_setkey = false; hash->export = ahash_no_export; hash->import = ahash_no_import; @@ -471,7 +482,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) if (alg->setkey) { hash->setkey = alg->setkey; - hash->has_setkey = true; + if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY); } if (alg->export) hash->export = alg->export; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 5e92bd275ef38e..39cebd3256bf8d 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -34,11 +34,6 @@ struct hash_ctx { struct ahash_request req; }; -struct algif_hash_tfm { - struct crypto_ahash *hash; - bool has_key; -}; - static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) { unsigned ds; @@ -309,7 +304,7 @@ static int hash_check_key(struct socket *sock) int err = 0; struct sock *psk; struct alg_sock *pask; - struct algif_hash_tfm *tfm; + struct crypto_ahash *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -323,7 +318,7 @@ static int hash_check_key(struct socket *sock) err = -ENOKEY; lock_sock_nested(psk, SINGLE_DEPTH_NESTING); - if (!tfm->has_key) + if (crypto_ahash_get_flags(tfm) & 
CRYPTO_TFM_NEED_KEY) goto unlock; if (!pask->refcnt++) @@ -414,41 +409,17 @@ static struct proto_ops algif_hash_ops_nokey = { static void *hash_bind(const char *name, u32 type, u32 mask) { - struct algif_hash_tfm *tfm; - struct crypto_ahash *hash; - - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); - if (!tfm) - return ERR_PTR(-ENOMEM); - - hash = crypto_alloc_ahash(name, type, mask); - if (IS_ERR(hash)) { - kfree(tfm); - return ERR_CAST(hash); - } - - tfm->hash = hash; - - return tfm; + return crypto_alloc_ahash(name, type, mask); } static void hash_release(void *private) { - struct algif_hash_tfm *tfm = private; - - crypto_free_ahash(tfm->hash); - kfree(tfm); + crypto_free_ahash(private); } static int hash_setkey(void *private, const u8 *key, unsigned int keylen) { - struct algif_hash_tfm *tfm = private; - int err; - - err = crypto_ahash_setkey(tfm->hash, key, keylen); - tfm->has_key = !err; - - return err; + return crypto_ahash_setkey(private, key, keylen); } static void hash_sock_destruct(struct sock *sk) @@ -463,11 +434,10 @@ static void hash_sock_destruct(struct sock *sk) static int hash_accept_parent_nokey(void *private, struct sock *sk) { - struct hash_ctx *ctx; + struct crypto_ahash *tfm = private; struct alg_sock *ask = alg_sk(sk); - struct algif_hash_tfm *tfm = private; - struct crypto_ahash *hash = tfm->hash; - unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); + struct hash_ctx *ctx; + unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) @@ -480,7 +450,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) ask->private = ctx; - ahash_request_set_tfm(&ctx->req, hash); + ahash_request_set_tfm(&ctx->req, tfm); ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_complete, &ctx->completion); @@ -491,9 +461,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) static int hash_accept_parent(void *private, struct sock *sk) { - struct algif_hash_tfm *tfm = private; + struct crypto_ahash *tfm = private; - if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return hash_accept_parent_nokey(private, sk); diff --git a/crypto/shash.c b/crypto/shash.c index e849d3ee2e2728..5d732c6bb4b215 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -58,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, { struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); + int err; if ((unsigned long)key & alignmask) - return shash_setkey_unaligned(tfm, key, keylen); + err = shash_setkey_unaligned(tfm, key, keylen); + else + err = shash->setkey(tfm, key, keylen); + + if (err) + return err; - return shash->setkey(tfm, key, keylen); + crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } EXPORT_SYMBOL_GPL(crypto_shash_setkey); @@ -181,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + if (((unsigned long)data | (unsigned long)out) & alignmask) return shash_digest_unaligned(desc, data, len, out); @@ -360,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->digest = shash_async_digest; crt->setkey = shash_async_setkey; - crt->has_setkey = alg->setkey != shash_no_setkey; + crypto_ahash_set_flags(crt, 
crypto_shash_get_flags(shash) & + CRYPTO_TFM_NEED_KEY); if (alg->export) crt->export = shash_async_export; @@ -375,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) static int crypto_shash_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); + struct shash_alg *alg = crypto_shash_alg(hash); + + hash->descsize = alg->descsize; + + if (crypto_shash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY); - hash->descsize = crypto_shash_alg(hash)->descsize; return 0; } diff --git a/include/crypto/hash.h b/include/crypto/hash.h index b5727bcd233626..74827781593cf3 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -205,7 +205,6 @@ struct crypto_ahash { unsigned int keylen); unsigned int reqsize; - bool has_setkey; struct crypto_tfm base; }; @@ -405,11 +404,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req) int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); -static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm) -{ - return tfm->has_setkey; -} - /** * crypto_ahash_finup() - update and finalize message digest * @req: reference to the ahash_request handle that holds all information @@ -481,7 +475,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out) */ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) { - return crypto_ahash_reqtfm(req)->import(req, in); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->import(req, in); } /** @@ -498,7 +497,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) */ static inline int crypto_ahash_init(struct ahash_request *req) { - return crypto_ahash_reqtfm(req)->init(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->init(req); } /** @@ -851,7 +855,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) */ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) { - return crypto_shash_alg(desc->tfm)->import(desc, in); + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->import(desc, in); } /** @@ -867,7 +876,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) */ static inline int crypto_shash_init(struct shash_desc *desc) { - return crypto_shash_alg(desc->tfm)->init(desc); + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->init(desc); } /** diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 958023baa19f0b..cc36484d29e167 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -114,6 +114,8 @@ /* * Transform masks and values (for crt_flags). */ +#define CRYPTO_TFM_NEED_KEY 0x00000001 + #define CRYPTO_TFM_REQ_MASK 0x000fff00 #define CRYPTO_TFM_RES_MASK 0xfff00000 From ea96d8f676dd3caeb77e3dc509c880cc81d303a6 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:22 +0100 Subject: [PATCH 1903/3326] media: v4l2-ioctl.c: use check_fmt for enum/g/s/try_fmt commit b2469c814fbc8f1f19676dd4912717b798df511e upstream. 
Don't duplicate the buffer type checks in enum/g/s/try_fmt. The check_fmt function does that already. It is hard to keep the checks in sync for all these functions and in fact the check for VBI was wrong in the _fmt functions as it allowed SDR types as well. This caused a v4l2-compliance failure for /dev/swradio0 using vivid. This simplifies the code and keeps the check in one place and fixes the SDR/VBI bug. Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-ioctl.c | 140 +++++++++++---------------- 1 file changed, 54 insertions(+), 86 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index b60a6b0841d12c..42e376f467293c 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1308,52 +1308,50 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_fmtdesc *p = arg; - struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret = -EINVAL; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; + ret = -EINVAL; switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_enum_fmt_vid_cap)) + if (unlikely(!ops->vidioc_enum_fmt_vid_cap)) break; ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_cap_mplane)) + if (unlikely(!ops->vidioc_enum_fmt_vid_cap_mplane)) break; ret = ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_overlay)) + if (unlikely(!ops->vidioc_enum_fmt_vid_overlay)) break; ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out)) + if (unlikely(!ops->vidioc_enum_fmt_vid_out)) break; ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out_mplane)) + if (unlikely(!ops->vidioc_enum_fmt_vid_out_mplane)) break; ret = ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg); break; case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_enum_fmt_sdr_cap)) + if (unlikely(!ops->vidioc_enum_fmt_sdr_cap)) break; ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg); break; case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_enum_fmt_sdr_out)) + if (unlikely(!ops->vidioc_enum_fmt_sdr_out)) break; ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg); break; case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_meta_cap)) + if (unlikely(!ops->vidioc_enum_fmt_meta_cap)) break; ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg); break; @@ -1367,13 +1365,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; - struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type 
== VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; /* * fmt can't be cleared for these overlay types due to the 'clips' @@ -1401,7 +1396,7 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_g_fmt_vid_cap)) + if (unlikely(!ops->vidioc_g_fmt_vid_cap)) break; p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg); @@ -1409,23 +1404,15 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap_mplane)) - break; return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_overlay)) - break; return ops->vidioc_g_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_vbi_cap)) - break; return ops->vidioc_g_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_cap)) - break; return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out)) + if (unlikely(!ops->vidioc_g_fmt_vid_out)) break; p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; ret = ops->vidioc_g_fmt_vid_out(file, fh, arg); @@ -1433,32 +1420,18 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_mplane)) - break; return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_overlay)) - break; return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_vbi_out)) - break; return ops->vidioc_g_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out)) - break; return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap)) - break; return ops->vidioc_g_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_g_fmt_sdr_out)) - break; return ops->vidioc_g_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_meta_cap)) - break; return ops->vidioc_g_fmt_meta_cap(file, fh, arg); } return -EINVAL; @@ -1484,12 +1457,10 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, { struct v4l2_format *p = arg; struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; ret = v4l_enable_media_source(vfd); if (ret) @@ -1498,37 +1469,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, switch 
(p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_s_fmt_vid_cap)) + if (unlikely(!ops->vidioc_s_fmt_vid_cap)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; - if (is_tch) + if (vfd->vfl_type == VFL_TYPE_TOUCH) v4l_pix_format_touch(&p->fmt.pix); return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap_mplane)) + if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_overlay)) + if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_s_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_vbi_cap)) + if (unlikely(!ops->vidioc_s_fmt_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_s_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_cap)) + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out)) + if (unlikely(!ops->vidioc_s_fmt_vid_out)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_s_fmt_vid_out(file, fh, arg); @@ -1536,37 +1507,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_mplane)) + if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_overlay)) + if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_vbi_out)) + if (unlikely(!ops->vidioc_s_fmt_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_s_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_out)) + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_s_fmt_sdr_cap)) + if (unlikely(!ops->vidioc_s_fmt_sdr_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_s_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_s_fmt_sdr_out)) + if (unlikely(!ops->vidioc_s_fmt_sdr_out)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_s_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_meta_cap)) + if (unlikely(!ops->vidioc_s_fmt_meta_cap)) break; CLEAR_AFTER_FIELD(p, fmt.meta); return ops->vidioc_s_fmt_meta_cap(file, fh, arg); @@ -1578,19 +1549,16 @@ static int 
v4l_try_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; - struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; v4l_sanitize_format(p); switch (p->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_try_fmt_vid_cap)) + if (unlikely(!ops->vidioc_try_fmt_vid_cap)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg); @@ -1598,27 +1566,27 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap_mplane)) + if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_overlay)) + if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_try_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_vbi_cap)) + if (unlikely(!ops->vidioc_try_fmt_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_try_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_cap)) + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out)) + if (unlikely(!ops->vidioc_try_fmt_vid_out)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_try_fmt_vid_out(file, fh, arg); @@ -1626,37 +1594,37 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_mplane)) + if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_overlay)) + if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_vbi_out)) + if (unlikely(!ops->vidioc_try_fmt_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_try_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_out)) + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_try_fmt_sdr_cap)) + if (unlikely(!ops->vidioc_try_fmt_sdr_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return 
ops->vidioc_try_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_try_fmt_sdr_out)) + if (unlikely(!ops->vidioc_try_fmt_sdr_out)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_try_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_meta_cap)) + if (unlikely(!ops->vidioc_try_fmt_meta_cap)) break; CLEAR_AFTER_FIELD(p, fmt.meta); return ops->vidioc_try_fmt_meta_cap(file, fh, arg); From 2d280dab382eef93311f089da0bb428336431208 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:23 +0100 Subject: [PATCH 1904/3326] media: v4l2-ioctl.c: don't copy back the result for -ENOTTY commit 181a4a2d5a0a7b43cab08a70710d727e7764ccdd upstream. If the ioctl returned -ENOTTY, then don't bother copying back the result as there is no point. Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-ioctl.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 42e376f467293c..d06941cc6a5571 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2892,8 +2892,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, /* Handles IOCTL */ err = func(file, cmd, parg); - if (err == -ENOIOCTLCMD) + if (err == -ENOTTY || err == -ENOIOCTLCMD) { err = -ENOTTY; + goto out; + } + if (err == 0) { if (cmd == VIDIOC_DQBUF) trace_v4l2_dqbuf(video_devdata(file)->minor, parg); From 2b991eeb5cfb34838c4e984334e5975d3351c026 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:24 +0100 Subject: [PATCH 1905/3326] media: v4l2-compat-ioctl32.c: add missing VIDIOC_PREPARE_BUF commit 3ee6d040719ae09110e5cdf24d5386abe5d1b776 upstream. The result of the VIDIOC_PREPARE_BUF ioctl was never copied back to userspace since it was missing in the switch. Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 821f2aa299ae53..f6a7f8793720e7 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -1052,6 +1052,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar err = put_v4l2_create32(&karg.v2crt, up); break; + case VIDIOC_PREPARE_BUF: case VIDIOC_QUERYBUF: case VIDIOC_QBUF: case VIDIOC_DQBUF: From 0b7d6ac53606ab9991b76e95ab3e54348b3b9b89 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:25 +0100 Subject: [PATCH 1906/3326] media: v4l2-compat-ioctl32.c: fix the indentation commit b7b957d429f601d6d1942122b339474f31191d75 upstream. The indentation of this source is all over the place. Fix this. This patch only changes whitespace. 
Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 212 +++++++++--------- 1 file changed, 107 insertions(+), 105 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index f6a7f8793720e7..44644a2ea3e9bc 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -49,12 +49,12 @@ struct v4l2_window32 { static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) { if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) || - copy_from_user(&kp->w, &up->w, sizeof(up->w)) || - get_user(kp->field, &up->field) || - get_user(kp->chromakey, &up->chromakey) || - get_user(kp->clipcount, &up->clipcount) || - get_user(kp->global_alpha, &up->global_alpha)) - return -EFAULT; + copy_from_user(&kp->w, &up->w, sizeof(up->w)) || + get_user(kp->field, &up->field) || + get_user(kp->chromakey, &up->chromakey) || + get_user(kp->clipcount, &up->clipcount) || + get_user(kp->global_alpha, &up->global_alpha)) + return -EFAULT; if (kp->clipcount > 2048) return -EINVAL; if (kp->clipcount) { @@ -84,11 +84,11 @@ static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) { if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) || - put_user(kp->field, &up->field) || - put_user(kp->chromakey, &up->chromakey) || - put_user(kp->clipcount, &up->clipcount) || - put_user(kp->global_alpha, &up->global_alpha)) - return -EFAULT; + put_user(kp->field, &up->field) || + put_user(kp->chromakey, &up->chromakey) || + put_user(kp->clipcount, &up->clipcount) || + put_user(kp->global_alpha, &up->global_alpha)) + return -EFAULT; return 0; } @@ -100,7 +100,7 @@ static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pi } static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, - struct v4l2_pix_format_mplane __user *up) + struct v4l2_pix_format_mplane __user *up) { if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane))) return -EFAULT; @@ -115,7 +115,7 @@ static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pi } static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, - struct v4l2_pix_format_mplane __user *up) + struct v4l2_pix_format_mplane __user *up) { if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane))) return -EFAULT; @@ -238,7 +238,7 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us return get_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta); default: pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", - kp->type); + kp->type); return -EINVAL; } } @@ -287,7 +287,7 @@ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us return put_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta); default: pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", - kp->type); + kp->type); return -EINVAL; } } @@ -321,7 +321,7 @@ static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 { /* other fields are not set by the user, nor used by the driver */ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) || - get_user(kp->index, &up->index)) + get_user(kp->index, &up->index)) return -EFAULT; return 0; } @@ -329,13 +329,14 @@ static int get_v4l2_standard32(struct 
v4l2_standard *kp, struct v4l2_standard32 static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) { if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) || - put_user(kp->index, &up->index) || - put_user(kp->id, &up->id) || - copy_to_user(up->name, kp->name, 24) || - copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) || - put_user(kp->framelines, &up->framelines) || - copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32))) - return -EFAULT; + put_user(kp->index, &up->index) || + put_user(kp->id, &up->id) || + copy_to_user(up->name, kp->name, 24) || + copy_to_user(&up->frameperiod, &kp->frameperiod, + sizeof(kp->frameperiod)) || + put_user(kp->framelines, &up->framelines) || + copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32))) + return -EFAULT; return 0; } @@ -375,14 +376,14 @@ struct v4l2_buffer32 { }; static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, - enum v4l2_memory memory) + enum v4l2_memory memory) { void __user *up_pln; compat_long_t p; if (copy_in_user(up, up32, 2 * sizeof(__u32)) || - copy_in_user(&up->data_offset, &up32->data_offset, - sizeof(__u32))) + copy_in_user(&up->data_offset, &up32->data_offset, + sizeof(__u32))) return -EFAULT; if (memory == V4L2_MEMORY_USERPTR) { @@ -396,7 +397,7 @@ static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ return -EFAULT; } else { if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, - sizeof(__u32))) + sizeof(__u32))) return -EFAULT; } @@ -404,23 +405,23 @@ static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ } static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, - enum v4l2_memory memory) + enum v4l2_memory memory) { if (copy_in_user(up32, up, 2 * sizeof(__u32)) || - copy_in_user(&up32->data_offset, &up->data_offset, - sizeof(__u32))) + copy_in_user(&up32->data_offset, &up->data_offset, + sizeof(__u32))) return -EFAULT; /* For MMAP, driver might've set up the offset, so copy it back. * USERPTR stays the same (was userspace-provided), so no copying. */ if (memory == V4L2_MEMORY_MMAP) if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset, - sizeof(__u32))) + sizeof(__u32))) return -EFAULT; /* For DMABUF, driver might've set up the fd, so copy it back. 
*/ if (memory == V4L2_MEMORY_DMABUF) if (copy_in_user(&up32->m.fd, &up->m.fd, - sizeof(int))) + sizeof(int))) return -EFAULT; return 0; @@ -434,19 +435,19 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user int ret; if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) || - get_user(kp->index, &up->index) || - get_user(kp->type, &up->type) || - get_user(kp->flags, &up->flags) || - get_user(kp->memory, &up->memory) || - get_user(kp->length, &up->length)) - return -EFAULT; + get_user(kp->index, &up->index) || + get_user(kp->type, &up->type) || + get_user(kp->flags, &up->flags) || + get_user(kp->memory, &up->memory) || + get_user(kp->length, &up->length)) + return -EFAULT; if (V4L2_TYPE_IS_OUTPUT(kp->type)) if (get_user(kp->bytesused, &up->bytesused) || - get_user(kp->field, &up->field) || - get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - get_user(kp->timestamp.tv_usec, - &up->timestamp.tv_usec)) + get_user(kp->field, &up->field) || + get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || + get_user(kp->timestamp.tv_usec, + &up->timestamp.tv_usec)) return -EFAULT; if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { @@ -466,7 +467,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user uplane32 = compat_ptr(p); if (!access_ok(VERIFY_READ, uplane32, - kp->length * sizeof(struct v4l2_plane32))) + kp->length * sizeof(struct v4l2_plane32))) return -EFAULT; /* We don't really care if userspace decides to kill itself @@ -490,12 +491,12 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user break; case V4L2_MEMORY_USERPTR: { - compat_long_t tmp; + compat_long_t tmp; - if (get_user(tmp, &up->m.userptr)) - return -EFAULT; + if (get_user(tmp, &up->m.userptr)) + return -EFAULT; - kp->m.userptr = (unsigned long)compat_ptr(tmp); + kp->m.userptr = (unsigned long)compat_ptr(tmp); } break; case V4L2_MEMORY_OVERLAY: @@ -521,22 +522,23 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user int ret; if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) || - put_user(kp->index, &up->index) || - put_user(kp->type, &up->type) || - put_user(kp->flags, &up->flags) || - put_user(kp->memory, &up->memory)) - return -EFAULT; + put_user(kp->index, &up->index) || + put_user(kp->type, &up->type) || + put_user(kp->flags, &up->flags) || + put_user(kp->memory, &up->memory)) + return -EFAULT; if (put_user(kp->bytesused, &up->bytesused) || - put_user(kp->field, &up->field) || - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) || - copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) || - put_user(kp->sequence, &up->sequence) || - put_user(kp->reserved2, &up->reserved2) || - put_user(kp->reserved, &up->reserved) || - put_user(kp->length, &up->length)) - return -EFAULT; + put_user(kp->field, &up->field) || + put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || + put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) || + copy_to_user(&up->timecode, &kp->timecode, + sizeof(struct v4l2_timecode)) || + put_user(kp->sequence, &up->sequence) || + put_user(kp->reserved2, &up->reserved2) || + put_user(kp->reserved, &up->reserved) || + put_user(kp->length, &up->length)) + return -EFAULT; if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { num_planes = kp->length; @@ -600,11 +602,11 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame u32 tmp; if (!access_ok(VERIFY_READ, up, sizeof(struct 
v4l2_framebuffer32)) || - get_user(tmp, &up->base) || - get_user(kp->capability, &up->capability) || - get_user(kp->flags, &up->flags) || - copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt))) - return -EFAULT; + get_user(tmp, &up->base) || + get_user(kp->capability, &up->capability) || + get_user(kp->flags, &up->flags) || + copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt))) + return -EFAULT; kp->base = (__force void *)compat_ptr(tmp); return 0; } @@ -614,11 +616,11 @@ static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame u32 tmp = (u32)((unsigned long)kp->base); if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) || - put_user(tmp, &up->base) || - put_user(kp->capability, &up->capability) || - put_user(kp->flags, &up->flags) || - copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt))) - return -EFAULT; + put_user(tmp, &up->base) || + put_user(kp->capability, &up->capability) || + put_user(kp->flags, &up->flags) || + copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt))) + return -EFAULT; return 0; } @@ -694,12 +696,12 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext compat_caddr_t p; if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) || - get_user(kp->which, &up->which) || - get_user(kp->count, &up->count) || - get_user(kp->error_idx, &up->error_idx) || - copy_from_user(kp->reserved, up->reserved, - sizeof(kp->reserved))) - return -EFAULT; + get_user(kp->which, &up->which) || + get_user(kp->count, &up->count) || + get_user(kp->error_idx, &up->error_idx) || + copy_from_user(kp->reserved, up->reserved, + sizeof(kp->reserved))) + return -EFAULT; if (kp->count == 0) { kp->controls = NULL; return 0; @@ -710,7 +712,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext return -EFAULT; ucontrols = compat_ptr(p); if (!access_ok(VERIFY_READ, ucontrols, - kp->count * sizeof(struct v4l2_ext_control32))) + kp->count * sizeof(struct v4l2_ext_control32))) return -EFAULT; kcontrols = compat_alloc_user_space(kp->count * sizeof(struct v4l2_ext_control)); @@ -746,11 +748,11 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext compat_caddr_t p; if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) || - put_user(kp->which, &up->which) || - put_user(kp->count, &up->count) || - put_user(kp->error_idx, &up->error_idx) || - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) - return -EFAULT; + put_user(kp->which, &up->which) || + put_user(kp->count, &up->count) || + put_user(kp->error_idx, &up->error_idx) || + copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) + return -EFAULT; if (!kp->count) return 0; @@ -758,7 +760,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext return -EFAULT; ucontrols = compat_ptr(p); if (!access_ok(VERIFY_WRITE, ucontrols, - n * sizeof(struct v4l2_ext_control32))) + n * sizeof(struct v4l2_ext_control32))) return -EFAULT; while (--n >= 0) { @@ -796,15 +798,15 @@ struct v4l2_event32 { static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up) { if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) || - put_user(kp->type, &up->type) || - copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || - put_user(kp->pending, &up->pending) || - put_user(kp->sequence, &up->sequence) || - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || - put_user(kp->id, &up->id) || - 
copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) - return -EFAULT; + put_user(kp->type, &up->type) || + copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || + put_user(kp->pending, &up->pending) || + put_user(kp->sequence, &up->sequence) || + put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || + put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || + put_user(kp->id, &up->id) || + copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) + return -EFAULT; return 0; } @@ -821,12 +823,12 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) u32 tmp; if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) || - get_user(kp->pad, &up->pad) || - get_user(kp->start_block, &up->start_block) || - get_user(kp->blocks, &up->blocks) || - get_user(tmp, &up->edid) || - copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) - return -EFAULT; + get_user(kp->pad, &up->pad) || + get_user(kp->start_block, &up->start_block) || + get_user(kp->blocks, &up->blocks) || + get_user(tmp, &up->edid) || + copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) + return -EFAULT; kp->edid = (__force u8 *)compat_ptr(tmp); return 0; } @@ -836,12 +838,12 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) u32 tmp = (u32)((unsigned long)kp->edid); if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) || - put_user(kp->pad, &up->pad) || - put_user(kp->start_block, &up->start_block) || - put_user(kp->blocks, &up->blocks) || - put_user(tmp, &up->edid) || - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) - return -EFAULT; + put_user(kp->pad, &up->pad) || + put_user(kp->start_block, &up->start_block) || + put_user(kp->blocks, &up->blocks) || + put_user(tmp, &up->edid) || + copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) + return -EFAULT; return 0; } From a38becb087b20a34bdbfa63567f69671849025ad Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:26 +0100 Subject: [PATCH 1907/3326] media: v4l2-compat-ioctl32.c: move 'helper' functions to __get/put_v4l2_format32 commit 486c521510c44a04cd756a9267e7d1e271c8a4ba upstream. These helper functions do not really help. Move the code to the __get/put_v4l2_format32 functions. 
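As an illustrative aside (not part of the patch): the one-line helpers being removed only wrapped a single copy, and the diff replaces each call with a "copy ? -EFAULT : 0" expression at the call site. The standalone userspace sketch below models that shape; pix_format_model and the stub copy_from_user are invented stand-ins, not the real uapi struct or kernel primitive.

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct pix_format_model { unsigned int width, height, pixelformat; };

/* Stub mimicking the kernel convention: return the number of bytes NOT copied. */
static unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
        memcpy(to, from, n);
        return 0;
}

/* Before: one trivial helper per format type. */
static int get_pix_format(struct pix_format_model *kp, const struct pix_format_model *up)
{
        if (copy_from_user(kp, up, sizeof(*kp)))
                return -EFAULT;
        return 0;
}

/* After: the same check folded into a conditional expression at the call site. */
static int get_pix_format_inline(struct pix_format_model *kp, const struct pix_format_model *up)
{
        return copy_from_user(kp, up, sizeof(*kp)) ? -EFAULT : 0;
}

int main(void)
{
        struct pix_format_model user = { 640, 480, 0 }, kern = { 0 };
        int ret_helper = get_pix_format(&kern, &user);
        int ret_inline = get_pix_format_inline(&kern, &user);

        printf("helper: %d  inline: %d  width: %u\n", ret_helper, ret_inline, kern.width);
        return 0;
}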
Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 124 ++++-------------- 1 file changed, 24 insertions(+), 100 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 44644a2ea3e9bc..297c924aefce6a 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -92,92 +92,6 @@ static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user return 0; } -static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format))) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, - struct v4l2_pix_format_mplane __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, - struct v4l2_pix_format_mplane __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane))) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format))) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format))) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format))) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_meta_format))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_meta_format))) - return -EFAULT; - return 0; -} - struct v4l2_format32 { __u32 type; /* enum v4l2_buf_type */ union { @@ -217,25 +131,30 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us switch (kp->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: - return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); + return copy_from_user(&kp->fmt.pix, &up->fmt.pix, + sizeof(kp->fmt.pix)) ? 
-EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp, - &up->fmt.pix_mp); + return copy_from_user(&kp->fmt.pix_mp, &up->fmt.pix_mp, + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: return get_v4l2_window32(&kp->fmt.win, &up->fmt.win); case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: - return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); + return copy_from_user(&kp->fmt.vbi, &up->fmt.vbi, + sizeof(kp->fmt.vbi)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); + return copy_from_user(&kp->fmt.sliced, &up->fmt.sliced, + sizeof(kp->fmt.sliced)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SDR_CAPTURE: case V4L2_BUF_TYPE_SDR_OUTPUT: - return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); + return copy_from_user(&kp->fmt.sdr, &up->fmt.sdr, + sizeof(kp->fmt.sdr)) ? -EFAULT : 0; case V4L2_BUF_TYPE_META_CAPTURE: - return get_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta); + return copy_from_user(&kp->fmt.meta, &up->fmt.meta, + sizeof(kp->fmt.meta)) ? -EFAULT : 0; default: pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", kp->type); @@ -266,25 +185,30 @@ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us switch (kp->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: - return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); + return copy_to_user(&up->fmt.pix, &kp->fmt.pix, + sizeof(kp->fmt.pix)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp, - &up->fmt.pix_mp); + return copy_to_user(&up->fmt.pix_mp, &kp->fmt.pix_mp, + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: return put_v4l2_window32(&kp->fmt.win, &up->fmt.win); case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: - return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); + return copy_to_user(&up->fmt.vbi, &kp->fmt.vbi, + sizeof(kp->fmt.vbi)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); + return copy_to_user(&up->fmt.sliced, &kp->fmt.sliced, + sizeof(kp->fmt.sliced)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SDR_CAPTURE: case V4L2_BUF_TYPE_SDR_OUTPUT: - return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); + return copy_to_user(&up->fmt.sdr, &kp->fmt.sdr, + sizeof(kp->fmt.sdr)) ? -EFAULT : 0; case V4L2_BUF_TYPE_META_CAPTURE: - return put_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta); + return copy_to_user(&up->fmt.meta, &kp->fmt.meta, + sizeof(kp->fmt.meta)) ? -EFAULT : 0; default: pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", kp->type); From 16c25072560fb30df50c766260b69b14e3795194 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:27 +0100 Subject: [PATCH 1908/3326] media: v4l2-compat-ioctl32.c: avoid sizeof(type) commit 333b1e9f96ce05f7498b581509bb30cde03018bf upstream. Instead of doing sizeof(struct foo) use sizeof(*up). There even were cases where 4 * sizeof(__u32) was used instead of sizeof(kp->reserved), which is very dangerous when the size of the reserved array changes. 
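To illustrate why the sizeof-on-the-object form is safer (standard_model below is an invented stand-in, not the real v4l2_standard layout): the hard-coded byte count happens to match today, but only sizeof(kp->reserved) keeps matching if the array is ever resized.

#include <stdio.h>
#include <stdint.h>

/* Invented stand-in for a uapi struct with a reserved[] tail. */
struct standard_model {
        uint32_t index;
        uint32_t framelines;
        uint32_t reserved[4];   /* grow this and re-run to see the difference */
};

int main(void)
{
        struct standard_model s = { 0 }, *kp = &s;

        /* Fragile: a literal count duplicated far away from the definition. */
        printf("4 * sizeof(__u32)    = %zu bytes\n", 4 * sizeof(uint32_t));
        /* Robust: tracks the object being copied, as the patch switches to. */
        printf("sizeof(kp->reserved) = %zu bytes\n", sizeof(kp->reserved));
        printf("sizeof(*kp)          = %zu bytes\n", sizeof(*kp));
        return 0;
}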
Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 79 +++++++++---------- 1 file changed, 36 insertions(+), 43 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 297c924aefce6a..d156b8975f1e71 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -48,7 +48,7 @@ struct v4l2_window32 { static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) { - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) || + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || copy_from_user(&kp->w, &up->w, sizeof(up->w)) || get_user(kp->field, &up->field) || get_user(kp->chromakey, &up->chromakey) || @@ -66,7 +66,7 @@ static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user if (get_user(p, &up->clips)) return -EFAULT; uclips = compat_ptr(p); - kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip)); + kclips = compat_alloc_user_space(n * sizeof(*kclips)); kp->clips = kclips; while (--n >= 0) { if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) @@ -164,14 +164,14 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) { - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) + if (!access_ok(VERIFY_READ, up, sizeof(*up))) return -EFAULT; return __get_v4l2_format32(kp, up); } static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) { - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) return -EFAULT; return __get_v4l2_format32(&kp->format, &up->format); @@ -218,14 +218,14 @@ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32))) + if (!access_ok(VERIFY_WRITE, up, sizeof(*up))) return -EFAULT; return __put_v4l2_format32(kp, up); } static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) || + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) || copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) return -EFAULT; @@ -244,7 +244,7 @@ struct v4l2_standard32 { static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) { /* other fields are not set by the user, nor used by the driver */ - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) || + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || get_user(kp->index, &up->index)) return -EFAULT; return 0; @@ -252,14 +252,14 @@ static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) || + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || put_user(kp->index, &up->index) || put_user(kp->id, &up->id) || - copy_to_user(up->name, kp->name, 24) || + 
copy_to_user(up->name, kp->name, sizeof(up->name)) || copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) || put_user(kp->framelines, &up->framelines) || - copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32))) + copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) return -EFAULT; return 0; } @@ -307,7 +307,7 @@ static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ if (copy_in_user(up, up32, 2 * sizeof(__u32)) || copy_in_user(&up->data_offset, &up32->data_offset, - sizeof(__u32))) + sizeof(up->data_offset))) return -EFAULT; if (memory == V4L2_MEMORY_USERPTR) { @@ -317,11 +317,11 @@ static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ if (put_user((unsigned long)up_pln, &up->m.userptr)) return -EFAULT; } else if (memory == V4L2_MEMORY_DMABUF) { - if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int))) + if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd))) return -EFAULT; } else { if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, - sizeof(__u32))) + sizeof(up32->m.mem_offset))) return -EFAULT; } @@ -333,19 +333,19 @@ static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ { if (copy_in_user(up32, up, 2 * sizeof(__u32)) || copy_in_user(&up32->data_offset, &up->data_offset, - sizeof(__u32))) + sizeof(up->data_offset))) return -EFAULT; /* For MMAP, driver might've set up the offset, so copy it back. * USERPTR stays the same (was userspace-provided), so no copying. */ if (memory == V4L2_MEMORY_MMAP) if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset, - sizeof(__u32))) + sizeof(up->m.mem_offset))) return -EFAULT; /* For DMABUF, driver might've set up the fd, so copy it back. */ if (memory == V4L2_MEMORY_DMABUF) if (copy_in_user(&up32->m.fd, &up->m.fd, - sizeof(int))) + sizeof(up->m.fd))) return -EFAULT; return 0; @@ -358,7 +358,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user compat_caddr_t p; int ret; - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) || + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || get_user(kp->index, &up->index) || get_user(kp->type, &up->type) || get_user(kp->flags, &up->flags) || @@ -370,8 +370,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user if (get_user(kp->bytesused, &up->bytesused) || get_user(kp->field, &up->field) || get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - get_user(kp->timestamp.tv_usec, - &up->timestamp.tv_usec)) + get_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec)) return -EFAULT; if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { @@ -391,13 +390,12 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user uplane32 = compat_ptr(p); if (!access_ok(VERIFY_READ, uplane32, - kp->length * sizeof(struct v4l2_plane32))) + kp->length * sizeof(*uplane32))) return -EFAULT; /* We don't really care if userspace decides to kill itself * by passing a very big num_planes value */ - uplane = compat_alloc_user_space(kp->length * - sizeof(struct v4l2_plane)); + uplane = compat_alloc_user_space(kp->length * sizeof(*uplane)); kp->m.planes = (__force struct v4l2_plane *)uplane; for (num_planes = 0; num_planes < kp->length; num_planes++) { @@ -445,7 +443,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user int num_planes; int ret; - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) || + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || put_user(kp->index, &up->index) || 
put_user(kp->type, &up->type) || put_user(kp->flags, &up->flags) || @@ -456,8 +454,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user put_user(kp->field, &up->field) || put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) || - copy_to_user(&up->timecode, &kp->timecode, - sizeof(struct v4l2_timecode)) || + copy_to_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) || put_user(kp->sequence, &up->sequence) || put_user(kp->reserved2, &up->reserved2) || put_user(kp->reserved, &up->reserved) || @@ -525,7 +522,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame { u32 tmp; - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) || + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || get_user(tmp, &up->base) || get_user(kp->capability, &up->capability) || get_user(kp->flags, &up->flags) || @@ -539,7 +536,7 @@ static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame { u32 tmp = (u32)((unsigned long)kp->base); - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) || + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || put_user(tmp, &up->base) || put_user(kp->capability, &up->capability) || put_user(kp->flags, &up->flags) || @@ -564,14 +561,14 @@ struct v4l2_input32 { Otherwise it is identical to the 32-bit version. */ static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) { - if (copy_from_user(kp, up, sizeof(struct v4l2_input32))) + if (copy_from_user(kp, up, sizeof(*up))) return -EFAULT; return 0; } static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) { - if (copy_to_user(up, kp, sizeof(struct v4l2_input32))) + if (copy_to_user(up, kp, sizeof(*up))) return -EFAULT; return 0; } @@ -619,12 +616,11 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext unsigned int n; compat_caddr_t p; - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) || + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || get_user(kp->which, &up->which) || get_user(kp->count, &up->count) || get_user(kp->error_idx, &up->error_idx) || - copy_from_user(kp->reserved, up->reserved, - sizeof(kp->reserved))) + copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) return -EFAULT; if (kp->count == 0) { kp->controls = NULL; @@ -635,11 +631,9 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext if (get_user(p, &up->controls)) return -EFAULT; ucontrols = compat_ptr(p); - if (!access_ok(VERIFY_READ, ucontrols, - kp->count * sizeof(struct v4l2_ext_control32))) + if (!access_ok(VERIFY_READ, ucontrols, kp->count * sizeof(*ucontrols))) return -EFAULT; - kcontrols = compat_alloc_user_space(kp->count * - sizeof(struct v4l2_ext_control)); + kcontrols = compat_alloc_user_space(kp->count * sizeof(*kcontrols)); kp->controls = (__force struct v4l2_ext_control *)kcontrols; for (n = 0; n < kp->count; n++) { u32 id; @@ -671,7 +665,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext int n = kp->count; compat_caddr_t p; - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) || + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || put_user(kp->which, &up->which) || put_user(kp->count, &up->count) || put_user(kp->error_idx, &up->error_idx) || @@ -683,8 +677,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext if (get_user(p, &up->controls)) return 
-EFAULT; ucontrols = compat_ptr(p); - if (!access_ok(VERIFY_WRITE, ucontrols, - n * sizeof(struct v4l2_ext_control32))) + if (!access_ok(VERIFY_WRITE, ucontrols, n * sizeof(*ucontrols))) return -EFAULT; while (--n >= 0) { @@ -721,7 +714,7 @@ struct v4l2_event32 { static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) || + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || put_user(kp->type, &up->type) || copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || put_user(kp->pending, &up->pending) || @@ -729,7 +722,7 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || put_user(kp->id, &up->id) || - copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) + copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) return -EFAULT; return 0; } @@ -746,7 +739,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) { u32 tmp; - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) || + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || get_user(kp->pad, &up->pad) || get_user(kp->start_block, &up->start_block) || get_user(kp->blocks, &up->blocks) || @@ -761,7 +754,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) { u32 tmp = (u32)((unsigned long)kp->edid); - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) || + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || put_user(kp->pad, &up->pad) || put_user(kp->start_block, &up->start_block) || put_user(kp->blocks, &up->blocks) || From 3df1197724661c15d23b8359ec42af04a648b848 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:28 +0100 Subject: [PATCH 1909/3326] media: v4l2-compat-ioctl32.c: copy m.userptr in put_v4l2_plane32 commit 8ed5a59dcb47a6f76034ee760b36e089f3e82529 upstream. The struct v4l2_plane32 should set m.userptr as well. The same happens in v4l2_buffer32 and v4l2-compliance tests for this. 
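A minimal userspace sketch of the idea, with invented plane_native/plane_compat stand-ins rather than the real v4l2_plane layouts: the copy-back has to pick the union member that matches the buffer's memory type, and the USERPTR member is the one this patch starts copying.

#include <stdio.h>
#include <stdint.h>

enum mem_type { MEM_MMAP, MEM_USERPTR, MEM_DMABUF, MEM_OVERLAY };

/* Native (64-bit) and compat (32-bit) plane models; layouts are illustrative only. */
struct plane_native { union { uint64_t mem_offset; uint64_t userptr; int32_t fd; } m; };
struct plane_compat { union { uint32_t mem_offset; uint32_t userptr; int32_t fd; } m; };

/* Copy back only the union member that is meaningful for this memory type. */
static void put_plane(const struct plane_native *kp, struct plane_compat *up,
                      enum mem_type memory)
{
        switch (memory) {
        case MEM_MMAP:
        case MEM_OVERLAY:
                up->m.mem_offset = (uint32_t)kp->m.mem_offset;
                break;
        case MEM_USERPTR:
                up->m.userptr = (uint32_t)kp->m.userptr;  /* the added copy-back */
                break;
        case MEM_DMABUF:
                up->m.fd = kp->m.fd;
                break;
        }
}

int main(void)
{
        struct plane_native k = { .m.userptr = 0x1000 };
        struct plane_compat u = { .m.userptr = 0 };

        put_plane(&k, &u, MEM_USERPTR);
        printf("userptr copied back: 0x%x\n", (unsigned)u.m.userptr);
        return 0;
}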
Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 47 +++++++++++-------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index d156b8975f1e71..62d44fab56711e 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -310,19 +310,24 @@ static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ sizeof(up->data_offset))) return -EFAULT; - if (memory == V4L2_MEMORY_USERPTR) { + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, + sizeof(up32->m.mem_offset))) + return -EFAULT; + break; + case V4L2_MEMORY_USERPTR: if (get_user(p, &up32->m.userptr)) return -EFAULT; up_pln = compat_ptr(p); if (put_user((unsigned long)up_pln, &up->m.userptr)) return -EFAULT; - } else if (memory == V4L2_MEMORY_DMABUF) { + break; + case V4L2_MEMORY_DMABUF: if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd))) return -EFAULT; - } else { - if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, - sizeof(up32->m.mem_offset))) - return -EFAULT; + break; } return 0; @@ -331,22 +336,32 @@ static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, enum v4l2_memory memory) { + unsigned long p; + if (copy_in_user(up32, up, 2 * sizeof(__u32)) || copy_in_user(&up32->data_offset, &up->data_offset, sizeof(up->data_offset))) return -EFAULT; - /* For MMAP, driver might've set up the offset, so copy it back. - * USERPTR stays the same (was userspace-provided), so no copying. */ - if (memory == V4L2_MEMORY_MMAP) + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset, sizeof(up->m.mem_offset))) return -EFAULT; - /* For DMABUF, driver might've set up the fd, so copy it back. 
*/ - if (memory == V4L2_MEMORY_DMABUF) + break; + case V4L2_MEMORY_USERPTR: + if (get_user(p, &up->m.userptr) || + put_user((compat_ulong_t)ptr_to_compat((__force void *)p), + &up32->m.userptr)) + return -EFAULT; + break; + case V4L2_MEMORY_DMABUF: if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd))) return -EFAULT; + break; + } return 0; } @@ -408,6 +423,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user } else { switch (kp->memory) { case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: if (get_user(kp->m.offset, &up->m.offset)) return -EFAULT; break; @@ -421,10 +437,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user kp->m.userptr = (unsigned long)compat_ptr(tmp); } break; - case V4L2_MEMORY_OVERLAY: - if (get_user(kp->m.offset, &up->m.offset)) - return -EFAULT; - break; case V4L2_MEMORY_DMABUF: if (get_user(kp->m.fd, &up->m.fd)) return -EFAULT; @@ -481,6 +493,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user } else { switch (kp->memory) { case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: if (put_user(kp->m.offset, &up->m.offset)) return -EFAULT; break; @@ -488,10 +501,6 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user if (put_user(kp->m.userptr, &up->m.userptr)) return -EFAULT; break; - case V4L2_MEMORY_OVERLAY: - if (put_user(kp->m.offset, &up->m.offset)) - return -EFAULT; - break; case V4L2_MEMORY_DMABUF: if (put_user(kp->m.fd, &up->m.fd)) return -EFAULT; From f1b572d34648313a07856e5b315d8329eb649c93 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:29 +0100 Subject: [PATCH 1910/3326] media: v4l2-compat-ioctl32.c: fix ctrl_is_pointer commit b8c601e8af2d08f733d74defa8465303391bb930 upstream. ctrl_is_pointer just hardcoded two known string controls, but that caused problems when using e.g. custom controls that use a pointer for the payload. Reimplement this function: it now finds the v4l2_ctrl (if the driver uses the control framework) or it calls vidioc_query_ext_ctrl (if the driver implements that directly). In both cases it can now check if the control is a pointer control or not. Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 57 ++++++++++++------- 1 file changed, 38 insertions(+), 19 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 62d44fab56711e..d11334712c6554 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -601,24 +603,39 @@ struct v4l2_ext_control32 { }; } __attribute__ ((packed)); -/* The following function really belong in v4l2-common, but that causes - a circular dependency between modules. We need to think about this, but - for now this will do. */ - -/* Return non-zero if this control is a pointer type. Currently only - type STRING is a pointer type. */ -static inline int ctrl_is_pointer(u32 id) +/* Return true if this control is a pointer type. 
*/ +static inline bool ctrl_is_pointer(struct file *file, u32 id) { - switch (id) { - case V4L2_CID_RDS_TX_PS_NAME: - case V4L2_CID_RDS_TX_RADIO_TEXT: - return 1; - default: - return 0; + struct video_device *vdev = video_devdata(file); + struct v4l2_fh *fh = NULL; + struct v4l2_ctrl_handler *hdl = NULL; + struct v4l2_query_ext_ctrl qec = { id }; + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags)) + fh = file->private_data; + + if (fh && fh->ctrl_handler) + hdl = fh->ctrl_handler; + else if (vdev->ctrl_handler) + hdl = vdev->ctrl_handler; + + if (hdl) { + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id); + + return ctrl && ctrl->is_ptr; } + + if (!ops->vidioc_query_ext_ctrl) + return false; + + return !ops->vidioc_query_ext_ctrl(file, fh, &qec) && + (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD); } -static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) +static int get_v4l2_ext_controls32(struct file *file, + struct v4l2_ext_controls *kp, + struct v4l2_ext_controls32 __user *up) { struct v4l2_ext_control32 __user *ucontrols; struct v4l2_ext_control __user *kcontrols; @@ -651,7 +668,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext return -EFAULT; if (get_user(id, &kcontrols->id)) return -EFAULT; - if (ctrl_is_pointer(id)) { + if (ctrl_is_pointer(file, id)) { void __user *s; if (get_user(p, &ucontrols->string)) @@ -666,7 +683,9 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext return 0; } -static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) +static int put_v4l2_ext_controls32(struct file *file, + struct v4l2_ext_controls *kp, + struct v4l2_ext_controls32 __user *up) { struct v4l2_ext_control32 __user *ucontrols; struct v4l2_ext_control __user *kcontrols = @@ -698,7 +717,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext /* Do not modify the pointer when copying a pointer control. The contents of the pointer was changed, not the pointer itself. */ - if (ctrl_is_pointer(id)) + if (ctrl_is_pointer(file, id)) size -= sizeof(ucontrols->value64); if (copy_in_user(ucontrols, kcontrols, size)) return -EFAULT; @@ -912,7 +931,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_G_EXT_CTRLS: case VIDIOC_S_EXT_CTRLS: case VIDIOC_TRY_EXT_CTRLS: - err = get_v4l2_ext_controls32(&karg.v2ecs, up); + err = get_v4l2_ext_controls32(file, &karg.v2ecs, up); compatible_arg = 0; break; case VIDIOC_DQEVENT: @@ -939,7 +958,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_G_EXT_CTRLS: case VIDIOC_S_EXT_CTRLS: case VIDIOC_TRY_EXT_CTRLS: - if (put_v4l2_ext_controls32(&karg.v2ecs, up)) + if (put_v4l2_ext_controls32(file, &karg.v2ecs, up)) err = -EFAULT; break; case VIDIOC_S_EDID: From a20ab4df701b76b10eedcdcbe0a108462d76d6b6 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:30 +0100 Subject: [PATCH 1911/3326] media: v4l2-compat-ioctl32.c: copy clip list in put_v4l2_window32 commit a751be5b142ef6bcbbb96d9899516f4d9c8d0ef4 upstream. put_v4l2_window32() didn't copy back the clip list to userspace. Drivers can update the clip rectangles, so this should be done. 
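A simplified userspace sketch of the added copy-back loop (rect, clip_native and clip_compat are invented stand-ins; in the kernel the copies go through copy_in_user on __user pointers): only the rectangle is copied per element, since the native and compat clip structs differ in their next fields.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Illustrative stand-ins for the clip structures. */
struct rect { int32_t left, top; uint32_t width, height; };
struct clip_native { struct rect c; struct clip_native *next; };
struct clip_compat { struct rect c; uint32_t next; };

/* Copy the (possibly driver-modified) rectangles back to the compat array.
 * Only 'c' is copied per element: the 'next' fields are not interchangeable. */
static void put_clips(const struct clip_native *kclips, struct clip_compat *uclips,
                      uint32_t n)
{
        while (n--) {
                uclips->c = kclips->c;
                uclips++;
                kclips++;
        }
}

int main(void)
{
        struct clip_native k[2] = {
                { { 0, 0, 64, 64 }, NULL },
                { { 64, 0, 32, 32 }, NULL },
        };
        struct clip_compat u[2];

        memset(u, 0, sizeof(u));
        put_clips(k, u, 2);
        printf("clip 0: %ux%u, clip 1: %ux%u\n",
               (unsigned)u[0].c.width, (unsigned)u[0].c.height,
               (unsigned)u[1].c.width, (unsigned)u[1].c.height);
        return 0;
}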
Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 59 +++++++++++++------ 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index d11334712c6554..de3e99dc3caa77 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -50,6 +50,11 @@ struct v4l2_window32 { static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) { + struct v4l2_clip32 __user *uclips; + struct v4l2_clip __user *kclips; + compat_caddr_t p; + u32 n; + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || copy_from_user(&kp->w, &up->w, sizeof(up->w)) || get_user(kp->field, &up->field) || @@ -59,38 +64,54 @@ static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user return -EFAULT; if (kp->clipcount > 2048) return -EINVAL; - if (kp->clipcount) { - struct v4l2_clip32 __user *uclips; - struct v4l2_clip __user *kclips; - int n = kp->clipcount; - compat_caddr_t p; + if (!kp->clipcount) { + kp->clips = NULL; + return 0; + } - if (get_user(p, &up->clips)) + n = kp->clipcount; + if (get_user(p, &up->clips)) + return -EFAULT; + uclips = compat_ptr(p); + kclips = compat_alloc_user_space(n * sizeof(*kclips)); + kp->clips = kclips; + while (n--) { + if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) return -EFAULT; - uclips = compat_ptr(p); - kclips = compat_alloc_user_space(n * sizeof(*kclips)); - kp->clips = kclips; - while (--n >= 0) { - if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) - return -EFAULT; - if (put_user(n ? kclips + 1 : NULL, &kclips->next)) - return -EFAULT; - uclips += 1; - kclips += 1; - } - } else - kp->clips = NULL; + if (put_user(n ? kclips + 1 : NULL, &kclips->next)) + return -EFAULT; + uclips++; + kclips++; + } return 0; } static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) { + struct v4l2_clip __user *kclips = kp->clips; + struct v4l2_clip32 __user *uclips; + u32 n = kp->clipcount; + compat_caddr_t p; + if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) || put_user(kp->field, &up->field) || put_user(kp->chromakey, &up->chromakey) || put_user(kp->clipcount, &up->clipcount) || put_user(kp->global_alpha, &up->global_alpha)) return -EFAULT; + + if (!kp->clipcount) + return 0; + + if (get_user(p, &up->clips)) + return -EFAULT; + uclips = compat_ptr(p); + while (n--) { + if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c))) + return -EFAULT; + uclips++; + kclips++; + } return 0; } From efabe94f8a0d1aedb1df67c2e1a56d310baf7ce2 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:31 +0100 Subject: [PATCH 1912/3326] media: v4l2-compat-ioctl32.c: drop pr_info for unknown buffer type commit 169f24ca68bf0f247d111aef07af00dd3a02ae88 upstream. There is nothing wrong with using an unknown buffer type. So stop spamming the kernel log whenever this happens. The kernel will just return -EINVAL to signal this. 
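A small sketch of the resulting behaviour, using a hypothetical subset of buffer types rather than the real enum: an unrecognised type coming from user space is simply rejected with -EINVAL, with no log message, because it is not a kernel fault.

#include <stdio.h>
#include <errno.h>

/* Hypothetical buffer types for illustration only. */
enum buf_type { BUF_VIDEO_CAPTURE = 1, BUF_VIDEO_OUTPUT = 2 };

static int check_type(unsigned int type)
{
        switch (type) {
        case BUF_VIDEO_CAPTURE:
        case BUF_VIDEO_OUTPUT:
                return 0;
        default:
                /* User-triggerable, so no logging: just signal -EINVAL. */
                return -EINVAL;
        }
}

int main(void)
{
        printf("known type:   %d\n", check_type(BUF_VIDEO_CAPTURE));
        printf("unknown type: %d\n", check_type(42));
        return 0;
}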
Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index de3e99dc3caa77..bef9c990c9bd6c 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -179,8 +179,6 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us return copy_from_user(&kp->fmt.meta, &up->fmt.meta, sizeof(kp->fmt.meta)) ? -EFAULT : 0; default: - pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", - kp->type); return -EINVAL; } } @@ -233,8 +231,6 @@ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us return copy_to_user(&up->fmt.meta, &kp->fmt.meta, sizeof(kp->fmt.meta)) ? -EFAULT : 0; default: - pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", - kp->type); return -EINVAL; } } From b3f69836c92b336cd1ca08bcedf3250265e66e46 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:32 +0100 Subject: [PATCH 1913/3326] media: v4l2-compat-ioctl32.c: don't copy back the result for certain errors commit d83a8243aaefe62ace433e4384a4f077bed86acb upstream. Some ioctls need to copy back the result even if the ioctl returned an error. However, don't do this for the error code -ENOTTY. It makes no sense in that cases. Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index bef9c990c9bd6c..5603c7c1edd54b 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -968,6 +968,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar set_fs(old_fs); } + if (err == -ENOTTY) + return err; + /* Special case: even after an error we need to put the results back for these ioctls since the error_idx will contain information on which control failed. */ From ad01b40bb9be0987e883655205606c710a8785a5 Mon Sep 17 00:00:00 2001 From: Daniel Mentz Date: Wed, 14 Feb 2018 12:44:33 +0100 Subject: [PATCH 1914/3326] media: v4l2-compat-ioctl32.c: refactor compat ioctl32 logic commit a1dfb4c48cc1e64eeb7800a27c66a6f7e88d075a upstream. The 32-bit compat v4l2 ioctl handling is implemented based on its 64-bit equivalent. It converts 32-bit data structures into its 64-bit equivalents and needs to provide the data to the 64-bit ioctl in user space memory which is commonly allocated using compat_alloc_user_space(). However, due to how that function is implemented, it can only be called a single time for every syscall invocation. Supposedly to avoid this limitation, the existing code uses a mix of memory from the kernel stack and memory allocated through compat_alloc_user_space(). Under normal circumstances, this would not work, because the 64-bit ioctl expects all pointers to point to user space memory. As a workaround, set_fs(KERNEL_DS) is called to temporarily disable this extra safety check and allow kernel pointers. 
However, this might introduce a security vulnerability: The result of the 32-bit to 64-bit conversion is writeable by user space because the output buffer has been allocated via compat_alloc_user_space(). A malicious user space process could then manipulate pointers inside this output buffer, and due to the previous set_fs(KERNEL_DS) call, functions like get_user() or put_user() no longer prevent kernel memory access. The new approach is to pre-calculate the total amount of user space memory that is needed, allocate it using compat_alloc_user_space() and then divide up the allocated memory to accommodate all data structures that need to be converted. An alternative approach would have been to retain the union type karg that they allocated on the kernel stack in do_video_ioctl(), copy all data from user space into karg and then back to user space. However, we decided against this approach because it does not align with other compat syscall implementations. Instead, we tried to replicate the get_user/put_user pairs as found in other places in the kernel: if (get_user(clipcount, &up->clipcount) || put_user(clipcount, &kp->clipcount)) return -EFAULT; Notes from hans.verkuil@cisco.com: This patch was taken from: https://github.com/LineageOS/android_kernel_samsung_apq8084/commit/97b733953c06e4f0398ade18850f0817778255f7 Clearly nobody could be bothered to upstream this patch or at minimum tell us :-( We only heard about this a week ago. This patch was rebased and cleaned up. Compared to the original I also swapped the order of the convert_in_user arguments so that they matched copy_in_user. It was hard to review otherwise. I also replaced the ALLOC_USER_SPACE/ALLOC_AND_GET by a normal function. Fixes: 6b5a9492ca ("v4l: introduce string control support.") Signed-off-by: Daniel Mentz Co-developed-by: Hans Verkuil Acked-by: Sakari Ailus Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 740 +++++++++++------- 1 file changed, 477 insertions(+), 263 deletions(-) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 5603c7c1edd54b..ec4a69728ed42b 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -22,6 +22,14 @@ #include #include +/* Use the same argument order as copy_in_user */ +#define assign_in_user(to, from) \ +({ \ + typeof(*from) __assign_tmp; \ + \ + get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \ +}) + static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = -ENOIOCTLCMD; @@ -48,37 +56,41 @@ struct v4l2_window32 { __u8 global_alpha; }; -static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) +static int get_v4l2_window32(struct v4l2_window __user *kp, + struct v4l2_window32 __user *up, + void __user *aux_buf, u32 aux_space) { struct v4l2_clip32 __user *uclips; struct v4l2_clip __user *kclips; compat_caddr_t p; - u32 n; + u32 clipcount; if (!access_ok(VERIFY_READ, up, sizeof(*up)) || - copy_from_user(&kp->w, &up->w, sizeof(up->w)) || - get_user(kp->field, &up->field) || - get_user(kp->chromakey, &up->chromakey) || - get_user(kp->clipcount, &up->clipcount) || - get_user(kp->global_alpha, &up->global_alpha)) + copy_in_user(&kp->w, &up->w, sizeof(up->w)) || + assign_in_user(&kp->field, &up->field) || + assign_in_user(&kp->chromakey, &up->chromakey) || + assign_in_user(&kp->global_alpha, 
&up->global_alpha) || + get_user(clipcount, &up->clipcount) || + put_user(clipcount, &kp->clipcount)) return -EFAULT; - if (kp->clipcount > 2048) + if (clipcount > 2048) return -EINVAL; - if (!kp->clipcount) { - kp->clips = NULL; - return 0; - } + if (!clipcount) + return put_user(NULL, &kp->clips); - n = kp->clipcount; if (get_user(p, &up->clips)) return -EFAULT; uclips = compat_ptr(p); - kclips = compat_alloc_user_space(n * sizeof(*kclips)); - kp->clips = kclips; - while (n--) { + if (aux_space < clipcount * sizeof(*kclips)) + return -EFAULT; + kclips = aux_buf; + if (put_user(kclips, &kp->clips)) + return -EFAULT; + + while (clipcount--) { if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) return -EFAULT; - if (put_user(n ? kclips + 1 : NULL, &kclips->next)) + if (put_user(clipcount ? kclips + 1 : NULL, &kclips->next)) return -EFAULT; uclips++; kclips++; @@ -86,27 +98,28 @@ static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user return 0; } -static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) +static int put_v4l2_window32(struct v4l2_window __user *kp, + struct v4l2_window32 __user *up) { struct v4l2_clip __user *kclips = kp->clips; struct v4l2_clip32 __user *uclips; - u32 n = kp->clipcount; compat_caddr_t p; - - if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) || - put_user(kp->field, &up->field) || - put_user(kp->chromakey, &up->chromakey) || - put_user(kp->clipcount, &up->clipcount) || - put_user(kp->global_alpha, &up->global_alpha)) + u32 clipcount; + + if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) || + assign_in_user(&up->field, &kp->field) || + assign_in_user(&up->chromakey, &kp->chromakey) || + assign_in_user(&up->global_alpha, &kp->global_alpha) || + get_user(clipcount, &kp->clipcount) || + put_user(clipcount, &up->clipcount)) return -EFAULT; - - if (!kp->clipcount) + if (!clipcount) return 0; if (get_user(p, &up->clips)) return -EFAULT; uclips = compat_ptr(p); - while (n--) { + while (clipcount--) { if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c))) return -EFAULT; uclips++; @@ -146,107 +159,164 @@ struct v4l2_create_buffers32 { __u32 reserved[8]; }; -static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int __bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size) +{ + u32 type; + + if (get_user(type, &up->type)) + return -EFAULT; + + switch (type) { + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: { + u32 clipcount; + + if (get_user(clipcount, &up->fmt.win.clipcount)) + return -EFAULT; + if (clipcount > 2048) + return -EINVAL; + *size = clipcount * sizeof(struct v4l2_clip); + return 0; + } + default: + *size = 0; + return 0; + } +} + +static int bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size) +{ + if (!access_ok(VERIFY_READ, up, sizeof(*up))) + return -EFAULT; + return __bufsize_v4l2_format(up, size); +} + +static int __get_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up, + void __user *aux_buf, u32 aux_space) { - if (get_user(kp->type, &up->type)) + u32 type; + + if (get_user(type, &up->type) || put_user(type, &kp->type)) return -EFAULT; - switch (kp->type) { + switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: - return copy_from_user(&kp->fmt.pix, &up->fmt.pix, - sizeof(kp->fmt.pix)) ? -EFAULT : 0; + return copy_in_user(&kp->fmt.pix, &up->fmt.pix, + sizeof(kp->fmt.pix)) ? 
-EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - return copy_from_user(&kp->fmt.pix_mp, &up->fmt.pix_mp, - sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; + return copy_in_user(&kp->fmt.pix_mp, &up->fmt.pix_mp, + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - return get_v4l2_window32(&kp->fmt.win, &up->fmt.win); + return get_v4l2_window32(&kp->fmt.win, &up->fmt.win, + aux_buf, aux_space); case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: - return copy_from_user(&kp->fmt.vbi, &up->fmt.vbi, - sizeof(kp->fmt.vbi)) ? -EFAULT : 0; + return copy_in_user(&kp->fmt.vbi, &up->fmt.vbi, + sizeof(kp->fmt.vbi)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - return copy_from_user(&kp->fmt.sliced, &up->fmt.sliced, - sizeof(kp->fmt.sliced)) ? -EFAULT : 0; + return copy_in_user(&kp->fmt.sliced, &up->fmt.sliced, + sizeof(kp->fmt.sliced)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SDR_CAPTURE: case V4L2_BUF_TYPE_SDR_OUTPUT: - return copy_from_user(&kp->fmt.sdr, &up->fmt.sdr, - sizeof(kp->fmt.sdr)) ? -EFAULT : 0; + return copy_in_user(&kp->fmt.sdr, &up->fmt.sdr, + sizeof(kp->fmt.sdr)) ? -EFAULT : 0; case V4L2_BUF_TYPE_META_CAPTURE: - return copy_from_user(&kp->fmt.meta, &up->fmt.meta, - sizeof(kp->fmt.meta)) ? -EFAULT : 0; + return copy_in_user(&kp->fmt.meta, &up->fmt.meta, + sizeof(kp->fmt.meta)) ? -EFAULT : 0; default: return -EINVAL; } } -static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int get_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up, + void __user *aux_buf, u32 aux_space) { if (!access_ok(VERIFY_READ, up, sizeof(*up))) return -EFAULT; - return __get_v4l2_format32(kp, up); + return __get_v4l2_format32(kp, up, aux_buf, aux_space); } -static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) +static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *up, + u32 *size) +{ + if (!access_ok(VERIFY_READ, up, sizeof(*up))) + return -EFAULT; + return __bufsize_v4l2_format(&up->format, size); +} + +static int get_v4l2_create32(struct v4l2_create_buffers __user *kp, + struct v4l2_create_buffers32 __user *up, + void __user *aux_buf, u32 aux_space) { if (!access_ok(VERIFY_READ, up, sizeof(*up)) || - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) + copy_in_user(kp, up, + offsetof(struct v4l2_create_buffers32, format))) return -EFAULT; - return __get_v4l2_format32(&kp->format, &up->format); + return __get_v4l2_format32(&kp->format, &up->format, + aux_buf, aux_space); } -static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int __put_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up) { - if (put_user(kp->type, &up->type)) + u32 type; + + if (get_user(type, &kp->type)) return -EFAULT; - switch (kp->type) { + switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: - return copy_to_user(&up->fmt.pix, &kp->fmt.pix, + return copy_in_user(&up->fmt.pix, &kp->fmt.pix, sizeof(kp->fmt.pix)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - return copy_to_user(&up->fmt.pix_mp, &kp->fmt.pix_mp, + return copy_in_user(&up->fmt.pix_mp, &kp->fmt.pix_mp, sizeof(kp->fmt.pix_mp)) ? 
-EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: return put_v4l2_window32(&kp->fmt.win, &up->fmt.win); case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: - return copy_to_user(&up->fmt.vbi, &kp->fmt.vbi, + return copy_in_user(&up->fmt.vbi, &kp->fmt.vbi, sizeof(kp->fmt.vbi)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - return copy_to_user(&up->fmt.sliced, &kp->fmt.sliced, + return copy_in_user(&up->fmt.sliced, &kp->fmt.sliced, sizeof(kp->fmt.sliced)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SDR_CAPTURE: case V4L2_BUF_TYPE_SDR_OUTPUT: - return copy_to_user(&up->fmt.sdr, &kp->fmt.sdr, + return copy_in_user(&up->fmt.sdr, &kp->fmt.sdr, sizeof(kp->fmt.sdr)) ? -EFAULT : 0; case V4L2_BUF_TYPE_META_CAPTURE: - return copy_to_user(&up->fmt.meta, &kp->fmt.meta, + return copy_in_user(&up->fmt.meta, &kp->fmt.meta, sizeof(kp->fmt.meta)) ? -EFAULT : 0; default: return -EINVAL; } } -static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int put_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up) { if (!access_ok(VERIFY_WRITE, up, sizeof(*up))) return -EFAULT; return __put_v4l2_format32(kp, up); } -static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) +static int put_v4l2_create32(struct v4l2_create_buffers __user *kp, + struct v4l2_create_buffers32 __user *up) { if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || - copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) || - copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) + copy_in_user(up, kp, + offsetof(struct v4l2_create_buffers32, format)) || + copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved))) return -EFAULT; return __put_v4l2_format32(&kp->format, &up->format); } @@ -260,25 +330,27 @@ struct v4l2_standard32 { __u32 reserved[4]; }; -static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) +static int get_v4l2_standard32(struct v4l2_standard __user *kp, + struct v4l2_standard32 __user *up) { /* other fields are not set by the user, nor used by the driver */ if (!access_ok(VERIFY_READ, up, sizeof(*up)) || - get_user(kp->index, &up->index)) + assign_in_user(&kp->index, &up->index)) return -EFAULT; return 0; } -static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) +static int put_v4l2_standard32(struct v4l2_standard __user *kp, + struct v4l2_standard32 __user *up) { if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || - put_user(kp->index, &up->index) || - put_user(kp->id, &up->id) || - copy_to_user(up->name, kp->name, sizeof(up->name)) || - copy_to_user(&up->frameperiod, &kp->frameperiod, - sizeof(kp->frameperiod)) || - put_user(kp->framelines, &up->framelines) || - copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) + assign_in_user(&up->index, &kp->index) || + assign_in_user(&up->id, &kp->id) || + copy_in_user(up->name, kp->name, sizeof(up->name)) || + copy_in_user(&up->frameperiod, &kp->frameperiod, + sizeof(up->frameperiod)) || + assign_in_user(&up->framelines, &kp->framelines) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) return -EFAULT; return 0; } @@ -318,11 +390,11 @@ struct v4l2_buffer32 { __u32 reserved; }; -static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, +static int get_v4l2_plane32(struct v4l2_plane __user *up, + struct v4l2_plane32 __user *up32, enum 
v4l2_memory memory) { - void __user *up_pln; - compat_long_t p; + compat_ulong_t p; if (copy_in_user(up, up32, 2 * sizeof(__u32)) || copy_in_user(&up->data_offset, &up32->data_offset, @@ -337,10 +409,8 @@ static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ return -EFAULT; break; case V4L2_MEMORY_USERPTR: - if (get_user(p, &up32->m.userptr)) - return -EFAULT; - up_pln = compat_ptr(p); - if (put_user((unsigned long)up_pln, &up->m.userptr)) + if (get_user(p, &up32->m.userptr) || + put_user((unsigned long)compat_ptr(p), &up->m.userptr)) return -EFAULT; break; case V4L2_MEMORY_DMABUF: @@ -352,7 +422,8 @@ static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ return 0; } -static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, +static int put_v4l2_plane32(struct v4l2_plane __user *up, + struct v4l2_plane32 __user *up32, enum v4l2_memory memory) { unsigned long p; @@ -376,8 +447,7 @@ static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ return -EFAULT; break; case V4L2_MEMORY_DMABUF: - if (copy_in_user(&up32->m.fd, &up->m.fd, - sizeof(up->m.fd))) + if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd))) return -EFAULT; break; } @@ -385,79 +455,121 @@ static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __ return 0; } -static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) +static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *up, u32 *size) +{ + u32 type; + u32 length; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + get_user(type, &up->type) || + get_user(length, &up->length)) + return -EFAULT; + + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + if (length > VIDEO_MAX_PLANES) + return -EINVAL; + + /* + * We don't really care if userspace decides to kill itself + * by passing a very big length value + */ + *size = length * sizeof(struct v4l2_plane); + } else { + *size = 0; + } + return 0; +} + +static int get_v4l2_buffer32(struct v4l2_buffer __user *kp, + struct v4l2_buffer32 __user *up, + void __user *aux_buf, u32 aux_space) { + u32 type; + u32 length; + enum v4l2_memory memory; struct v4l2_plane32 __user *uplane32; struct v4l2_plane __user *uplane; compat_caddr_t p; int ret; if (!access_ok(VERIFY_READ, up, sizeof(*up)) || - get_user(kp->index, &up->index) || - get_user(kp->type, &up->type) || - get_user(kp->flags, &up->flags) || - get_user(kp->memory, &up->memory) || - get_user(kp->length, &up->length)) + assign_in_user(&kp->index, &up->index) || + get_user(type, &up->type) || + put_user(type, &kp->type) || + assign_in_user(&kp->flags, &up->flags) || + get_user(memory, &up->memory) || + put_user(memory, &kp->memory) || + get_user(length, &up->length) || + put_user(length, &kp->length)) return -EFAULT; - if (V4L2_TYPE_IS_OUTPUT(kp->type)) - if (get_user(kp->bytesused, &up->bytesused) || - get_user(kp->field, &up->field) || - get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - get_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec)) + if (V4L2_TYPE_IS_OUTPUT(type)) + if (assign_in_user(&kp->bytesused, &up->bytesused) || + assign_in_user(&kp->field, &up->field) || + assign_in_user(&kp->timestamp.tv_sec, + &up->timestamp.tv_sec) || + assign_in_user(&kp->timestamp.tv_usec, + &up->timestamp.tv_usec)) return -EFAULT; - if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { - unsigned int num_planes; + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + u32 num_planes = length; - if (kp->length == 0) { - kp->m.planes = NULL; - /* num_planes 
== 0 is legal, e.g. when userspace doesn't - * need planes array on DQBUF*/ - return 0; - } else if (kp->length > VIDEO_MAX_PLANES) { - return -EINVAL; + if (num_planes == 0) { + /* + * num_planes == 0 is legal, e.g. when userspace doesn't + * need planes array on DQBUF + */ + return put_user(NULL, &kp->m.planes); } + if (num_planes > VIDEO_MAX_PLANES) + return -EINVAL; if (get_user(p, &up->m.planes)) return -EFAULT; uplane32 = compat_ptr(p); if (!access_ok(VERIFY_READ, uplane32, - kp->length * sizeof(*uplane32))) + num_planes * sizeof(*uplane32))) return -EFAULT; - /* We don't really care if userspace decides to kill itself - * by passing a very big num_planes value */ - uplane = compat_alloc_user_space(kp->length * sizeof(*uplane)); - kp->m.planes = (__force struct v4l2_plane *)uplane; + /* + * We don't really care if userspace decides to kill itself + * by passing a very big num_planes value + */ + if (aux_space < num_planes * sizeof(*uplane)) + return -EFAULT; - for (num_planes = 0; num_planes < kp->length; num_planes++) { - ret = get_v4l2_plane32(uplane, uplane32, kp->memory); + uplane = aux_buf; + if (put_user((__force struct v4l2_plane *)uplane, + &kp->m.planes)) + return -EFAULT; + + while (num_planes--) { + ret = get_v4l2_plane32(uplane, uplane32, memory); if (ret) return ret; - ++uplane; - ++uplane32; + uplane++; + uplane32++; } } else { - switch (kp->memory) { + switch (memory) { case V4L2_MEMORY_MMAP: case V4L2_MEMORY_OVERLAY: - if (get_user(kp->m.offset, &up->m.offset)) + if (assign_in_user(&kp->m.offset, &up->m.offset)) return -EFAULT; break; - case V4L2_MEMORY_USERPTR: - { - compat_long_t tmp; - - if (get_user(tmp, &up->m.userptr)) - return -EFAULT; + case V4L2_MEMORY_USERPTR: { + compat_ulong_t userptr; - kp->m.userptr = (unsigned long)compat_ptr(tmp); - } + if (get_user(userptr, &up->m.userptr) || + put_user((unsigned long)compat_ptr(userptr), + &kp->m.userptr)) + return -EFAULT; break; + } case V4L2_MEMORY_DMABUF: - if (get_user(kp->m.fd, &up->m.fd)) + if (assign_in_user(&kp->m.fd, &up->m.fd)) return -EFAULT; break; } @@ -466,62 +578,70 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user return 0; } -static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) +static int put_v4l2_buffer32(struct v4l2_buffer __user *kp, + struct v4l2_buffer32 __user *up) { + u32 type; + u32 length; + enum v4l2_memory memory; struct v4l2_plane32 __user *uplane32; struct v4l2_plane __user *uplane; compat_caddr_t p; - int num_planes; int ret; if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || - put_user(kp->index, &up->index) || - put_user(kp->type, &up->type) || - put_user(kp->flags, &up->flags) || - put_user(kp->memory, &up->memory)) + assign_in_user(&up->index, &kp->index) || + get_user(type, &kp->type) || + put_user(type, &up->type) || + assign_in_user(&up->flags, &kp->flags) || + get_user(memory, &kp->memory) || + put_user(memory, &up->memory)) return -EFAULT; - if (put_user(kp->bytesused, &up->bytesused) || - put_user(kp->field, &up->field) || - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) || - copy_to_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) || - put_user(kp->sequence, &up->sequence) || - put_user(kp->reserved2, &up->reserved2) || - put_user(kp->reserved, &up->reserved) || - put_user(kp->length, &up->length)) + if (assign_in_user(&up->bytesused, &kp->bytesused) || + assign_in_user(&up->field, &kp->field) || + assign_in_user(&up->timestamp.tv_sec, 
&kp->timestamp.tv_sec) || + assign_in_user(&up->timestamp.tv_usec, &kp->timestamp.tv_usec) || + copy_in_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) || + assign_in_user(&up->sequence, &kp->sequence) || + assign_in_user(&up->reserved2, &kp->reserved2) || + assign_in_user(&up->reserved, &kp->reserved) || + get_user(length, &kp->length) || + put_user(length, &up->length)) return -EFAULT; - if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { - num_planes = kp->length; + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + u32 num_planes = length; + if (num_planes == 0) return 0; - uplane = (__force struct v4l2_plane __user *)kp->m.planes; + if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes))) + return -EFAULT; if (get_user(p, &up->m.planes)) return -EFAULT; uplane32 = compat_ptr(p); - while (--num_planes >= 0) { - ret = put_v4l2_plane32(uplane, uplane32, kp->memory); + while (num_planes--) { + ret = put_v4l2_plane32(uplane, uplane32, memory); if (ret) return ret; ++uplane; ++uplane32; } } else { - switch (kp->memory) { + switch (memory) { case V4L2_MEMORY_MMAP: case V4L2_MEMORY_OVERLAY: - if (put_user(kp->m.offset, &up->m.offset)) + if (assign_in_user(&up->m.offset, &kp->m.offset)) return -EFAULT; break; case V4L2_MEMORY_USERPTR: - if (put_user(kp->m.userptr, &up->m.userptr)) + if (assign_in_user(&up->m.userptr, &kp->m.userptr)) return -EFAULT; break; case V4L2_MEMORY_DMABUF: - if (put_user(kp->m.fd, &up->m.fd)) + if (assign_in_user(&up->m.fd, &kp->m.fd)) return -EFAULT; break; } @@ -546,29 +666,32 @@ struct v4l2_framebuffer32 { } fmt; }; -static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) +static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, + struct v4l2_framebuffer32 __user *up) { - u32 tmp; + compat_caddr_t tmp; if (!access_ok(VERIFY_READ, up, sizeof(*up)) || get_user(tmp, &up->base) || - get_user(kp->capability, &up->capability) || - get_user(kp->flags, &up->flags) || - copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt))) + put_user((__force void *)compat_ptr(tmp), &kp->base) || + assign_in_user(&kp->capability, &up->capability) || + assign_in_user(&kp->flags, &up->flags) || + copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt))) return -EFAULT; - kp->base = (__force void *)compat_ptr(tmp); return 0; } -static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) +static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, + struct v4l2_framebuffer32 __user *up) { - u32 tmp = (u32)((unsigned long)kp->base); + void *base; if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || - put_user(tmp, &up->base) || - put_user(kp->capability, &up->capability) || - put_user(kp->flags, &up->flags) || - copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt))) + get_user(base, &kp->base) || + put_user(ptr_to_compat(base), &up->base) || + assign_in_user(&up->capability, &kp->capability) || + assign_in_user(&up->flags, &kp->flags) || + copy_in_user(&up->fmt, &kp->fmt, sizeof(kp->fmt))) return -EFAULT; return 0; } @@ -585,18 +708,22 @@ struct v4l2_input32 { __u32 reserved[3]; }; -/* The 64-bit v4l2_input struct has extra padding at the end of the struct. - Otherwise it is identical to the 32-bit version. */ -static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) +/* + * The 64-bit v4l2_input struct has extra padding at the end of the struct. + * Otherwise it is identical to the 32-bit version. 
+ */ +static inline int get_v4l2_input32(struct v4l2_input __user *kp, + struct v4l2_input32 __user *up) { - if (copy_from_user(kp, up, sizeof(*up))) + if (copy_in_user(kp, up, sizeof(*up))) return -EFAULT; return 0; } -static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) +static inline int put_v4l2_input32(struct v4l2_input __user *kp, + struct v4l2_input32 __user *up) { - if (copy_to_user(up, kp, sizeof(*up))) + if (copy_in_user(up, kp, sizeof(*up))) return -EFAULT; return 0; } @@ -650,41 +777,64 @@ static inline bool ctrl_is_pointer(struct file *file, u32 id) (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD); } +static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *up, + u32 *size) +{ + u32 count; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + get_user(count, &up->count)) + return -EFAULT; + if (count > V4L2_CID_MAX_CTRLS) + return -EINVAL; + *size = count * sizeof(struct v4l2_ext_control); + return 0; +} + static int get_v4l2_ext_controls32(struct file *file, - struct v4l2_ext_controls *kp, - struct v4l2_ext_controls32 __user *up) + struct v4l2_ext_controls __user *kp, + struct v4l2_ext_controls32 __user *up, + void __user *aux_buf, u32 aux_space) { struct v4l2_ext_control32 __user *ucontrols; struct v4l2_ext_control __user *kcontrols; - unsigned int n; + u32 count; + u32 n; compat_caddr_t p; if (!access_ok(VERIFY_READ, up, sizeof(*up)) || - get_user(kp->which, &up->which) || - get_user(kp->count, &up->count) || - get_user(kp->error_idx, &up->error_idx) || - copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) + assign_in_user(&kp->which, &up->which) || + get_user(count, &up->count) || + put_user(count, &kp->count) || + assign_in_user(&kp->error_idx, &up->error_idx) || + copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved))) return -EFAULT; - if (kp->count == 0) { - kp->controls = NULL; - return 0; - } else if (kp->count > V4L2_CID_MAX_CTRLS) { + + if (count == 0) + return put_user(NULL, &kp->controls); + if (count > V4L2_CID_MAX_CTRLS) return -EINVAL; - } if (get_user(p, &up->controls)) return -EFAULT; ucontrols = compat_ptr(p); - if (!access_ok(VERIFY_READ, ucontrols, kp->count * sizeof(*ucontrols))) + if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols))) return -EFAULT; - kcontrols = compat_alloc_user_space(kp->count * sizeof(*kcontrols)); - kp->controls = (__force struct v4l2_ext_control *)kcontrols; - for (n = 0; n < kp->count; n++) { + if (aux_space < count * sizeof(*kcontrols)) + return -EFAULT; + kcontrols = aux_buf; + if (put_user((__force struct v4l2_ext_control *)kcontrols, + &kp->controls)) + return -EFAULT; + + for (n = 0; n < count; n++) { u32 id; if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols))) return -EFAULT; + if (get_user(id, &kcontrols->id)) return -EFAULT; + if (ctrl_is_pointer(file, id)) { void __user *s; @@ -701,43 +851,54 @@ static int get_v4l2_ext_controls32(struct file *file, } static int put_v4l2_ext_controls32(struct file *file, - struct v4l2_ext_controls *kp, + struct v4l2_ext_controls __user *kp, struct v4l2_ext_controls32 __user *up) { struct v4l2_ext_control32 __user *ucontrols; - struct v4l2_ext_control __user *kcontrols = - (__force struct v4l2_ext_control __user *)kp->controls; - int n = kp->count; + struct v4l2_ext_control __user *kcontrols; + u32 count; + u32 n; compat_caddr_t p; if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || - put_user(kp->which, &up->which) || - put_user(kp->count, &up->count) || - put_user(kp->error_idx, &up->error_idx) || - 
copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) + assign_in_user(&up->which, &kp->which) || + get_user(count, &kp->count) || + put_user(count, &up->count) || + assign_in_user(&up->error_idx, &kp->error_idx) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)) || + get_user(kcontrols, &kp->controls)) return -EFAULT; - if (!kp->count) - return 0; + if (!count) + return 0; if (get_user(p, &up->controls)) return -EFAULT; ucontrols = compat_ptr(p); - if (!access_ok(VERIFY_WRITE, ucontrols, n * sizeof(*ucontrols))) + if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols))) return -EFAULT; - while (--n >= 0) { - unsigned size = sizeof(*ucontrols); + for (n = 0; n < count; n++) { + unsigned int size = sizeof(*ucontrols); u32 id; - if (get_user(id, &kcontrols->id)) + if (get_user(id, &kcontrols->id) || + put_user(id, &ucontrols->id) || + assign_in_user(&ucontrols->size, &kcontrols->size) || + copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2, + sizeof(ucontrols->reserved2))) return -EFAULT; - /* Do not modify the pointer when copying a pointer control. - The contents of the pointer was changed, not the pointer - itself. */ + + /* + * Do not modify the pointer when copying a pointer control. + * The contents of the pointer was changed, not the pointer + * itself. + */ if (ctrl_is_pointer(file, id)) size -= sizeof(ucontrols->value64); + if (copy_in_user(ucontrols, kcontrols, size)) return -EFAULT; + ucontrols++; kcontrols++; } @@ -757,17 +918,18 @@ struct v4l2_event32 { __u32 reserved[8]; }; -static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up) +static int put_v4l2_event32(struct v4l2_event __user *kp, + struct v4l2_event32 __user *up) { if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || - put_user(kp->type, &up->type) || - copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || - put_user(kp->pending, &up->pending) || - put_user(kp->sequence, &up->sequence) || - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || - put_user(kp->id, &up->id) || - copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) + assign_in_user(&up->type, &kp->type) || + copy_in_user(&up->u, &kp->u, sizeof(kp->u)) || + assign_in_user(&up->pending, &kp->pending) || + assign_in_user(&up->sequence, &kp->sequence) || + assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) || + assign_in_user(&up->timestamp.tv_nsec, &kp->timestamp.tv_nsec) || + assign_in_user(&up->id, &kp->id) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) return -EFAULT; return 0; } @@ -780,31 +942,34 @@ struct v4l2_edid32 { compat_caddr_t edid; }; -static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) +static int get_v4l2_edid32(struct v4l2_edid __user *kp, + struct v4l2_edid32 __user *up) { - u32 tmp; + compat_uptr_t tmp; if (!access_ok(VERIFY_READ, up, sizeof(*up)) || - get_user(kp->pad, &up->pad) || - get_user(kp->start_block, &up->start_block) || - get_user(kp->blocks, &up->blocks) || + assign_in_user(&kp->pad, &up->pad) || + assign_in_user(&kp->start_block, &up->start_block) || + assign_in_user(&kp->blocks, &up->blocks) || get_user(tmp, &up->edid) || - copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) + put_user(compat_ptr(tmp), &kp->edid) || + copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved))) return -EFAULT; - kp->edid = (__force u8 *)compat_ptr(tmp); return 0; } -static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 
__user *up) +static int put_v4l2_edid32(struct v4l2_edid __user *kp, + struct v4l2_edid32 __user *up) { - u32 tmp = (u32)((unsigned long)kp->edid); + void *edid; if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || - put_user(kp->pad, &up->pad) || - put_user(kp->start_block, &up->start_block) || - put_user(kp->blocks, &up->blocks) || - put_user(tmp, &up->edid) || - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) + assign_in_user(&up->pad, &kp->pad) || + assign_in_user(&up->start_block, &kp->start_block) || + assign_in_user(&up->blocks, &kp->blocks) || + get_user(edid, &kp->edid) || + put_user(ptr_to_compat(edid), &up->edid) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) return -EFAULT; return 0; } @@ -837,22 +1002,23 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) #define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32) #define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32) +static int alloc_userspace(unsigned int size, u32 aux_space, + void __user **up_native) +{ + *up_native = compat_alloc_user_space(size + aux_space); + if (!*up_native) + return -ENOMEM; + if (clear_user(*up_native, size)) + return -EFAULT; + return 0; +} + static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - union { - struct v4l2_format v2f; - struct v4l2_buffer v2b; - struct v4l2_framebuffer v2fb; - struct v4l2_input v2i; - struct v4l2_standard v2s; - struct v4l2_ext_controls v2ecs; - struct v4l2_event v2ev; - struct v4l2_create_buffers v2crt; - struct v4l2_edid v2edid; - unsigned long vx; - int vi; - } karg; void __user *up = compat_ptr(arg); + void __user *up_native = NULL; + void __user *aux_buf; + u32 aux_space; int compatible_arg = 1; long err = 0; @@ -891,30 +1057,52 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_STREAMOFF: case VIDIOC_S_INPUT: case VIDIOC_S_OUTPUT: - err = get_user(karg.vi, (s32 __user *)up); + err = alloc_userspace(sizeof(unsigned int), 0, &up_native); + if (!err && assign_in_user((unsigned int __user *)up_native, + (compat_uint_t __user *)up)) + err = -EFAULT; compatible_arg = 0; break; case VIDIOC_G_INPUT: case VIDIOC_G_OUTPUT: + err = alloc_userspace(sizeof(unsigned int), 0, &up_native); compatible_arg = 0; break; case VIDIOC_G_EDID: case VIDIOC_S_EDID: - err = get_v4l2_edid32(&karg.v2edid, up); + err = alloc_userspace(sizeof(struct v4l2_edid), 0, &up_native); + if (!err) + err = get_v4l2_edid32(up_native, up); compatible_arg = 0; break; case VIDIOC_G_FMT: case VIDIOC_S_FMT: case VIDIOC_TRY_FMT: - err = get_v4l2_format32(&karg.v2f, up); + err = bufsize_v4l2_format(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_format), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_format); + err = get_v4l2_format32(up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; case VIDIOC_CREATE_BUFS: - err = get_v4l2_create32(&karg.v2crt, up); + err = bufsize_v4l2_create(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_create_buffers), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_create_buffers); + err = get_v4l2_create32(up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; @@ -922,36 +1110,63 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_QUERYBUF: case VIDIOC_QBUF: case VIDIOC_DQBUF: - err = get_v4l2_buffer32(&karg.v2b, up); + err = bufsize_v4l2_buffer(up, &aux_space); + if (!err) + 
err = alloc_userspace(sizeof(struct v4l2_buffer), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_buffer); + err = get_v4l2_buffer32(up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; case VIDIOC_S_FBUF: - err = get_v4l2_framebuffer32(&karg.v2fb, up); + err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, + &up_native); + if (!err) + err = get_v4l2_framebuffer32(up_native, up); compatible_arg = 0; break; case VIDIOC_G_FBUF: + err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, + &up_native); compatible_arg = 0; break; case VIDIOC_ENUMSTD: - err = get_v4l2_standard32(&karg.v2s, up); + err = alloc_userspace(sizeof(struct v4l2_standard), 0, + &up_native); + if (!err) + err = get_v4l2_standard32(up_native, up); compatible_arg = 0; break; case VIDIOC_ENUMINPUT: - err = get_v4l2_input32(&karg.v2i, up); + err = alloc_userspace(sizeof(struct v4l2_input), 0, &up_native); + if (!err) + err = get_v4l2_input32(up_native, up); compatible_arg = 0; break; case VIDIOC_G_EXT_CTRLS: case VIDIOC_S_EXT_CTRLS: case VIDIOC_TRY_EXT_CTRLS: - err = get_v4l2_ext_controls32(file, &karg.v2ecs, up); + err = bufsize_v4l2_ext_controls(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_ext_controls), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_ext_controls); + err = get_v4l2_ext_controls32(file, up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; case VIDIOC_DQEVENT: + err = alloc_userspace(sizeof(struct v4l2_event), 0, &up_native); compatible_arg = 0; break; } @@ -960,29 +1175,26 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar if (compatible_arg) err = native_ioctl(file, cmd, (unsigned long)up); - else { - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - err = native_ioctl(file, cmd, (unsigned long)&karg); - set_fs(old_fs); - } + else + err = native_ioctl(file, cmd, (unsigned long)up_native); if (err == -ENOTTY) return err; - /* Special case: even after an error we need to put the - results back for these ioctls since the error_idx will - contain information on which control failed. */ + /* + * Special case: even after an error we need to put the + * results back for these ioctls since the error_idx will + * contain information on which control failed. 
+ */ switch (cmd) { case VIDIOC_G_EXT_CTRLS: case VIDIOC_S_EXT_CTRLS: case VIDIOC_TRY_EXT_CTRLS: - if (put_v4l2_ext_controls32(file, &karg.v2ecs, up)) + if (put_v4l2_ext_controls32(file, up_native, up)) err = -EFAULT; break; case VIDIOC_S_EDID: - if (put_v4l2_edid32(&karg.v2edid, up)) + if (put_v4l2_edid32(up_native, up)) err = -EFAULT; break; } @@ -994,44 +1206,46 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_S_OUTPUT: case VIDIOC_G_INPUT: case VIDIOC_G_OUTPUT: - err = put_user(((s32)karg.vi), (s32 __user *)up); + if (assign_in_user((compat_uint_t __user *)up, + ((unsigned int __user *)up_native))) + err = -EFAULT; break; case VIDIOC_G_FBUF: - err = put_v4l2_framebuffer32(&karg.v2fb, up); + err = put_v4l2_framebuffer32(up_native, up); break; case VIDIOC_DQEVENT: - err = put_v4l2_event32(&karg.v2ev, up); + err = put_v4l2_event32(up_native, up); break; case VIDIOC_G_EDID: - err = put_v4l2_edid32(&karg.v2edid, up); + err = put_v4l2_edid32(up_native, up); break; case VIDIOC_G_FMT: case VIDIOC_S_FMT: case VIDIOC_TRY_FMT: - err = put_v4l2_format32(&karg.v2f, up); + err = put_v4l2_format32(up_native, up); break; case VIDIOC_CREATE_BUFS: - err = put_v4l2_create32(&karg.v2crt, up); + err = put_v4l2_create32(up_native, up); break; case VIDIOC_PREPARE_BUF: case VIDIOC_QUERYBUF: case VIDIOC_QBUF: case VIDIOC_DQBUF: - err = put_v4l2_buffer32(&karg.v2b, up); + err = put_v4l2_buffer32(up_native, up); break; case VIDIOC_ENUMSTD: - err = put_v4l2_standard32(&karg.v2s, up); + err = put_v4l2_standard32(up_native, up); break; case VIDIOC_ENUMINPUT: - err = put_v4l2_input32(&karg.v2i, up); + err = put_v4l2_input32(up_native, up); break; } return err; From 1a4834092539674b68f03f9b4f13f48c6b424a27 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 14 Feb 2018 12:44:34 +0100 Subject: [PATCH 1915/3326] media: v4l2-compat-ioctl32.c: make ctrl_is_pointer work for subdevs commit 273caa260035c03d89ad63d72d8cd3d9e5c5e3f1 upstream. If the device is of type VFL_TYPE_SUBDEV then vdev->ioctl_ops is NULL so the 'if (!ops->vidioc_query_ext_ctrl)' check would crash. Add a test for !ops to the condition. All sub-devices that have controls will use the control framework, so they do not have an equivalent to ops->vidioc_query_ext_ctrl. Returning false if ops is NULL is the correct thing to do here. 
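With the fix applied, the guard at the top of ctrl_is_pointer() (see the one-line hunk below) simply reads:

    if (!ops || !ops->vidioc_query_ext_ctrl)
            return false;
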
Fixes: b8c601e8af ("v4l2-compat-ioctl32.c: fix ctrl_is_pointer") Signed-off-by: Hans Verkuil Acked-by: Sakari Ailus Reported-by: Laurent Pinchart Reviewed-by: Laurent Pinchart Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index ec4a69728ed42b..cbeea8343a5c21 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -770,7 +770,7 @@ static inline bool ctrl_is_pointer(struct file *file, u32 id) return ctrl && ctrl->is_ptr; } - if (!ops->vidioc_query_ext_ctrl) + if (!ops || !ops->vidioc_query_ext_ctrl) return false; return !ops->vidioc_query_ext_ctrl(file, fh, &qec) && From f428567e005fa2a7f630266cb48f2036d7f01698 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Mon, 5 Feb 2018 11:15:52 +0200 Subject: [PATCH 1916/3326] crypto: caam - fix endless loop when DECO acquire fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 225ece3e7dad4cfc44cca38ce7a3a80f255ea8f1 upstream. In case DECO0 cannot be acquired - i.e. run_descriptor_deco0() fails with -ENODEV, caam_probe() enters an endless loop: run_descriptor_deco0 ret -ENODEV -> instantiate_rng -ENODEV, overwritten by -EAGAIN ret -EAGAIN -> caam_probe -EAGAIN results in endless loop It turns out the error path in instantiate_rng() is incorrect, the checks are done in the wrong order. Fixes: 1005bccd7a4a6 ("crypto: caam - enable instantiation of all RNG4 state handles") Reported-by: Bryan O'Donoghue Suggested-by: Auer Lukas Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/caam/ctrl.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 027e121c6f70aa..e1d4ae1153c4fb 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, * without any error (HW optimizations for later * CAAM eras), then try again. */ + if (ret) + break; + rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || - !(rdsta_val & (1 << sh_idx))) + !(rdsta_val & (1 << sh_idx))) { ret = -EAGAIN; - if (ret) break; + } + dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); /* Clear the contents before recreating the descriptor */ memset(desc, 0x00, CAAM_CMD_SZ * 7); From f37a798e7724b2b8c008a45e7604bc2cc75870d2 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 24 Jan 2018 00:31:27 -0800 Subject: [PATCH 1917/3326] crypto: sha512-mb - initialize pending lengths correctly commit eff84b379089cd8b4e83599639c1f5f6e34ef7bf upstream. The SHA-512 multibuffer code keeps track of the number of blocks pending in each lane. The minimum of these values is used to identify the next lane that will be completed. Unused lanes are set to a large number (0xFFFFFFFF) so that they don't affect this calculation. However, it was forgotten to set the lengths to this value in the initial state, where all lanes are unused. As a result it was possible for sha512_mb_mgr_get_comp_job_avx2() to select an unused lane, causing a NULL pointer dereference. 
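As a toy model of that selection logic (plain user-space C; it assumes, as the new initializers below suggest, that the upper 32 bits of each lens[] entry hold the pending length, the lower 32 bits the lane index, and that a length of zero marks a finished lane):

    #include <stdio.h>
    #include <stdint.h>

    #define LANES 4

    /* pick the lane with the smallest lens[] value, as described above */
    static int min_lane(const uint64_t lens[LANES])
    {
            int i, best = 0;

            for (i = 1; i < LANES; i++)
                    if (lens[i] < lens[best])
                            best = i;
            return best;
    }

    int main(void)
    {
            uint64_t buggy[LANES] = { 0, 1, 2, 3 };          /* old initial state */
            uint64_t fixed[LANES] = { 0xFFFFFFFF00000000ull,
                                      0xFFFFFFFF00000001ull,
                                      0xFFFFFFFF00000002ull,
                                      0xFFFFFFFF00000003ull };
            int lane = min_lane(buggy);

            /* With the old init an idle manager reports lane 0 as done,
             * although its job_in_lane would be NULL in the real driver. */
            if ((buggy[lane] >> 32) == 0)
                    printf("buggy init: lane %d reported complete\n", lane);

            /* With the fix every unused lane carries length 0xFFFFFFFF and
             * can never be mistaken for a finished real job. */
            lane = min_lane(fixed);
            if ((fixed[lane] >> 32) != 0)
                    printf("fixed init: no spurious completion\n");

            return 0;
    }
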
Specifically this could happen in the case where ->update() was passed fewer than SHA512_BLOCK_SIZE bytes of data, so it then called sha_complete_job() without having actually submitted any blocks to the multi-buffer code. This hit a NULL pointer dereference if another task happened to have submitted blocks concurrently to the same CPU and the flush timer had not yet expired. Fix this by initializing sha512_mb_mgr->lens correctly. As usual, this bug was found by syzkaller. Fixes: 45691e2d9b18 ("crypto: sha512-mb - submit/flush routines for AVX2") Reported-by: syzbot Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c index 36870b26067a73..d08805032f0193 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c +++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c @@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state) { unsigned int j; - state->lens[0] = 0; - state->lens[1] = 1; - state->lens[2] = 2; - state->lens[3] = 3; + /* initially all lanes are unused */ + state->lens[0] = 0xFFFFFFFF00000000; + state->lens[1] = 0xFFFFFFFF00000001; + state->lens[2] = 0xFFFFFFFF00000002; + state->lens[3] = 0xFFFFFFFF00000003; + state->unused_lanes = 0xFF03020100; for (j = 0; j < 4; j++) state->ldata[j].job_in_lane = NULL; From 7dffdb31ad13e37c3037185d3ac04a5e44ebacbd Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Feb 2018 17:56:06 +0000 Subject: [PATCH 1918/3326] arm: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls commit 20e8175d246e9f9deb377f2784b3e7dfb2ad3e86 upstream. KVM doesn't follow the SMCCC when it comes to unimplemented calls, and inject an UNDEF instead of returning an error. Since firmware calls are now used for security mitigation, they are becoming more common, and the undef is counter productive. Instead, let's follow the SMCCC which states that -1 must be returned to the caller when getting an unknown function number. Tested-by: Ard Biesheuvel Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Greg Kroah-Hartman --- arch/arm/kvm/handle_exit.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 76be5701f9d6ab..910bd8dabb3c03 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -38,7 +38,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) ret = kvm_hvc_call_handler(vcpu); if (ret < 0) { - kvm_inject_undefined(vcpu); + vcpu_set_reg(vcpu, 0, ~0UL); return 1; } @@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_inject_undefined(vcpu); + /* + * "If an SMC instruction executed at Non-secure EL1 is + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a + * Trap exception, not a Secure Monitor Call exception [...]" + * + * We need to advance the PC after the trap, as it would + * otherwise return to the same address... 
+ */ + vcpu_set_reg(vcpu, 0, ~0UL); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } From 6bad51166f875751657d11b9ff9cd79540302c90 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Thu, 9 Nov 2017 20:27:20 +0200 Subject: [PATCH 1919/3326] KVM: nVMX: Fix races when sending nested PI while dest enters/leaves L2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 6b6977117f50d60455ace86b2d256f6fb4f3de05 upstream. Consider the following scenario: 1. CPU A calls vmx_deliver_nested_posted_interrupt() to send an IPI to CPU B via virtual posted-interrupt mechanism. 2. CPU B is currently executing L2 guest. 3. vmx_deliver_nested_posted_interrupt() calls kvm_vcpu_trigger_posted_interrupt() which will note that vcpu->mode == IN_GUEST_MODE. 4. Assume that before CPU A sends the physical POSTED_INTR_NESTED_VECTOR IPI, CPU B exits from L2 to L0 during event-delivery (valid IDT-vectoring-info). 5. CPU A now sends the physical IPI. The IPI is received in host and it's handler (smp_kvm_posted_intr_nested_ipi()) does nothing. 6. Assume that before CPU A sets pi_pending=true and KVM_REQ_EVENT, CPU B continues to run in L0 and reach vcpu_enter_guest(). As KVM_REQ_EVENT is not set yet, vcpu_enter_guest() will continue and resume L2 guest. 7. At this point, CPU A sets pi_pending=true and KVM_REQ_EVENT but it's too late! CPU B already entered L2 and KVM_REQ_EVENT will only be consumed at next L2 entry! Another scenario to consider: 1. CPU A calls vmx_deliver_nested_posted_interrupt() to send an IPI to CPU B via virtual posted-interrupt mechanism. 2. Assume that before CPU A calls kvm_vcpu_trigger_posted_interrupt(), CPU B is at L0 and is about to resume into L2. Further assume that it is in vcpu_enter_guest() after check for KVM_REQ_EVENT. 3. At this point, CPU A calls kvm_vcpu_trigger_posted_interrupt() which will note that vcpu->mode != IN_GUEST_MODE. Therefore, do nothing and return false. Then, will set pi_pending=true and KVM_REQ_EVENT. 4. Now CPU B continue and resumes into L2 guest without processing the posted-interrupt until next L2 entry! To fix both issues, we just need to change vmx_deliver_nested_posted_interrupt() to set pi_pending=true and KVM_REQ_EVENT before calling kvm_vcpu_trigger_posted_interrupt(). It will fix the first scenario by chaging step (6) to note that KVM_REQ_EVENT and pi_pending=true and therefore process nested posted-interrupt. It will fix the second scenario by two possible ways: 1. If kvm_vcpu_trigger_posted_interrupt() is called while CPU B has changed vcpu->mode to IN_GUEST_MODE, physical IPI will be sent and will be received when CPU resumes into L2. 2. If kvm_vcpu_trigger_posted_interrupt() is called while CPU B hasn't yet changed vcpu->mode to IN_GUEST_MODE, then after CPU B will change vcpu->mode it will call kvm_request_pending() which will return true and therefore force another round of vcpu_enter_guest() which will note that KVM_REQ_EVENT and pi_pending=true and therefore process nested posted-interrupt. Fixes: 705699a13994 ("KVM: nVMX: Enable nested posted interrupt processing") Signed-off-by: Liran Alon Reviewed-by: Nikita Leshenko Reviewed-by: Krish Sadhukhan [Add kvm_vcpu_kick to also handle the case where L1 doesn't intercept L2 HLT and L2 executes HLT instruction. 
- Paolo] Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0ae4b1a861683c..75a273bc1c52ae 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5322,14 +5322,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, if (is_guest_mode(vcpu) && vector == vmx->nested.posted_intr_nv) { - /* the PIR and ON have been set by L1. */ - kvm_vcpu_trigger_posted_interrupt(vcpu, true); /* * If a posted intr is not recognized by hardware, * we will accomplish it in the next vmentry. */ vmx->nested.pi_pending = true; kvm_make_request(KVM_REQ_EVENT, vcpu); + /* the PIR and ON have been set by L1. */ + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) + kvm_vcpu_kick(vcpu); return 0; } return -1; From fe90a3a6f88179dfa427a758dbef47cb371b6be7 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Sun, 19 Nov 2017 18:25:43 +0200 Subject: [PATCH 1920/3326] KVM: nVMX: Fix bug of injecting L2 exception into L1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5c7d4f9ad39d980728b39752304ce10bb2960cbf upstream. kvm_clear_exception_queue() should clear pending exception. This also includes exceptions which were only marked pending but not yet injected. This is because exception.pending is used for both L1 and L2 to determine if an exception should be raised to guest. Note that an exception which is pending but not yet injected will be raised again once the guest will be resumed. Consider the following scenario: 1) L0 KVM with ignore_msrs=false. 2) L1 prepare vmcs12 with the following: a) No intercepts on MSR (MSR_BITMAP exist and is filled with 0). b) No intercept for #GP. c) vmx-preemption-timer is configured. 3) L1 enters into L2. 4) L2 reads an unhandled MSR that exists in MSR_BITMAP (such as 0x1fff). L2 RDMSR could be handled as described below: 1) L2 exits to L0 on RDMSR and calls handle_rdmsr(). 2) handle_rdmsr() calls kvm_inject_gp() which sets KVM_REQ_EVENT, exception.pending=true and exception.injected=false. 3) vcpu_enter_guest() consumes KVM_REQ_EVENT and calls inject_pending_event() which calls vmx_check_nested_events() which sees that exception.pending=true but nested_vmx_check_exception() returns 0 and therefore does nothing at this point. However let's assume it later sees vmx-preemption-timer expired and therefore exits from L2 to L1 by calling nested_vmx_vmexit(). 4) nested_vmx_vmexit() calls prepare_vmcs12() which calls vmcs12_save_pending_event() but it does nothing as exception.injected is false. Also prepare_vmcs12() calls kvm_clear_exception_queue() which does nothing as exception.injected is already false. 5) We now return from vmx_check_nested_events() with 0 while still having exception.pending=true! 6) Therefore inject_pending_event() continues and we inject L2 exception to L1!... This commit will fix above issue by changing step (4) to clear exception.pending in kvm_clear_exception_queue(). 
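For clarity, after this change the helper (see the x86.h hunk below) ends up as:

    static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
    {
            vcpu->arch.exception.pending = false;
            vcpu->arch.exception.injected = false;
    }

so step (4) above now really discards the pending-but-not-injected exception instead of leaving it armed for L1.
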
Fixes: 664f8e26b00c ("KVM: X86: Fix loss of exception which has not yet been injected") Signed-off-by: Liran Alon Reviewed-by: Nikita Leshenko Reviewed-by: Krish Sadhukhan Signed-off-by: Krish Sadhukhan Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 1 - arch/x86/kvm/x86.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 75a273bc1c52ae..0ea909ca45c2e3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11246,7 +11246,6 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) if (block_nested_events) return -EBUSY; nested_vmx_inject_exception_vmexit(vcpu, exit_qual); - vcpu->arch.exception.pending = false; return 0; } diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index d0b95b7a90b4ec..6d112d8f799cdd 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -12,6 +12,7 @@ static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) { + vcpu->arch.exception.pending = false; vcpu->arch.exception.injected = false; } From 88b64450cc0a9340145331b2ffefdff15a0f96d3 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 11 Jan 2018 14:31:43 +1100 Subject: [PATCH 1921/3326] KVM: PPC: Book3S HV: Make sure we don't re-enter guest without XIVE loaded commit 43ff3f65234061e08d234bdef5a9aadc19832b74 upstream. This fixes a bug where it is possible to enter a guest on a POWER9 system without having the XIVE (interrupt controller) context loaded. This can happen because we unload the XIVE context from the CPU before doing the real-mode handling for machine checks. After the real-mode handler runs, it is possible that we re-enter the guest via a fast path which does not load the XIVE context. To fix this, we move the unloading of the XIVE context to come after the real-mode machine check handler is called. 
Fixes: 5af50993850a ("KVM: PPC: Book3S HV: Native usage of the XIVE interrupt controller") Signed-off-by: Paul Mackerras Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 40 ++++++++++++------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index c85ac5c83bd4fd..2b3194b9608fdb 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1387,6 +1387,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) blt deliver_guest_interrupt guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ + /* Save more register state */ + mfdar r6 + mfdsisr r7 + std r6, VCPU_DAR(r9) + stw r7, VCPU_DSISR(r9) + /* don't overwrite fault_dar/fault_dsisr if HDSI */ + cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE + beq mc_cont + std r6, VCPU_FAULT_DAR(r9) + stw r7, VCPU_FAULT_DSISR(r9) + + /* See if it is a machine check */ + cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK + beq machine_check_realmode +mc_cont: +#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING + addi r3, r9, VCPU_TB_RMEXIT + mr r4, r9 + bl kvmhv_accumulate_time +#endif #ifdef CONFIG_KVM_XICS /* We are exiting, pull the VP from the XIVE */ lwz r0, VCPU_XIVE_PUSHED(r9) @@ -1424,26 +1444,6 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ eieio 1: #endif /* CONFIG_KVM_XICS */ - /* Save more register state */ - mfdar r6 - mfdsisr r7 - std r6, VCPU_DAR(r9) - stw r7, VCPU_DSISR(r9) - /* don't overwrite fault_dar/fault_dsisr if HDSI */ - cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE - beq mc_cont - std r6, VCPU_FAULT_DAR(r9) - stw r7, VCPU_FAULT_DSISR(r9) - - /* See if it is a machine check */ - cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK - beq machine_check_realmode -mc_cont: -#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING - addi r3, r9, VCPU_TB_RMEXIT - mr r4, r9 - bl kvmhv_accumulate_time -#endif mr r3, r12 /* Increment exit count, poke other threads to exit */ From be54d79b43fdb796787d004afbacef57bbab3074 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 30 Jan 2018 10:51:32 +1100 Subject: [PATCH 1922/3326] KVM: PPC: Book3S HV: Drop locks before reading guest memory commit 36ee41d161c67a6fcf696d4817a0da31f778938c upstream. Running with CONFIG_DEBUG_ATOMIC_SLEEP reveals that HV KVM tries to read guest memory, in order to emulate guest instructions, while preempt is disabled and a vcore lock is held. This occurs in kvmppc_handle_exit_hv(), called from post_guest_process(), when emulating guest doorbell instructions on POWER9 systems, and also when checking whether we have hit a hypervisor breakpoint. Reading guest memory can cause a page fault and thus cause the task to sleep, so we need to avoid reading guest memory while holding a spinlock or when preempt is disabled. To fix this, we move the preempt_enable() in kvmppc_run_core() to before the loop that calls post_guest_process() for each vcore that has just run, and we drop and re-take the vcore lock around the calls to kvmppc_emulate_debug_inst() and kvmppc_emulate_doorbell_instr(). Dropping the lock is safe with respect to the iteration over the runnable vcpus in post_guest_process(); for_each_runnable_thread is actually safe to use locklessly. It is possible for a vcpu to become runnable and add itself to the runnable_threads array (code near the beginning of kvmppc_run_vcpu()) and then get included in the iteration in post_guest_process despite the fact that it has not just run. 
This is benign because vcpu->arch.trap and vcpu->arch.ceded will be zero. Fixes: 579006944e0d ("KVM: PPC: Book3S HV: Virtualize doorbell facility on POWER9") Signed-off-by: Paul Mackerras Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_hv.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8d43cf205d3487..f48e3379a18a88 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -999,8 +999,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *tvcpu; - if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return EMULATE_FAIL; if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE) return RESUME_GUEST; if (get_op(inst) != 31) @@ -1050,6 +1048,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +/* Called with vcpu->arch.vcore->lock held */ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, struct task_struct *tsk) { @@ -1169,7 +1168,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, swab32(vcpu->arch.emul_inst) : vcpu->arch.emul_inst; if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { + /* Need vcore unlocked to call kvmppc_get_last_inst */ + spin_unlock(&vcpu->arch.vcore->lock); r = kvmppc_emulate_debug_inst(run, vcpu); + spin_lock(&vcpu->arch.vcore->lock); } else { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -1184,8 +1186,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, */ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: r = EMULATE_FAIL; - if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) + if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && + cpu_has_feature(CPU_FTR_ARCH_300)) { + /* Need vcore unlocked to call kvmppc_get_last_inst */ + spin_unlock(&vcpu->arch.vcore->lock); r = kvmppc_emulate_doorbell_instr(vcpu); + spin_lock(&vcpu->arch.vcore->lock); + } if (r == EMULATE_FAIL) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -2889,13 +2896,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); + preempt_enable(); + for (sub = 0; sub < core_info.n_subcores; ++sub) { pvc = core_info.vc[sub]; post_guest_process(pvc, pvc == vc); } spin_lock(&vc->lock); - preempt_enable(); out: vc->vcore_state = VCORE_INACTIVE; From 985bf39913428d1797427e779d747ae76b144901 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 22 Jan 2018 18:19:06 +0000 Subject: [PATCH 1923/3326] KVM: arm/arm64: Handle CPU_PM_ENTER_FAILED commit 58d6b15e9da5042a99c9c30ad725792e4569150e upstream. cpu_pm_enter() calls the pm notifier chain with CPU_PM_ENTER, then if there is a failure: CPU_PM_ENTER_FAILED. When KVM receives CPU_PM_ENTER it calls cpu_hyp_reset() which will return us to the hyp-stub. If we subsequently get a CPU_PM_ENTER_FAILED, KVM does nothing, leaving the CPU running with the hyp-stub, at odds with kvm_arm_hardware_enabled. Add CPU_PM_ENTER_FAILED as a fallthrough for CPU_PM_EXIT, this reloads KVM based on kvm_arm_hardware_enabled. This is safe even if CPU_PM_ENTER never gets as far as KVM, as cpu_hyp_reinit() calls cpu_hyp_reset() to make sure the hyp-stub is loaded before reloading KVM. 
Fixes: 67f691976662 ("arm64: kvm: allows kvm cpu hotplug") CC: Lorenzo Pieralisi Reviewed-by: Christoffer Dall Signed-off-by: James Morse Signed-off-by: Christoffer Dall Signed-off-by: Greg Kroah-Hartman --- virt/kvm/arm/arm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 385ba604a9b20b..8b6c42dc1aa98f 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1220,6 +1220,7 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self, cpu_hyp_reset(); return NOTIFY_OK; + case CPU_PM_ENTER_FAILED: case CPU_PM_EXIT: if (__this_cpu_read(kvm_arm_hardware_enabled)) /* The hardware was enabled before suspend. */ From e747a02d9fcf76c4146a5f151e2136591df5b0a3 Mon Sep 17 00:00:00 2001 From: Ulf Magnusson Date: Mon, 5 Feb 2018 02:21:14 +0100 Subject: [PATCH 1924/3326] KVM: PPC: Book3S PR: Fix broken select due to misspelling commit 57ea5f161a7de5b1913c212d04f57a175b159fdf upstream. Commit 76d837a4c0f9 ("KVM: PPC: Book3S PR: Don't include SPAPR TCE code on non-pseries platforms") added a reference to the globally undefined symbol PPC_SERIES. Looking at the rest of the commit, PPC_PSERIES was probably intended. Change PPC_SERIES to PPC_PSERIES. Discovered with the https://github.com/ulfalizer/Kconfiglib/blob/master/examples/list_undefined.py script. Fixes: 76d837a4c0f9 ("KVM: PPC: Book3S PR: Don't include SPAPR TCE code on non-pseries platforms") Signed-off-by: Ulf Magnusson Signed-off-by: Paul Mackerras Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index b12b8eb39c2978..648160334abf5c 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -68,7 +68,7 @@ config KVM_BOOK3S_64 select KVM_BOOK3S_64_HANDLER select KVM select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE - select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV) + select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV) ---help--- Support running unmodified book3s_64 and book3s_32 guest kernels in virtual machines on book3s_64 host processors. From 4562bfdeac8708371dec016f868a2a18ec6d1285 Mon Sep 17 00:00:00 2001 From: John Keeping Date: Mon, 8 Jan 2018 16:01:04 +0000 Subject: [PATCH 1925/3326] ASoC: rockchip: i2s: fix playback after runtime resume commit c66234cfedfc3e6e3b62563a5f2c1562be09a35d upstream. When restoring registers during runtime resume, we must not write to I2S_TXDR which is the transmit FIFO as this queues up a sample to be output and pushes all of the output channels down by one. This can be demonstrated with the speaker-test utility: for i in a b c; do speaker-test -c 2 -s 1; done which should play a test through the left speaker three times but if the I2S hardware starts runtime suspended the first sample will be played through the right speaker. Fix this by marking I2S_TXDR as volatile (which also requires marking it as readble, even though it technically isn't). This seems to be the most robust fix, the alternative of giving I2S_TXDR a default value is more fragile since it does not prevent regcache writing to the register in all circumstances. While here, also fix the configuration of I2S_RXDR and I2S_FIFOLR; these are not writable so they do not suffer from the same problem as I2S_TXDR but reading from I2S_RXDR does suffer from a similar problem. 
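For context on the regcache mechanics this commit relies on, here is a minimal sketch, not taken from the patch, of how a driver keeps FIFO data registers out of the register cache through regmap's .volatile_reg callback; the SKETCH_* offsets and config values are placeholders rather than the Rockchip driver's real definitions.

#include <linux/regmap.h>

#define SKETCH_TXDR	0x24	/* placeholder TX FIFO offset */
#define SKETCH_RXDR	0x28	/* placeholder RX FIFO offset */

/*
 * Volatile registers are always accessed on the hardware and are never
 * written back from the cache, which is exactly what a data FIFO needs:
 * a cached write-back on resume would queue a bogus sample.
 */
static bool sketch_i2s_volatile_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case SKETCH_TXDR:
	case SKETCH_RXDR:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config sketch_i2s_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.volatile_reg	= sketch_i2s_volatile_reg,
};

The patch below applies the same idea to I2S_TXDR/I2S_RXDR/I2S_FIFOLR, and additionally marks I2S_RXDR precious so that spurious reads (for example via debugfs) do not pop samples from the receive FIFO.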
Fixes: f0447f6cbb20 ("ASoC: rockchip: i2s: restore register during runtime_suspend/resume cycle", 2016-09-07) Signed-off-by: John Keeping Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/rockchip/rockchip_i2s.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index b6590467fd140c..66fc13a2396a04 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c @@ -504,6 +504,7 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg) case I2S_INTCR: case I2S_XFER: case I2S_CLR: + case I2S_TXDR: case I2S_RXDR: case I2S_FIFOLR: case I2S_INTSR: @@ -518,6 +519,9 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg) switch (reg) { case I2S_INTSR: case I2S_CLR: + case I2S_FIFOLR: + case I2S_TXDR: + case I2S_RXDR: return true; default: return false; @@ -527,6 +531,8 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg) static bool rockchip_i2s_precious_reg(struct device *dev, unsigned int reg) { switch (reg) { + case I2S_RXDR: + return true; default: return false; } From b897f1dc780e1ac019977297f2f32369776a30b1 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 3 Jan 2018 16:38:46 +0100 Subject: [PATCH 1926/3326] ASoC: skl: Fix kernel warning due to zero NHTL entry commit 20a1ea2222e7cbf96e9bf8579362e971491e6aea upstream. I got the following kernel warning when loading snd-soc-skl module on Dell Latitude 7270 laptop: memremap attempted on mixed range 0x0000000000000000 size: 0x0 WARNING: CPU: 0 PID: 484 at kernel/memremap.c:98 memremap+0x8a/0x180 Call Trace: skl_nhlt_init+0x82/0xf0 [snd_soc_skl] skl_probe+0x2ee/0x7c0 [snd_soc_skl] .... It seems that the machine doesn't support the SKL DSP gives the empty NHLT entry, and it triggers the warning. For avoiding it, let do the zero check before calling memremap(). Signed-off-by: Takashi Iwai Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/intel/skylake/skl-nhlt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c index e7d766d56c8e7d..55859c5b456f17 100644 --- a/sound/soc/intel/skylake/skl-nhlt.c +++ b/sound/soc/intel/skylake/skl-nhlt.c @@ -41,7 +41,8 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev) obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL); if (obj && obj->type == ACPI_TYPE_BUFFER) { nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer; - nhlt_table = (struct nhlt_acpi_table *) + if (nhlt_ptr->length) + nhlt_table = (struct nhlt_acpi_table *) memremap(nhlt_ptr->min_addr, nhlt_ptr->length, MEMREMAP_WB); ACPI_FREE(obj); From e06f7b686daa871c4a4cf745be30fd1864b6f9c8 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Mon, 1 Jan 2018 18:26:47 +0100 Subject: [PATCH 1927/3326] watchdog: imx2_wdt: restore previous timeout after suspend+resume commit 0be267255cef64e1c58475baa7b25568355a3816 upstream. When the watchdog device is suspended, its timeout is set to the maximum value. During resume, the previously set timeout should be restored. This does not work at the moment. The suspend function calls imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); and resume reverts this by calling imx2_wdt_set_timeout(wdog, wdog->timeout); However, imx2_wdt_set_timeout() updates wdog->timeout. Therefore, wdog->timeout is set to IMX2_WDT_MAX_TIME when we enter the resume function. 
Fix this by adding a new function __imx2_wdt_set_timeout() which only updates the hardware settings. imx2_wdt_set_timeout() now calls __imx2_wdt_set_timeout() and then saves the new timeout to wdog->timeout. During suspend, we call __imx2_wdt_set_timeout() directly so that wdog->timeout won't be updated and we can restore the previous value during resume. This approach makes wdog->timeout different from the actual setting in the hardware which is usually not a good thing. However, the two differ only while we're suspended and no kernel code is running, so it should be ok in this case. Signed-off-by: Martin Kaiser Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Greg Kroah-Hartman --- drivers/watchdog/imx2_wdt.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 4874b0f18650f7..518dfa1047cbd5 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -169,15 +169,21 @@ static int imx2_wdt_ping(struct watchdog_device *wdog) return 0; } -static int imx2_wdt_set_timeout(struct watchdog_device *wdog, - unsigned int new_timeout) +static void __imx2_wdt_set_timeout(struct watchdog_device *wdog, + unsigned int new_timeout) { struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); - wdog->timeout = new_timeout; - regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT, WDOG_SEC_TO_COUNT(new_timeout)); +} + +static int imx2_wdt_set_timeout(struct watchdog_device *wdog, + unsigned int new_timeout) +{ + __imx2_wdt_set_timeout(wdog, new_timeout); + + wdog->timeout = new_timeout; return 0; } @@ -371,7 +377,11 @@ static int imx2_wdt_suspend(struct device *dev) /* The watchdog IP block is running */ if (imx2_wdt_is_running(wdev)) { - imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); + /* + * Don't update wdog->timeout, we'll restore the current value + * during resume. + */ + __imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); imx2_wdt_ping(wdog); } From d16919a3fe2d83ae159b82808933fefb54fc2f6e Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Fri, 12 Jan 2018 18:07:01 -0700 Subject: [PATCH 1928/3326] Btrfs: raid56: iterate raid56 internal bio with bio_for_each_segment_all commit 0198e5b707cfeb5defbd1b71b1ec6e71580d7db9 upstream. Bio iterated by set_bio_pages_uptodate() is raid56 internal one, so it will never be a BIO_CLONED bio, and since this is called by end_io functions, bio->bi_iter.bi_size is zero, we mustn't use bio_for_each_segment() as that is a no-op if bi_size is zero. 
Fixes: 6592e58c6b68e61f003a01ba29a3716e7e2e9484 ("Btrfs: fix write corruption due to bio cloning on raid5/6") Signed-off-by: Liu Bo Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/raid56.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 24a62224b24b1b..6154825c30e12c 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -1432,14 +1432,13 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio, */ static void set_bio_pages_uptodate(struct bio *bio) { - struct bio_vec bvec; - struct bvec_iter iter; + struct bio_vec *bvec; + int i; - if (bio_flagged(bio, BIO_CLONED)) - bio->bi_iter = btrfs_io_bio(bio)->iter; + ASSERT(!bio_flagged(bio, BIO_CLONED)); - bio_for_each_segment(bvec, bio, iter) - SetPageUptodate(bvec.bv_page); + bio_for_each_segment_all(bvec, bio, i) + SetPageUptodate(bvec->bv_page); } /* From e186d8bfdabddcff429b3231948d4af860ae7fc7 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 6 Feb 2018 15:36:00 -0800 Subject: [PATCH 1929/3326] kasan: don't emit builtin calls when sanitization is off commit 0e410e158e5baa1300bdf678cea4f4e0cf9d8b94 upstream. With KASAN enabled the kernel has two different memset() functions, one with KASAN checks (memset) and one without (__memset). KASAN uses some macro tricks to use the proper version where required. For example memset() calls in mm/slub.c are without KASAN checks, since they operate on poisoned slab object metadata. The issue is that clang emits memset() calls even when there is no memset() in the source code. They get linked with improper memset() implementation and the kernel fails to boot due to a huge amount of KASAN reports during early boot stages. The solution is to add -fno-builtin flag for files with KASAN_SANITIZE := n marker. 
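To illustrate the kind of implicit call the commit describes, the fragment below is purely illustrative (the structure is made up and nothing here comes from the patch): no memset() appears in the source, yet a compiler may lower the aggregate zeroing to a memset() library call. In a file built with KASAN_SANITIZE := n such a generated call would bypass the header macros that normally redirect explicit calls to the uninstrumented __memset(), which is the mismatch -fno-builtin is meant to avoid.

/* Illustration only: zeroing a large aggregate without naming memset(). */
struct big_ctx {
	char scratch[512];
	int state;
};

void reset_ctx(struct big_ctx *ctx)
{
	/* May be compiled into a call to memset(ctx, 0, sizeof(*ctx)). */
	*ctx = (struct big_ctx){ 0 };
}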
Link: http://lkml.kernel.org/r/8ffecfffe04088c52c42b92739c2bd8a0bcb3f5e.1516384594.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Nick Desaulniers Cc: Masahiro Yamada Cc: Michal Marek Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- Makefile | 3 ++- scripts/Makefile.kasan | 3 +++ scripts/Makefile.lib | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 76a0b13623f418..a7160d6f0f2d0a 100644 --- a/Makefile +++ b/Makefile @@ -416,7 +416,8 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE +export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 1ce7115aa499b7..94c9e65cc83adb 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan @@ -31,4 +31,7 @@ else endif CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope) + +CFLAGS_KASAN_NOSANITIZE := -fno-builtin + endif diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 04b5633df1cfaf..0b46136a91a80b 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -128,7 +128,7 @@ endif ifeq ($(CONFIG_KASAN),y) _c_flags += $(if $(patsubst n%,, \ $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \ - $(CFLAGS_KASAN)) + $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE)) endif ifeq ($(CONFIG_UBSAN),y) From 062cd3463c176c71d23e6ca92cda76500ad158d3 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 6 Feb 2018 15:41:41 -0800 Subject: [PATCH 1930/3326] kasan: rework Kconfig settings commit e7c52b84fb18f08ce49b6067ae6285aca79084a8 upstream. We get a lot of very large stack frames using gcc-7.0.1 with the default -fsanitize-address-use-after-scope --param asan-stack=1 options, which can easily cause an overflow of the kernel stack, e.g. drivers/gpu/drm/i915/gvt/handlers.c:2434:1: warning: the frame size of 46176 bytes is larger than 3072 bytes drivers/net/wireless/ralink/rt2x00/rt2800lib.c:5650:1: warning: the frame size of 23632 bytes is larger than 3072 bytes lib/atomic64_test.c:250:1: warning: the frame size of 11200 bytes is larger than 3072 bytes drivers/gpu/drm/i915/gvt/handlers.c:2621:1: warning: the frame size of 9208 bytes is larger than 3072 bytes drivers/media/dvb-frontends/stv090x.c:3431:1: warning: the frame size of 6816 bytes is larger than 3072 bytes fs/fscache/stats.c:287:1: warning: the frame size of 6536 bytes is larger than 3072 bytes To reduce this risk, -fsanitize-address-use-after-scope is now split out into a separate CONFIG_KASAN_EXTRA Kconfig option, leading to stack frames that are smaller than 2 kilobytes most of the time on x86_64. An earlier version of this patch also prevented combining KASAN_EXTRA with KASAN_INLINE, but that is no longer necessary with gcc-7.0.1. All patches to get the frame size below 2048 bytes with CONFIG_KASAN=y and CONFIG_KASAN_EXTRA=n have been merged by maintainers now, so we can bring back that default now. 
KASAN_EXTRA=y still causes lots of warnings but now defaults to !COMPILE_TEST to disable it in allmodconfig, and it remains disabled in all other defconfigs since it is a new option. I arbitrarily raise the warning limit for KASAN_EXTRA to 3072 to reduce the noise, but an allmodconfig kernel still has around 50 warnings on gcc-7. I experimented a bit more with smaller stack frames and have another follow-up series that reduces the warning limit for 64-bit architectures to 1280 bytes (without CONFIG_KASAN). With earlier versions of this patch series, I also had patches to address the warnings we get with KASAN and/or KASAN_EXTRA, using a "noinline_if_stackbloat" annotation. That annotation now got replaced with a gcc-8 bugfix (see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715) and a workaround for older compilers, which means that KASAN_EXTRA is now just as bad as before and will lead to an instant stack overflow in a few extreme cases. This reverts parts of commit 3f181b4d8652 ("lib/Kconfig.debug: disable -Wframe-larger-than warnings with KASAN=y"). Two patches in linux-next should be merged first to avoid introducing warnings in an allmodconfig build: 3cd890dbe2a4 ("media: dvb-frontends: fix i2c access helpers for KASAN") 16c3ada89cff ("media: r820t: fix r820t_write_reg for KASAN") Do we really need to backport this? I think we do: without this patch, enabling KASAN will lead to unavoidable kernel stack overflow in certain device drivers when built with gcc-7 or higher on linux-4.10+ or any version that contains a backport of commit c5caf21ab0cf8. Most people are probably still on older compilers, but it will get worse over time as they upgrade their distros. The warnings we get on kernels older than this should all be for code that uses dangerously large stack frames, though most of them do not cause an actual stack overflow by themselves.The asan-stack option was added in linux-4.0, and commit 3f181b4d8652 ("lib/Kconfig.debug: disable -Wframe-larger-than warnings with KASAN=y") effectively turned off the warning for allmodconfig kernels, so I would like to see this fix backported to any kernels later than 4.0. I have done dozens of fixes for individual functions with stack frames larger than 2048 bytes with asan-stack, and I plan to make sure that all those fixes make it into the stable kernels as well (most are already there). Part of the complication here is that asan-stack (from 4.0) was originally assumed to always require much larger stacks, but that turned out to be a combination of multiple gcc bugs that we have now worked around and fixed, but sanitize-address-use-after-scope (from v4.10) has a much higher inherent stack usage and also suffers from at least three other problems that we have analyzed but not yet fixed upstream, each of them makes the stack usage more severe than it should be. 
Link: http://lkml.kernel.org/r/20171221134744.2295529-1-arnd@arndb.de Signed-off-by: Arnd Bergmann Acked-by: Andrey Ryabinin Cc: Mauro Carvalho Chehab Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Andrey Konovalov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- lib/Kconfig.debug | 2 +- lib/Kconfig.kasan | 11 +++++++++++ scripts/Makefile.kasan | 2 ++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ff21b4dbb3922f..00cb02daeddddd 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -217,7 +217,7 @@ config ENABLE_MUST_CHECK config FRAME_WARN int "Warn for stack frames larger than (needs gcc 4.4)" range 0 8192 - default 0 if KASAN + default 3072 if KASAN_EXTRA default 2048 if GCC_PLUGIN_LATENT_ENTROPY default 1280 if (!64BIT && PARISC) default 1024 if (!64BIT && !PARISC) diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index bd38aab0592981..3d35d062970d24 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -20,6 +20,17 @@ config KASAN Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB (the resulting kernel does not boot). +config KASAN_EXTRA + bool "KAsan: extra checks" + depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST + help + This enables further checks in the kernel address sanitizer, for now + it only includes the address-use-after-scope check that can lead + to excessive kernel stack usage, frame size warnings and longer + compile time. + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more + + choice prompt "Instrumentation type" depends on KASAN diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 94c9e65cc83adb..97a56c0b565a4a 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan @@ -30,7 +30,9 @@ else endif endif +ifdef CONFIG_KASAN_EXTRA CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope) +endif CFLAGS_KASAN_NOSANITIZE := -fno-builtin From ad91b2e392be4339e09eefd8479675b4232ddfa1 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 30 Nov 2017 11:55:46 -0500 Subject: [PATCH 1931/3326] media: dvb-frontends: fix i2c access helpers for KASAN commit 3cd890dbe2a4f14cc44c85bb6cf37e5e22d4dd0e upstream. A typical code fragment was copied across many dvb-frontend drivers and causes large stack frames when built with with CONFIG_KASAN on gcc-5/6/7: drivers/media/dvb-frontends/cxd2841er.c:3225:1: error: the frame size of 3992 bytes is larger than 3072 bytes [-Werror=frame-larger-than=] drivers/media/dvb-frontends/cxd2841er.c:3404:1: error: the frame size of 3136 bytes is larger than 3072 bytes [-Werror=frame-larger-than=] drivers/media/dvb-frontends/stv0367.c:3143:1: error: the frame size of 4016 bytes is larger than 3072 bytes [-Werror=frame-larger-than=] drivers/media/dvb-frontends/stv090x.c:3430:1: error: the frame size of 5312 bytes is larger than 3072 bytes [-Werror=frame-larger-than=] drivers/media/dvb-frontends/stv090x.c:4248:1: error: the frame size of 4872 bytes is larger than 3072 bytes [-Werror=frame-larger-than=] gcc-8 now solves this by consolidating the stack slots for the argument variables, but on older compilers we can get the same behavior by taking the pointer of a local variable rather than the inline function argument. 
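The shape of the change is generic enough to show on its own. In the hedged sketch below all chip_* names are invented stand-ins for the drivers touched by the patch; the point is only the before/after of the one-byte write helper, where taking the address of a local copy instead of the by-value parameter lets older compilers share stack slots across the many inlined call sites rather than giving each pinned argument its own instrumented slot.

#include <linux/types.h>

struct chip_priv {
	void *i2c;	/* stand-in for the driver's real state */
};

static int chip_write_regs(struct chip_priv *priv, u8 reg,
			   const u8 *data, int len)
{
	/* A real driver would do an i2c transfer here; stubbed for the sketch. */
	return 0;
}

/* Before: &val forces the inlined argument into its own stack slot. */
static int chip_write_reg_old(struct chip_priv *priv, u8 reg, u8 val)
{
	return chip_write_regs(priv, reg, &val, 1);
}

/* After: a local copy, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
static int chip_write_reg(struct chip_priv *priv, u8 reg, u8 val)
{
	u8 tmp = val;

	return chip_write_regs(priv, reg, &tmp, 1);
}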
Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 Signed-off-by: Arnd Bergmann Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/dvb-frontends/ascot2e.c | 4 +++- drivers/media/dvb-frontends/cxd2841er.c | 4 +++- drivers/media/dvb-frontends/helene.c | 4 +++- drivers/media/dvb-frontends/horus3a.c | 4 +++- drivers/media/dvb-frontends/itd1000.c | 5 +++-- drivers/media/dvb-frontends/mt312.c | 5 ++++- drivers/media/dvb-frontends/stb0899_drv.c | 3 ++- drivers/media/dvb-frontends/stb6100.c | 6 ++++-- drivers/media/dvb-frontends/stv0367.c | 4 +++- drivers/media/dvb-frontends/stv090x.c | 4 +++- drivers/media/dvb-frontends/stv6110x.c | 4 +++- drivers/media/dvb-frontends/zl10039.c | 4 +++- 12 files changed, 37 insertions(+), 14 deletions(-) diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c index 0ee0df53b91bbb..79d5d89bc95e7d 100644 --- a/drivers/media/dvb-frontends/ascot2e.c +++ b/drivers/media/dvb-frontends/ascot2e.c @@ -155,7 +155,9 @@ static int ascot2e_write_regs(struct ascot2e_priv *priv, static int ascot2e_write_reg(struct ascot2e_priv *priv, u8 reg, u8 val) { - return ascot2e_write_regs(priv, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return ascot2e_write_regs(priv, reg, &tmp, 1); } static int ascot2e_read_regs(struct ascot2e_priv *priv, diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index 48ee9bc00c069d..ccbd84fd642826 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c @@ -257,7 +257,9 @@ static int cxd2841er_write_regs(struct cxd2841er_priv *priv, static int cxd2841er_write_reg(struct cxd2841er_priv *priv, u8 addr, u8 reg, u8 val) { - return cxd2841er_write_regs(priv, addr, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return cxd2841er_write_regs(priv, addr, reg, &tmp, 1); } static int cxd2841er_read_regs(struct cxd2841er_priv *priv, diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c index 4bf5a551ba403f..2ab8d83e557683 100644 --- a/drivers/media/dvb-frontends/helene.c +++ b/drivers/media/dvb-frontends/helene.c @@ -331,7 +331,9 @@ static int helene_write_regs(struct helene_priv *priv, static int helene_write_reg(struct helene_priv *priv, u8 reg, u8 val) { - return helene_write_regs(priv, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return helene_write_regs(priv, reg, &tmp, 1); } static int helene_read_regs(struct helene_priv *priv, diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c index 68d759c4c52ec1..5c8b405f2ddc50 100644 --- a/drivers/media/dvb-frontends/horus3a.c +++ b/drivers/media/dvb-frontends/horus3a.c @@ -89,7 +89,9 @@ static int horus3a_write_regs(struct horus3a_priv *priv, static int horus3a_write_reg(struct horus3a_priv *priv, u8 reg, u8 val) { - return horus3a_write_regs(priv, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return horus3a_write_regs(priv, reg, &tmp, 1); } static int horus3a_enter_power_save(struct horus3a_priv *priv) diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c index 5bb1e73a10b439..ce7c443d3eacda 100644 --- a/drivers/media/dvb-frontends/itd1000.c +++ b/drivers/media/dvb-frontends/itd1000.c @@ -95,8 +95,9 @@ static int itd1000_read_reg(struct itd1000_state *state, u8 reg) static inline int 
itd1000_write_reg(struct itd1000_state *state, u8 r, u8 v) { - int ret = itd1000_write_regs(state, r, &v, 1); - state->shadow[r] = v; + u8 tmp = v; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + int ret = itd1000_write_regs(state, r, &tmp, 1); + state->shadow[r] = tmp; return ret; } diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c index 961b9a2508e0ce..0b23cbc021b81a 100644 --- a/drivers/media/dvb-frontends/mt312.c +++ b/drivers/media/dvb-frontends/mt312.c @@ -142,7 +142,10 @@ static inline int mt312_readreg(struct mt312_state *state, static inline int mt312_writereg(struct mt312_state *state, const enum mt312_reg_addr reg, const u8 val) { - return mt312_write(state, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + + return mt312_write(state, reg, &tmp, 1); } static inline u32 mt312_div(u32 a, u32 b) diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c index 02347598277aff..db5dde3215f020 100644 --- a/drivers/media/dvb-frontends/stb0899_drv.c +++ b/drivers/media/dvb-frontends/stb0899_drv.c @@ -539,7 +539,8 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, int stb0899_write_reg(struct stb0899_state *state, unsigned int reg, u8 data) { - return stb0899_write_regs(state, reg, &data, 1); + u8 tmp = data; + return stb0899_write_regs(state, reg, &tmp, 1); } /* diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c index 17a955d0031bb6..75509bec66e4cb 100644 --- a/drivers/media/dvb-frontends/stb6100.c +++ b/drivers/media/dvb-frontends/stb6100.c @@ -226,12 +226,14 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st static int stb6100_write_reg(struct stb6100_state *state, u8 reg, u8 data) { + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + if (unlikely(reg >= STB6100_NUMREGS)) { dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg); return -EREMOTEIO; } - data = (data & stb6100_template[reg].mask) | stb6100_template[reg].set; - return stb6100_write_reg_range(state, &data, reg, 1); + tmp = (tmp & stb6100_template[reg].mask) | stb6100_template[reg].set; + return stb6100_write_reg_range(state, &tmp, reg, 1); } diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index f3529df8211dcc..1a726196c12654 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -166,7 +166,9 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data) { - return stv0367_writeregs(state, reg, &data, 1); + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return stv0367_writeregs(state, reg, &tmp, 1); } static u8 stv0367_readreg(struct stv0367_state *state, u16 reg) diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c index 7ef469c0c8668a..2695e1eb6d9c4a 100644 --- a/drivers/media/dvb-frontends/stv090x.c +++ b/drivers/media/dvb-frontends/stv090x.c @@ -755,7 +755,9 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8 static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data) { - return stv090x_write_regs(state, reg, &data, 1); + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return stv090x_write_regs(state, reg, &tmp, 1); } static int 
stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable) diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c index 66eba38f1014cd..7e8e01389c55fa 100644 --- a/drivers/media/dvb-frontends/stv6110x.c +++ b/drivers/media/dvb-frontends/stv6110x.c @@ -97,7 +97,9 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da static int stv6110x_write_reg(struct stv6110x_state *stv6110x, u8 reg, u8 data) { - return stv6110x_write_regs(stv6110x, reg, &data, 1); + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return stv6110x_write_regs(stv6110x, reg, &tmp, 1); } static int stv6110x_init(struct dvb_frontend *fe) diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c index 623355fc26661a..3208b866d1cb9c 100644 --- a/drivers/media/dvb-frontends/zl10039.c +++ b/drivers/media/dvb-frontends/zl10039.c @@ -134,7 +134,9 @@ static inline int zl10039_writereg(struct zl10039_state *state, const enum zl10039_reg_addr reg, const u8 val) { - return zl10039_write(state, reg, &val, 1); + const u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return zl10039_write(state, reg, &tmp, 1); } static int zl10039_init(struct dvb_frontend *fe) From 7a401e25f0a5909abf3f3e4fc7518b173cc5328f Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Wed, 10 Jan 2018 07:20:39 -0500 Subject: [PATCH 1932/3326] media: ts2020: avoid integer overflows on 32 bit machines commit 81742be14b6a90c9fd0ff6eb4218bdf696ad8e46 upstream. Before this patch, when compiled for arm32, the signal strength were reported as: Lock (0x1f) Signal= 4294908.66dBm C/N= 12.79dB Because of a 32 bit integer overflow. After it, it is properly reported as: Lock (0x1f) Signal= -58.64dBm C/N= 12.79dB Fixes: 0f91c9d6bab9 ("[media] TS2020: Calculate tuner gain correctly") Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/dvb-frontends/ts2020.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c index 931e5c98da8af9..b879e157146965 100644 --- a/drivers/media/dvb-frontends/ts2020.c +++ b/drivers/media/dvb-frontends/ts2020.c @@ -368,7 +368,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc, gain2 = clamp_t(long, gain2, 0, 13); v_agc = clamp_t(long, v_agc, 400, 1100); - *_gain = -(gain1 * 2330 + + *_gain = -((__s64)gain1 * 2330 + gain2 * 3500 + v_agc * 24 / 10 * 10 + 10000); @@ -386,7 +386,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc, gain3 = clamp_t(long, gain3, 0, 6); v_agc = clamp_t(long, v_agc, 600, 1600); - *_gain = -(gain1 * 2650 + + *_gain = -((__s64)gain1 * 2650 + gain2 * 3380 + gain3 * 2850 + v_agc * 176 / 100 * 10 - From ebe2ba53851e785243d4ee3d7f4fcf64653a987b Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Wed, 24 Jan 2018 06:01:57 -0500 Subject: [PATCH 1933/3326] media: cxusb, dib0700: ignore XC2028_I2C_FLUSH commit 9893b905e743ded332575ca04486bd586c0772f7 upstream. The XC2028_I2C_FLUSH only needs to be implemented on a few devices. Others can safely ignore it. 
That prevents filling the dmesg with lots of messages like: dib0700: stk7700ph_xc3028_callback: unknown command 2, arg 0 Fixes: 4d37ece757a8 ("[media] tuner/xc2028: Add I2C flush callback") Reported-by: Enrico Mioso Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/dvb-usb/cxusb.c | 2 ++ drivers/media/usb/dvb-usb/dib0700_devices.c | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 37dea0adc69538..cfe86b4864b3cc 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -677,6 +677,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component, case XC2028_RESET_CLK: deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg); break; + case XC2028_I2C_FLUSH: + break; default: deb_info("%s: unknown command %d, arg %d\n", __func__, command, arg); diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 92098c1b78e51c..9be1e658ef47ec 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -430,6 +430,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component, state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); break; case XC2028_RESET_CLK: + case XC2028_I2C_FLUSH: break; default: err("%s: unknown command %d, arg %d\n", __func__, From 7e54b58285e1e5b3f2430342d40f98a53401bb91 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 6 Feb 2018 15:37:13 -0800 Subject: [PATCH 1934/3326] fs/proc/kcore.c: use probe_kernel_read() instead of memcpy() commit d0290bc20d4739b7a900ae37eb5d4cc3be2b393f upstream. Commit df04abfd181a ("fs/proc/kcore.c: Add bounce buffer for ktext data") added a bounce buffer to avoid hardened usercopy checks. Copying to the bounce buffer was implemented with a simple memcpy() assuming that it is always valid to read from kernel memory iff the kern_addr_valid() check passed. A simple, but pointless, test case like "dd if=/proc/kcore of=/dev/null" now can easily crash the kernel, since the former execption handling on invalid kernel addresses now doesn't work anymore. Also adding a kern_addr_valid() implementation wouldn't help here. Most architectures simply return 1 here, while a couple implemented a page table walk to figure out if something is mapped at the address in question. With DEBUG_PAGEALLOC active mappings are established and removed all the time, so that relying on the result of kern_addr_valid() before executing the memcpy() also doesn't work. Therefore simply use probe_kernel_read() to copy to the bounce buffer. This also allows to simplify read_kcore(). At least on s390 this fixes the observed crashes and doesn't introduce warnings that were removed with df04abfd181a ("fs/proc/kcore.c: Add bounce buffer for ktext data"), even though the generic probe_kernel_read() implementation uses uaccess functions. While looking into this I'm also wondering if kern_addr_valid() could be completely removed...(?) 
Link: http://lkml.kernel.org/r/20171202132739.99971-1-heiko.carstens@de.ibm.com Fixes: df04abfd181a ("fs/proc/kcore.c: Add bounce buffer for ktext data") Fixes: f5509cc18daa ("mm: Hardened usercopy") Signed-off-by: Heiko Carstens Acked-by: Kees Cook Cc: Jiri Olsa Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/proc/kcore.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 4bc85cb8be6a2f..e8a93bc8285d85 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -512,23 +512,15 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) return -EFAULT; } else { if (kern_addr_valid(start)) { - unsigned long n; - /* * Using bounce buffer to bypass the * hardened user copy kernel text checks. */ - memcpy(buf, (char *) start, tsz); - n = copy_to_user(buffer, buf, tsz); - /* - * We cannot distinguish between fault on source - * and fault on destination. When this happens - * we clear too and hope it will trigger the - * EFAULT again. - */ - if (n) { - if (clear_user(buffer + tsz - n, - n)) + if (probe_kernel_read(buf, (void *) start, tsz)) { + if (clear_user(buffer, tsz)) + return -EFAULT; + } else { + if (copy_to_user(buffer, buf, tsz)) return -EFAULT; } } else { From 838f9cc948b00a31a56c0aa115a1ceca507b084b Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Tue, 6 Feb 2018 15:37:55 -0800 Subject: [PATCH 1935/3326] kernel/async.c: revert "async: simplify lowest_in_progress()" commit 4f7e988e63e336827f4150de48163bed05d653bd upstream. This reverts commit 92266d6ef60c ("async: simplify lowest_in_progress()") which was simply wrong: In the case where domain is NULL, we now use the wrong offsetof() in the list_first_entry macro, so we don't actually fetch the ->cookie value, but rather the eight bytes located sizeof(struct list_head) further into the struct async_entry. On 64 bit, that's the data member, while on 32 bit, that's a u64 built from func and data in some order. I think the bug happens to be harmless in practice: It obviously only affects callers which pass a NULL domain, and AFAICT the only such caller is async_synchronize_full() -> async_synchronize_full_domain(NULL) -> async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, NULL) and the ASYNC_COOKIE_MAX means that in practice we end up waiting for the async_global_pending list to be empty - but it would break if somebody happened to pass (void*)-1 as the data element to async_schedule, and of course also if somebody ever does a async_synchronize_cookie_domain(, NULL) with a "finite" cookie value. Maybe the "harmless in practice" means this isn't -stable material. But I'm not completely confident my quick git grep'ing is enough, and there might be affected code in one of the earlier kernels that has since been removed, so I'll leave the decision to the stable guys. 
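To make the offsetof() mixup concrete, here is a small self-contained sketch in plain C (container_of and the entry layout are re-declared locally for illustration; this is not kernel code): recovering the containing structure through the wrong list member shifts the computed pointer by sizeof(struct list_head), so the "cookie" read lands on unrelated bytes, which is the failure mode the revert removes.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct entry_sketch {
	struct list_head domain_list;	/* linked into a domain's pending list */
	struct list_head global_list;	/* linked into the global pending list */
	unsigned long long cookie;
};

int main(void)
{
	struct entry_sketch e = { .cookie = 42 };
	/* Pretend the entry was found on the global pending list. */
	struct list_head *node = &e.global_list;

	struct entry_sketch *ok =
		container_of(node, struct entry_sketch, global_list);
	struct entry_sketch *bad =
		container_of(node, struct entry_sketch, domain_list);

	printf("correct cookie: %llu\n", ok->cookie);
	printf("wrong-member pointer is off by %ld bytes\n",
	       (long)((char *)bad - (char *)ok));
	return 0;
}

Using list_first_entry() separately for each list, as the restored code does, keeps the member named in the macro consistent with the list the node actually came from.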
Link: http://lkml.kernel.org/r/20171128104938.3921-1-linux@rasmusvillemoes.dk Fixes: 92266d6ef60c "async: simplify lowest_in_progress()" Signed-off-by: Rasmus Villemoes Acked-by: Tejun Heo Cc: Arjan van de Ven Cc: Adam Wallis Cc: Lai Jiangshan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/async.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/kernel/async.c b/kernel/async.c index 2cbd3dd5940dea..a893d6170944f7 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -84,20 +84,24 @@ static atomic_t entry_count; static async_cookie_t lowest_in_progress(struct async_domain *domain) { - struct list_head *pending; + struct async_entry *first = NULL; async_cookie_t ret = ASYNC_COOKIE_MAX; unsigned long flags; spin_lock_irqsave(&async_lock, flags); - if (domain) - pending = &domain->pending; - else - pending = &async_global_pending; + if (domain) { + if (!list_empty(&domain->pending)) + first = list_first_entry(&domain->pending, + struct async_entry, domain_list); + } else { + if (!list_empty(&async_global_pending)) + first = list_first_entry(&async_global_pending, + struct async_entry, global_list); + } - if (!list_empty(pending)) - ret = list_first_entry(pending, struct async_entry, - domain_list)->cookie; + if (first) + ret = first->cookie; spin_unlock_irqrestore(&async_lock, flags); return ret; From 75526a9bd3f4fe83161ae2f723fbc036e07194ae Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 6 Feb 2018 15:40:24 -0800 Subject: [PATCH 1936/3326] kernel/relay.c: revert "kernel/relay.c: fix potential memory leak" commit a1be1f3931bfe0a42b46fef77a04593c2b136e7f upstream. This reverts commit ba62bafe942b ("kernel/relay.c: fix potential memory leak"). This commit introduced a double free bug, because 'chan' is already freed by the line: kref_put(&chan->kref, relay_destroy_channel); This bug was found by syzkaller, using the BLKTRACESETUP ioctl. Link: http://lkml.kernel.org/r/20180127004759.101823-1-ebiggers3@gmail.com Fixes: ba62bafe942b ("kernel/relay.c: fix potential memory leak") Signed-off-by: Eric Biggers Reported-by: syzbot Reviewed-by: Andrew Morton Cc: Zhouyi Zhou Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/relay.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/relay.c b/kernel/relay.c index 39a9dfc69486b5..55da824f4adcff 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -611,7 +611,6 @@ struct rchan *relay_open(const char *base_filename, kref_put(&chan->kref, relay_destroy_channel); mutex_unlock(&relay_channels_mutex); - kfree(chan); return NULL; } EXPORT_SYMBOL_GPL(relay_open); From a125e9a42302c5dc3c85b88eebc32c43bb65dd07 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 6 Feb 2018 15:41:53 -0800 Subject: [PATCH 1937/3326] pipe: actually allow root to exceed the pipe buffer limits commit 85c2dd5473b2718b4b63e74bfeb1ca876868e11f upstream. pipe-user-pages-hard and pipe-user-pages-soft are only supposed to apply to unprivileged users, as documented in both Documentation/sysctl/fs.txt and the pipe(7) man page. However, the capabilities are actually only checked when increasing a pipe's size using F_SETPIPE_SZ, not when creating a new pipe. Therefore, if pipe-user-pages-hard has been set, the root user can run into it and be unable to create pipes. Similarly, if pipe-user-pages-soft has been set, the root user can run into it and have their pipes limited to 1 page each. 
Fix this by allowing the privileged override in both cases. Link: http://lkml.kernel.org/r/20180111052902.14409-4-ebiggers3@gmail.com Fixes: 759c01142a5d ("pipe: limit the per-user amount of pages allocated in pipes") Signed-off-by: Eric Biggers Acked-by: Kees Cook Acked-by: Joe Lawrence Cc: Alexander Viro Cc: "Luis R . Rodriguez" Cc: Michael Kerrisk Cc: Mikulas Patocka Cc: Willy Tarreau Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/pipe.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/fs/pipe.c b/fs/pipe.c index f0f4ab36c444d4..984067df26bb1e 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -618,6 +618,11 @@ static bool too_many_pipe_buffers_hard(unsigned long user_bufs) return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard; } +static bool is_unprivileged_user(void) +{ + return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); +} + struct pipe_inode_info *alloc_pipe_info(void) { struct pipe_inode_info *pipe; @@ -634,12 +639,12 @@ struct pipe_inode_info *alloc_pipe_info(void) user_bufs = account_pipe_buffers(user, 0, pipe_bufs); - if (too_many_pipe_buffers_soft(user_bufs)) { + if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) { user_bufs = account_pipe_buffers(user, pipe_bufs, 1); pipe_bufs = 1; } - if (too_many_pipe_buffers_hard(user_bufs)) + if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user()) goto out_revert_acct; pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer), @@ -1069,7 +1074,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) if (nr_pages > pipe->buffers && (too_many_pipe_buffers_hard(user_bufs) || too_many_pipe_buffers_soft(user_bufs)) && - !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { + is_unprivileged_user()) { ret = -EPERM; goto out_revert_acct; } From 703b37d845987cedd76c534df7719bb8024ad66e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 6 Feb 2018 15:41:56 -0800 Subject: [PATCH 1938/3326] pipe: fix off-by-one error when checking buffer limits commit 9903a91c763ecdae333a04a9d89d79d2b8966503 upstream. With pipe-user-pages-hard set to 'N', users were actually only allowed up to 'N - 1' buffers; and likewise for pipe-user-pages-soft. Fix this to allow up to 'N' buffers, as would be expected. Link: http://lkml.kernel.org/r/20180111052902.14409-5-ebiggers3@gmail.com Fixes: b0b91d18e2e9 ("pipe: fix limit checking in pipe_set_size()") Signed-off-by: Eric Biggers Acked-by: Willy Tarreau Acked-by: Kees Cook Acked-by: Joe Lawrence Cc: Alexander Viro Cc: "Luis R . 
Rodriguez" Cc: Michael Kerrisk Cc: Mikulas Patocka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/pipe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/pipe.c b/fs/pipe.c index 984067df26bb1e..8ef7d7bef77579 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -610,12 +610,12 @@ static unsigned long account_pipe_buffers(struct user_struct *user, static bool too_many_pipe_buffers_soft(unsigned long user_bufs) { - return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft; + return pipe_user_pages_soft && user_bufs > pipe_user_pages_soft; } static bool too_many_pipe_buffers_hard(unsigned long user_bufs) { - return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard; + return pipe_user_pages_hard && user_bufs > pipe_user_pages_hard; } static bool is_unprivileged_user(void) From 0e4ac4aed9f92796c4177f065f4dbd1bc9c3a432 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 17 Jan 2018 21:05:55 +0100 Subject: [PATCH 1939/3326] HID: quirks: Fix keyboard + touchpad on Toshiba Click Mini not working commit edfc3722cfef4217c7fe92b272cbe0288ba1ff57 upstream. The Toshiba Click Mini uses an i2c attached keyboard/touchpad combo (single i2c_hid device for both) which has a vid:pid of 04F3:0401, which is also used by a bunch of Elan touchpads which are handled by the drivers/input/mouse/elan_i2c driver, but that driver deals with pure touchpads and does not work for a combo device such as the one on the Toshiba Click Mini. The combo on the Mini has an ACPI id of ELAN0800, which is not claimed by the elan_i2c driver, so check for that and if it is found do not ignore the device. This fixes the keyboard/touchpad combo on the Mini not working (although with the touchpad in mouse emulation mode). Signed-off-by: Hans de Goede Signed-off-by: Jiri Kosina Signed-off-by: Greg Kroah-Hartman --- drivers/hid/hid-core.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 330ca983828ba6..5744eb729d2433 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2638,7 +2638,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, @@ -2908,6 +2907,17 @@ bool hid_ignore(struct hid_device *hdev) strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) return true; break; + case USB_VENDOR_ID_ELAN: + /* + * Many Elan devices have a product id of 0x0401 and are handled + * by the elan_i2c input driver. But the ACPI HID ELAN0800 dev + * is not (and cannot be) handled by that driver -> + * Ignore all 0x0401 devs except for the ELAN0800 dev. + */ + if (hdev->product == 0x0401 && + strncmp(hdev->name, "ELAN0800", 8) != 0) + return true; + break; } if (hdev->type == HID_TYPE_USBMOUSE && From ed72fcf6436271f63a4afc122770ae6537c8d3ce Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 29 Nov 2017 20:29:07 +0100 Subject: [PATCH 1940/3326] Bluetooth: btsdio: Do not bind to non-removable BCM43341 commit b4cdaba274247c9c841c6a682c08fa91fb3aa549 upstream. 
BCM43341 devices soldered onto the PCB (non-removable) always (AFAICT) use an UART connection for bluetooth. But they also advertise btsdio support on their 3th sdio function, this causes 2 problems: 1) A non functioning BT HCI getting registered 2) Since the btsdio driver does not have suspend/resume callbacks, mmc_sdio_pre_suspend will return -ENOSYS, causing mmc_pm_notify() to react as if the SDIO-card is removed and since the slot is marked as non-removable it will never get detected as inserted again. Which results in wifi no longer working after a suspend/resume. This commit fixes both by making btsdio ignore BCM43341 devices when connected to a slot which is marked non-removable. Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/btsdio.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index c8e945d19ffebc..20142bc77554c0 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -31,6 +31,7 @@ #include #include +#include #include #include @@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func, tuple = tuple->next; } + /* BCM43341 devices soldered onto the PCB (non-removable) use an + * uart connection for bluetooth, ignore the BT SDIO interface. + */ + if (func->vendor == SDIO_VENDOR_ID_BROADCOM && + func->device == SDIO_DEVICE_ID_BROADCOM_43341 && + !mmc_card_is_removable(func->card->host)) + return -ENODEV; + data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; From 2a2ee0c1fee7268f0cdf29415ba2708d3c2bfa33 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Wed, 20 Dec 2017 19:00:07 +0800 Subject: [PATCH 1941/3326] Revert "Bluetooth: btusb: fix QCA Rome suspend/resume" commit 7d06d5895c159f64c46560dc258e553ad8670fe0 upstream. This reverts commit fd865802c66bc451dc515ed89360f84376ce1a56. This commit causes a regression on some QCA ROME chips. The USB device reset happens in btusb_open(), hence firmware loading gets interrupted. Furthermore, this commit stops working after commit ("a0085f2510e8976614ad8f766b209448b385492f Bluetooth: btusb: driver to enable the usb-wakeup feature"). Reset-resume quirk only gets enabled in btusb_suspend() when it's not a wakeup source. If we really want to reset the USB device, we need to do it before btusb_open(). Let's handle it in drivers/usb/core/quirks.c. Cc: Leif Liddy Cc: Matthias Kaehlcke Cc: Brian Norris Cc: Daniel Drake Signed-off-by: Kai-Heng Feng Reviewed-by: Brian Norris Tested-by: Brian Norris Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/btusb.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 513a7a59d421e7..0a97142ccc427b 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3099,12 +3099,6 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; - - /* QCA Rome devices lose their updated firmware over suspend, - * but the USB hub doesn't notice any status change. - * Explicitly request a device reset on resume. 
- */ - set_bit(BTUSB_RESET_RESUME, &data->flags); } #ifdef CONFIG_BT_HCIBTUSB_RTL From fed016a79b42943f7dea7409d107b641fe3d1080 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 8 Jan 2018 10:44:16 +0100 Subject: [PATCH 1942/3326] Bluetooth: btusb: Restore QCA Rome suspend/resume fix with a "rewritten" version commit 61f5acea8737d9b717fcc22bb6679924f3c82b98 upstream. Commit 7d06d5895c15 ("Revert "Bluetooth: btusb: fix QCA...suspend/resume"") removed the setting of the BTUSB_RESET_RESUME quirk for QCA Rome devices, instead favoring adding USB_QUIRK_RESET_RESUME quirks in usb/core/quirks.c. This was done because the DIY BTUSB_RESET_RESUME reset-resume handling has several issues (see the original commit message). An added advantage of moving over to the USB-core reset-resume handling is that it also disables autosuspend for these devices, which is similarly broken on these. But there are 2 issues with this approach: 1) It leaves the broken DIY BTUSB_RESET_RESUME code in place for Realtek devices. 2) Sofar only 2 of the 10 QCA devices known to the btusb code have been added to usb/core/quirks.c and if we fix the Realtek case the same way we need to add an additional 14 entries. So in essence we need to duplicate a large part of the usb_device_id table in btusb.c in usb/core/quirks.c and manually keep them in sync. This commit instead restores setting a reset-resume quirk for QCA devices in the btusb.c code, avoiding the duplicate usb_device_id table problem. This commit avoids the problems with the original DIY BTUSB_RESET_RESUME code by simply setting the USB_QUIRK_RESET_RESUME quirk directly on the usb_device. This commit also moves the BTUSB_REALTEK case over to directly setting the USB_QUIRK_RESET_RESUME on the usb_device and removes the now unused BTUSB_RESET_RESUME code. BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1514836 Fixes: 7d06d5895c15 ("Revert "Bluetooth: btusb: fix QCA...suspend/resume"") Cc: Leif Liddy Cc: Matthias Kaehlcke Cc: Brian Norris Cc: Daniel Drake Cc: Kai-Heng Feng Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/btusb.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 0a97142ccc427b..d54c3f6f728c24 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -392,9 +393,8 @@ static const struct usb_device_id blacklist_table[] = { #define BTUSB_FIRMWARE_LOADED 7 #define BTUSB_FIRMWARE_FAILED 8 #define BTUSB_BOOTING 9 -#define BTUSB_RESET_RESUME 10 -#define BTUSB_DIAG_RUNNING 11 -#define BTUSB_OOB_WAKE_ENABLED 12 +#define BTUSB_DIAG_RUNNING 10 +#define BTUSB_OOB_WAKE_ENABLED 11 struct btusb_data { struct hci_dev *hdev; @@ -3099,6 +3099,12 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + + /* QCA Rome devices lose their updated firmware over suspend, + * but the USB hub doesn't notice any status change. + * explicitly request a device reset on resume. + */ + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; } #ifdef CONFIG_BT_HCIBTUSB_RTL @@ -3109,7 +3115,7 @@ static int btusb_probe(struct usb_interface *intf, * but the USB hub doesn't notice any status change. * Explicitly request a device reset on resume. 
*/ - set_bit(BTUSB_RESET_RESUME, &data->flags); + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; } #endif @@ -3274,14 +3280,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) enable_irq(data->oob_wake_irq); } - /* Optionally request a device reset on resume, but only when - * wakeups are disabled. If wakeups are enabled we assume the - * device will stay powered up throughout suspend. - */ - if (test_bit(BTUSB_RESET_RESUME, &data->flags) && - !device_may_wakeup(&data->udev->dev)) - data->udev->reset_resume = 1; - return 0; } From 765ae618ad9f106f6231570f29d0d5be3b56d269 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 18 Jan 2018 00:36:57 +0800 Subject: [PATCH 1943/3326] ipmi: use dynamic memory for DMI driver override commit 5516e21a1e95e9b9f39985598431a25477d91643 upstream. Currently a crash can be seen if we reach the "err" label in dmi_add_platform_ipmi(), calling platform_device_put(), like here: [ 7.270584] (null): ipmi:dmi: Unable to add resources: -16 [ 7.330229] ------------[ cut here ]------------ [ 7.334889] kernel BUG at mm/slub.c:3894! [ 7.338936] Internal error: Oops - BUG: 0 [#1] PREEMPT SMP [ 7.344475] Modules linked in: [ 7.347556] CPU: 1 PID: 1 Comm: swapper/0 Not tainted 4.15.0-rc2-00004-gbe9cb7b-dirty #114 [ 7.355907] Hardware name: Huawei Taishan 2280 /D05, BIOS Hisilicon D05 IT17 Nemo 2.0 RC0 11/29/2017 [ 7.365137] task: 00000000c211f6d3 task.stack: 00000000f276e9af [ 7.371116] pstate: 60000005 (nZCv daif -PAN -UAO) [ 7.375957] pc : kfree+0x194/0x1b4 [ 7.379389] lr : platform_device_release+0xcc/0xd8 [ 7.384225] sp : ffff0000092dba90 [ 7.387567] x29: ffff0000092dba90 x28: ffff000008a83000 [ 7.392933] x27: ffff0000092dbc10 x26: 00000000000000e6 [ 7.398297] x25: 0000000000000003 x24: ffff0000085b51e8 [ 7.403662] x23: 0000000000000100 x22: ffff7e0000234cc0 [ 7.409027] x21: ffff000008af3660 x20: ffff8017d21acc10 [ 7.414392] x19: ffff8017d21acc00 x18: 0000000000000002 [ 7.419757] x17: 0000000000000001 x16: 0000000000000008 [ 7.425121] x15: 0000000000000001 x14: 6666666678303d65 [ 7.430486] x13: 6469727265766f5f x12: 7265766972642e76 [ 7.435850] x11: 6564703e2d617020 x10: 6530326435373638 [ 7.441215] x9 : 3030303030303030 x8 : 3d76656420657361 [ 7.446580] x7 : ffff000008f59df8 x6 : ffff8017fbe0ea50 [ 7.451945] x5 : 0000000000000000 x4 : 0000000000000000 [ 7.457309] x3 : ffffffffffffffff x2 : 0000000000000000 [ 7.462674] x1 : 0fffc00000000800 x0 : ffff7e0000234ce0 [ 7.468039] Process swapper/0 (pid: 1, stack limit = 0x00000000f276e9af) [ 7.474809] Call trace: [ 7.477272] kfree+0x194/0x1b4 [ 7.480351] platform_device_release+0xcc/0xd8 [ 7.484837] device_release+0x34/0x90 [ 7.488531] kobject_put+0x70/0xcc [ 7.491961] put_device+0x14/0x1c [ 7.495304] platform_device_put+0x14/0x1c [ 7.499439] dmi_add_platform_ipmi+0x348/0x3ac [ 7.503923] scan_for_dmi_ipmi+0xfc/0x10c [ 7.507970] do_one_initcall+0x38/0x124 [ 7.511840] kernel_init_freeable+0x188/0x228 [ 7.516238] kernel_init+0x10/0x100 [ 7.519756] ret_from_fork+0x10/0x18 [ 7.523362] Code: f94002c0 37780080 f94012c0 37000040 (d4210000) [ 7.529552] ---[ end trace 11750e4787deef9e ]--- [ 7.534228] Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b [ 7.534228] This is because when the device is released in platform_device_release(), we try to free pdev.driver_override. This is a const string, hence the crash. Fix by using dynamic memory for pdev->driver_override. Signed-off-by: John Garry [Removed the free of driver_override from ipmi_si_remove_by_dev(). 
The free is done in platform_device_release(), and would result in a double free, and ipmi_si_remove_by_dev() is called by non-platform devices.] Signed-off-by: Corey Minyard Signed-off-by: Greg Kroah-Hartman --- drivers/char/ipmi/ipmi_dmi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c index 2059f79d669a07..c3a23ec3e76f71 100644 --- a/drivers/char/ipmi/ipmi_dmi.c +++ b/drivers/char/ipmi/ipmi_dmi.c @@ -81,7 +81,10 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr, pr_err("ipmi:dmi: Error allocation IPMI platform device"); return; } - pdev->driver_override = override; + pdev->driver_override = kasprintf(GFP_KERNEL, "%s", + override); + if (!pdev->driver_override) + goto err; if (type == IPMI_DMI_TYPE_SSIF) goto add_properties; From 34ad59099ead42461b968e36f939020821e82cef Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 1 Aug 2017 04:16:47 -0500 Subject: [PATCH 1944/3326] signal/openrisc: Fix do_unaligned_access to send the proper signal commit 500d58300571b6602341b041f97c082a461ef994 upstream. While reviewing the signal sending on openrisc the do_unaligned_access function stood out because it is obviously wrong. A comment about an si_code set above when actually si_code is never set. Leading to a random si_code being sent to userspace in the event of an unaligned access. Looking further SIGBUS BUS_ADRALN is the proper pair of signal and si_code to send for an unaligned access. That is what other architectures do and what is required by posix. Given that do_unaligned_access is broken in a way that no one can be relying on it on openrisc fix the code to just do the right thing. Fixes: 769a8a96229e ("OpenRISC: Traps") Cc: Jonas Bonn Cc: Stefan Kristiansson Cc: Stafford Horne Cc: Arnd Bergmann Cc: openrisc@lists.librecores.org Acked-by: Stafford Horne Signed-off-by: "Eric W. Biederman" Signed-off-by: Greg Kroah-Hartman --- arch/openrisc/kernel/traps.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 803e9e756f7785..8d8437169b5e74 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -306,12 +306,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) siginfo_t info; if (user_mode(regs)) { - /* Send a SIGSEGV */ - info.si_signo = SIGSEGV; + /* Send a SIGBUS */ + info.si_signo = SIGBUS; info.si_errno = 0; - /* info.si_code has been set above */ - info.si_addr = (void *)address; - force_sig_info(SIGSEGV, &info, current); + info.si_code = BUS_ADRALN; + info.si_addr = (void __user *)address; + force_sig_info(SIGBUS, &info, current); } else { printk("KERNEL: Unaligned Access 0x%.8lx\n", address); show_registers(regs); From 16256f2658806cbd84d649ac700e673b16a06168 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 24 Jul 2017 17:30:30 -0500 Subject: [PATCH 1945/3326] signal/sh: Ensure si_signo is initialized in do_divide_error commit 0e88bb002a9b2ee8cc3cc9478ce2dc126f849696 upstream. Set si_signo. Cc: Yoshinori Sato Cc: Rich Felker Cc: Paul Mundt Cc: linux-sh@vger.kernel.org Fixes: 0983b31849bb ("sh: Wire up division and address error exceptions on SH-2A.") Signed-off-by: "Eric W. 
Biederman" Signed-off-by: Greg Kroah-Hartman --- arch/sh/kernel/traps_32.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 57cff00cad1780..b3770bb2621131 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -609,7 +609,8 @@ asmlinkage void do_divide_error(unsigned long r4) break; } - force_sig_info(SIGFPE, &info, current); + info.si_signo = SIGFPE; + force_sig_info(info.si_signo, &info, current); } #endif From 632a537b3c6b031834a7480f95fbf98132262376 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 2 Jan 2018 14:01:34 -0500 Subject: [PATCH 1946/3326] alpha: fix crash if pthread_create races with signal delivery commit 21ffceda1c8b3807615c40d440d7815e0c85d366 upstream. On alpha, a process will crash if it attempts to start a thread and a signal is delivered at the same time. The crash can be reproduced with this program: https://cygwin.com/ml/cygwin/2014-11/msg00473.html The reason for the crash is this: * we call the clone syscall * we go to the function copy_process * copy process calls copy_thread_tls, it is a wrapper around copy_thread * copy_thread sets the tls pointer: childti->pcb.unique = regs->r20 * copy_thread sets regs->r20 to zero * we go back to copy_process * copy process checks "if (signal_pending(current))" and returns -ERESTARTNOINTR * the clone syscall is restarted, but this time, regs->r20 is zero, so the new thread is created with zero tls pointer * the new thread crashes in start_thread when attempting to access tls The comment in the code says that setting the register r20 is some compatibility with OSF/1. But OSF/1 doesn't use the CLONE_SETTLS flag, so we don't have to zero r20 if CLONE_SETTLS is set. This patch fixes the bug by zeroing regs->r20 only if CLONE_SETTLS is not set. Signed-off-by: Mikulas Patocka Signed-off-by: Matt Turner Signed-off-by: Greg Kroah-Hartman --- arch/alpha/kernel/process.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 74bfb1f2d68e32..3a885253f48643 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -269,12 +269,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp, application calling fork. */ if (clone_flags & CLONE_SETTLS) childti->pcb.unique = regs->r20; + else + regs->r20 = 0; /* OSF/1 has some strange fork() semantics. */ childti->pcb.usp = usp ?: rdusp(); *childregs = *regs; childregs->r0 = 0; childregs->r19 = 0; childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */ - regs->r20 = 0; stack = ((struct switch_stack *) regs) - 1; *childstack = *stack; childstack->r26 = (unsigned long) ret_from_fork; From 90d17505219a7f382e6a295580e4382091bef7f6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Nov 2017 16:02:13 +0100 Subject: [PATCH 1947/3326] alpha: osf_sys.c: fix put_tv32 regression commit 47669fb6b5951d0e09fc99719653e0ac92b50b99 upstream. There was a typo in the new version of put_tv32() that caused an unguarded access of a user space pointer, and failed to return the correct result in gettimeofday(), wait4(), usleep_thread() and old_adjtimex(). This fixes it to give the correct behavior again. 
Fixes: 1cc6c4635e9f ("osf_sys.c: switch handling of timeval32/itimerval32 to copy_{to,from}_user()") Signed-off-by: Arnd Bergmann Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- arch/alpha/kernel/osf_sys.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index ce3a675c0c4bd7..75a5c35a2067d0 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -964,8 +964,8 @@ static inline long put_tv32(struct timeval32 __user *o, struct timeval *i) { return copy_to_user(o, &(struct timeval32){ - .tv_sec = o->tv_sec, - .tv_usec = o->tv_usec}, + .tv_sec = i->tv_sec, + .tv_usec = i->tv_usec}, sizeof(struct timeval32)); } From 371c5ada2af7fc456d2c4ea5aa1e0ab6cb7a5efe Mon Sep 17 00:00:00 2001 From: Michael Cree Date: Fri, 24 Nov 2017 21:25:01 +1300 Subject: [PATCH 1948/3326] alpha: Fix mixed up args in EXC macro in futex operations commit 84e455361ec97ea6037d31d42a2955628ea2094b upstream. Fix the typo (mixed up arguments) in the EXC macro in the futex definitions introduced by commit ca282f697381 (alpha: add a helper for emitting exception table entries). Signed-off-by: Michael Cree Signed-off-by: Matt Turner Signed-off-by: Greg Kroah-Hartman --- arch/alpha/include/asm/futex.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index d2e4da93e68ca0..ca3322536f7247 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h @@ -20,8 +20,8 @@ "3: .subsection 2\n" \ "4: br 1b\n" \ " .previous\n" \ - EXC(1b,3b,%1,$31) \ - EXC(2b,3b,%1,$31) \ + EXC(1b,3b,$31,%1) \ + EXC(2b,3b,$31,%1) \ : "=&r" (oldval), "=&r"(ret) \ : "r" (uaddr), "r"(oparg) \ : "memory") @@ -82,8 +82,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "3: .subsection 2\n" "4: br 1b\n" " .previous\n" - EXC(1b,3b,%0,$31) - EXC(2b,3b,%0,$31) + EXC(1b,3b,$31,%0) + EXC(2b,3b,$31,%0) : "+r"(ret), "=&r"(prev), "=&r"(cmp) : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) : "memory"); From bef0563f39cd457d090c0d0713f4699b670fd918 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 2 Jan 2018 13:59:54 -0500 Subject: [PATCH 1949/3326] alpha: fix reboot on Avanti platform commit 55fc633c41a08ce9244ff5f528f420b16b1e04d6 upstream. We need to define NEED_SRM_SAVE_RESTORE on the Avanti, otherwise we get machine check exception when attempting to reboot the machine. Signed-off-by: Mikulas Patocka Signed-off-by: Matt Turner Signed-off-by: Greg Kroah-Hartman --- arch/alpha/kernel/pci_impl.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h index 26231601630ed7..f332d88ffaffd5 100644 --- a/arch/alpha/kernel/pci_impl.h +++ b/arch/alpha/kernel/pci_impl.h @@ -144,7 +144,8 @@ struct pci_iommu_arena }; #if defined(CONFIG_ALPHA_SRM) && \ - (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA)) + (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \ + defined(CONFIG_ALPHA_AVANTI)) # define NEED_SRM_SAVE_RESTORE #else # undef NEED_SRM_SAVE_RESTORE From 12ab9e1e8d61b2384f97e2e6fb2a17aec677b01a Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 2 Jan 2018 14:00:32 -0500 Subject: [PATCH 1950/3326] alpha: fix formating of stack content commit 4b01abdb32fc36abe877503bfbd33019159fad71 upstream. Since version 4.9, the kernel automatically breaks printk calls into multiple newlines unless pr_cont is used. 
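As a side illustration of that printk rule (a throwaway test-module sketch, not part of the patch; the module name and the values printed are invented): a bare printk() now always opens a new line, so every continuation piece of a row has to go through pr_cont().

/* prcont_demo.c - illustrative-only module showing rows built with pr_cont() */
#include <linux/kernel.h>
#include <linux/module.h>

static int __init prcont_demo_init(void)
{
        unsigned long v[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        int i;

        for (i = 0; i < 8; i++) {
                if ((i % 4) == 0) {
                        if (i)
                                pr_cont("\n");
                        printk(KERN_INFO "  ");   /* starts a new row/line */
                } else {
                        pr_cont(" ");
                }
                pr_cont("%016lx", v[i]);          /* stays on the same row */
        }
        pr_cont("\n");
        return 0;
}

static void __exit prcont_demo_exit(void)
{
}

module_init(prcont_demo_init);
module_exit(prcont_demo_exit);
MODULE_LICENSE("GPL");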
Fix the alpha stacktrace code, so that it prints stack trace in four columns, as it was initially intended. Signed-off-by: Mikulas Patocka Signed-off-by: Matt Turner Signed-off-by: Greg Kroah-Hartman --- arch/alpha/kernel/traps.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 4bd99a7b1c41f4..f43bd05dede26c 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -160,11 +160,16 @@ void show_stack(struct task_struct *task, unsigned long *sp) for(i=0; i < kstack_depth_to_print; i++) { if (((long) stack & (THREAD_SIZE-1)) == 0) break; - if (i && ((i % 4) == 0)) - printk("\n "); - printk("%016lx ", *stack++); + if ((i % 4) == 0) { + if (i) + pr_cont("\n"); + printk(" "); + } else { + pr_cont(" "); + } + pr_cont("%016lx", *stack++); } - printk("\n"); + pr_cont("\n"); dik_show_trace(sp); } From e68d638e4931236aebcf3bf66f18136ba59f852e Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 5 Jan 2018 14:27:58 -0800 Subject: [PATCH 1951/3326] xtensa: fix futex_atomic_cmpxchg_inatomic commit ca47480921587ae30417dd234a9f79af188e3666 upstream. Return 0 if the operation was successful, not the userspace memory value. Check that userspace value equals passed oldval, not itself. Don't update *uval if the value wasn't read from userspace memory. This fixes process hang due to infinite loop in futex_lock_pi. It also fixes a bunch of glibc tests nptl/tst-mutexpi*. Signed-off-by: Max Filippov Signed-off-by: Greg Kroah-Hartman --- arch/xtensa/include/asm/futex.h | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h index eaaf1ebcc7a402..5bfbc1c401d4cf 100644 --- a/arch/xtensa/include/asm/futex.h +++ b/arch/xtensa/include/asm/futex.h @@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { int ret = 0; - u32 prev; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; @@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, __asm__ __volatile__ ( " # futex_atomic_cmpxchg_inatomic\n" - "1: l32i %1, %3, 0\n" - " mov %0, %5\n" - " wsr %1, scompare1\n" - "2: s32c1i %0, %3, 0\n" - "3:\n" + " wsr %5, scompare1\n" + "1: s32c1i %1, %4, 0\n" + " s32i %1, %6, 0\n" + "2:\n" " .section .fixup,\"ax\"\n" " .align 4\n" - "4: .long 3b\n" - "5: l32r %1, 4b\n" - " movi %0, %6\n" + "3: .long 2b\n" + "4: l32r %1, 3b\n" + " movi %0, %7\n" " jx %1\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .long 1b,5b,2b,5b\n" + " .long 1b,4b\n" " .previous\n" - : "+r" (ret), "=&r" (prev), "+m" (*uaddr) - : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) + : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval) + : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT) : "memory"); - *uval = prev; return ret; } From bed938ba673c9c5045fda278305ec541e5d13fc8 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Mon, 13 Nov 2017 16:12:06 +0000 Subject: [PATCH 1952/3326] EDAC, octeon: Fix an uninitialized variable warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 544e92581a2ac44607d7cc602c6b54d18656f56d upstream. 
Fix an uninitialized variable warning in the Octeon EDAC driver, as seen in MIPS cavium_octeon_defconfig builds since v4.14 with Codescape GNU Tools 2016.05-03: drivers/edac/octeon_edac-lmc.c In function ‘octeon_lmc_edac_poll_o2’: drivers/edac/octeon_edac-lmc.c:87:24: warning: ‘((long unsigned int*)&int_reg)[1]’ may \ be used uninitialized in this function [-Wmaybe-uninitialized] if (int_reg.s.sec_err || int_reg.s.ded_err) { ^ Initialise the whole int_reg variable to zero before the conditional assignments in the error injection case. Signed-off-by: James Hogan Acked-by: David Daney Cc: linux-edac Cc: linux-mips@linux-mips.org Fixes: 1bc021e81565 ("EDAC: Octeon: Add error injection support") Link: http://lkml.kernel.org/r/20171113161206.20990-1-james.hogan@mips.com Signed-off-by: Borislav Petkov Signed-off-by: Greg Kroah-Hartman --- drivers/edac/octeon_edac-lmc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c index 9c1ffe3e912bfb..aeb222ca3ed12c 100644 --- a/drivers/edac/octeon_edac-lmc.c +++ b/drivers/edac/octeon_edac-lmc.c @@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci) if (!pvt->inject) int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx)); else { + int_reg.u64 = 0; if (pvt->error_type == 1) int_reg.s.sec_err = 1; if (pvt->error_type == 2) From 05c9297f34053ca2f6aea1fd9a07ddea1d4793a4 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 29 Nov 2017 16:25:44 +0300 Subject: [PATCH 1953/3326] pinctrl: intel: Initialize GPIO properly when used through irqchip commit f5a26acf0162477af6ee4c11b4fb9cffe5d3e257 upstream. When a GPIO is requested using gpiod_get_* APIs, the intel pinctrl driver switches the pin to GPIO mode and makes sure interrupts are routed to the GPIO hardware instead of IOAPIC. However, if the GPIO is used directly through irqchip, as is the case with many I2C-HID devices where I2C core automatically configures the interrupt for the device, the pin is not initialized as GPIO. Instead we rely on the BIOS to configure the pin accordingly, which seems not to be the case at least in Asus X540NA SKU3 with Focaltech touchpad. When the pin is not properly configured it might result in weird behaviour, like interrupts suddenly stopping completely and the touchpad no longer responding to user input. Fix this by properly initializing the pin to GPIO mode also when it is used directly through irqchip.
Fixes: 7981c0015af2 ("pinctrl: intel: Add Intel Sunrisepoint pin controller and GPIO support") Reported-by: Daniel Drake Reported-and-tested-by: Chris Chiu Signed-off-by: Mika Westerberg Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/intel/pinctrl-intel.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 71df0f70b61f01..72b4527d690f43 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -427,6 +427,18 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) writel(value, padcfg0); } +static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) +{ + u32 value; + + /* Put the pad into GPIO mode */ + value = readl(padcfg0) & ~PADCFG0_PMODE_MASK; + /* Disable SCI/SMI/NMI generation */ + value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); + value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); + writel(value, padcfg0); +} + static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned pin) @@ -434,7 +446,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); void __iomem *padcfg0; unsigned long flags; - u32 value; raw_spin_lock_irqsave(&pctrl->lock, flags); @@ -444,13 +455,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, } padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); - /* Put the pad into GPIO mode */ - value = readl(padcfg0) & ~PADCFG0_PMODE_MASK; - /* Disable SCI/SMI/NMI generation */ - value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); - value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); - writel(value, padcfg0); - + intel_gpio_set_gpio_mode(padcfg0); /* Disable TX buffer and enable RX (this will be input) */ __intel_gpio_set_direction(padcfg0, true); @@ -935,6 +940,8 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type) raw_spin_lock_irqsave(&pctrl->lock, flags); + intel_gpio_set_gpio_mode(reg); + value = readl(reg); value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV); From 688d1b8c4721e247649383bbfcafedca3cdff89a Mon Sep 17 00:00:00 2001 From: Dmitry Mastykin Date: Thu, 28 Dec 2017 18:19:24 +0300 Subject: [PATCH 1954/3326] pinctrl: mcp23s08: fix irq setup order commit 02e389e63e3523828fc3832f27e0341885f60f6f upstream. When using mcp23s08 module with gpio-keys, often (50% of boots) it fails to get irq numbers with message: "gpio-keys keys: Unable to get irq number for GPIO 0, error -6". Seems that irqs must be setup before devm_gpiochip_add_data(). 
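The underlying rule generalises; a hedged probe-ordering sketch (foo_* names are invented, this is not the mcp23s08 code): everything a consumer may use the moment the gpiochip becomes visible - the irqchip here - must be ready before devm_gpiochip_add_data() runs, otherwise early gpiod_to_irq() callers such as gpio-keys can see the -ENXIO (-6) failure quoted above.

static int foo_probe_one(struct foo_chip *foo, struct device *dev)
{
        int ret;

        /* 1) bring the irqchip up first, while nothing can reach it yet */
        if (foo->irq && foo->irq_controller) {
                ret = foo_irq_setup(foo);        /* hypothetical helper */
                if (ret)
                        return ret;
        }

        /* 2) only then publish the gpiochip; gpiod_to_irq() now works */
        return devm_gpiochip_add_data(dev, &foo->chip, foo);
}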
Signed-off-by: Dmitry Mastykin Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/pinctrl-mcp23s08.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 9c950bbf07bab6..447763aad81503 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -891,16 +891,16 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, goto fail; } - ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); - if (ret < 0) - goto fail; - if (mcp->irq && mcp->irq_controller) { ret = mcp23s08_irq_setup(mcp); if (ret) goto fail; } + ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); + if (ret < 0) + goto fail; + mcp->pinctrl_desc.name = "mcp23xxx-pinctrl"; mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops; mcp->pinctrl_desc.confops = &mcp_pinconf_ops; From 89cad3fa54919ba00004e0e202f549391a074a7c Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Wed, 17 Jan 2018 14:34:21 +0100 Subject: [PATCH 1955/3326] pinctrl: sx150x: Unregister the pinctrl on release commit 0657cb50b5a75abd92956028727dc255d690a4a6 upstream. There is no matching call to pinctrl_unregister, so switch to the managed devm_pinctrl_register to clean up properly when done. Fixes: 9e80f9064e73 ("pinctrl: Add SX150X GPIO Extender Pinctrl Driver") Signed-off-by: Peter Rosin Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/pinctrl-sx150x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c index 7450f511844569..7b0d5972bb2159 100644 --- a/drivers/pinctrl/pinctrl-sx150x.c +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -1225,7 +1225,7 @@ static int sx150x_probe(struct i2c_client *client, pctl->pinctrl_desc.npins = pctl->data->npins; pctl->pinctrl_desc.owner = THIS_MODULE; - pctl->pctldev = pinctrl_register(&pctl->pinctrl_desc, dev, pctl); + pctl->pctldev = devm_pinctrl_register(dev, &pctl->pinctrl_desc, pctl); if (IS_ERR(pctl->pctldev)) { dev_err(dev, "Failed to register pinctrl device\n"); return PTR_ERR(pctl->pctldev); From 5219eedf2d5e19fdcd03aa685426eb891be85af6 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Wed, 17 Jan 2018 14:34:22 +0100 Subject: [PATCH 1956/3326] pinctrl: sx150x: Register pinctrl before adding the gpiochip commit 1a1d39e1b8dd1d0f92a79da4fcc1ab0be3ae9bfc upstream. Various gpiolib activity depend on the pinctrl to be up and kicking. Therefore, register the pinctrl before adding a gpiochip. 
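Condensed to its ordering, the resulting probe looks roughly like the sketch below (illustrative only; struct foo_priv and the trimmed argument list are assumptions, the real driver fills in far more state):

static int foo_probe(struct device *dev, struct foo_priv *priv)
{
        int ret;

        /* register and enable the pin controller first ... */
        ret = devm_pinctrl_register_and_init(dev, &priv->pinctrl_desc,
                                             priv, &priv->pctldev);
        if (ret)
                return ret;

        ret = pinctrl_enable(priv->pctldev);
        if (ret)
                return ret;

        /* ... so gpiolib can lean on it while the gpiochip registers */
        return devm_gpiochip_add_data(dev, &priv->gpio, priv);
}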
Suggested-by: Linus Walleij Signed-off-by: Peter Rosin Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/pinctrl-sx150x.c | 35 +++++++++++++++++++------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c index 7b0d5972bb2159..7fa396f45c8cc0 100644 --- a/drivers/pinctrl/pinctrl-sx150x.c +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -1144,6 +1144,27 @@ static int sx150x_probe(struct i2c_client *client, if (ret) return ret; + /* Pinctrl_desc */ + pctl->pinctrl_desc.name = "sx150x-pinctrl"; + pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops; + pctl->pinctrl_desc.confops = &sx150x_pinconf_ops; + pctl->pinctrl_desc.pins = pctl->data->pins; + pctl->pinctrl_desc.npins = pctl->data->npins; + pctl->pinctrl_desc.owner = THIS_MODULE; + + ret = devm_pinctrl_register_and_init(dev, &pctl->pinctrl_desc, + pctl, &pctl->pctldev); + if (ret) { + dev_err(dev, "Failed to register pinctrl device\n"); + return ret; + } + + ret = pinctrl_enable(pctl->pctldev); + if (ret) { + dev_err(dev, "Failed to enable pinctrl device\n"); + return ret; + } + /* Register GPIO controller */ pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL); pctl->gpio.base = -1; @@ -1217,20 +1238,6 @@ static int sx150x_probe(struct i2c_client *client, client->irq); } - /* Pinctrl_desc */ - pctl->pinctrl_desc.name = "sx150x-pinctrl"; - pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops; - pctl->pinctrl_desc.confops = &sx150x_pinconf_ops; - pctl->pinctrl_desc.pins = pctl->data->pins; - pctl->pinctrl_desc.npins = pctl->data->npins; - pctl->pinctrl_desc.owner = THIS_MODULE; - - pctl->pctldev = devm_pinctrl_register(dev, &pctl->pinctrl_desc, pctl); - if (IS_ERR(pctl->pctldev)) { - dev_err(dev, "Failed to register pinctrl device\n"); - return PTR_ERR(pctl->pctldev); - } - return 0; } From f04fe1192e5607f203e7c0096340fc867da8e84c Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Wed, 17 Jan 2018 14:34:23 +0100 Subject: [PATCH 1957/3326] pinctrl: sx150x: Add a static gpio/pinctrl pin range mapping commit b930151e5b55a0e62a3aad06876de891ac980471 upstream. Without such a range, gpiolib fails with -EPROBE_DEFER, pending the addition of the range. So, without a range, gpiolib will keep deferring indefinitely. Fixes: 9e80f9064e73 ("pinctrl: Add SX150X GPIO Extender Pinctrl Driver") Fixes: e10f72bf4b3e ("gpio: gpiolib: Generalise state persistence beyond sleep") Suggested-by: Linus Walleij Signed-off-by: Peter Rosin Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/pinctrl-sx150x.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c index 7fa396f45c8cc0..70a0228f4e7fe5 100644 --- a/drivers/pinctrl/pinctrl-sx150x.c +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -1193,6 +1193,11 @@ static int sx150x_probe(struct i2c_client *client, if (ret) return ret; + ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev), + 0, 0, pctl->data->npins); + if (ret) + return ret; + /* Add Interrupt support if an irq is specified */ if (client->irq > 0) { pctl->irq_chip.name = devm_kstrdup(dev, client->name, From 030dcf7d1af39b0dc341702cb7d2482df93d3c71 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 2 Jan 2018 11:39:47 -0800 Subject: [PATCH 1958/3326] pktcdvd: Fix pkt_setup_dev() error path commit 5a0ec388ef0f6e33841aeb810d7fa23f049ec4cd upstream. 
Commit 523e1d399ce0 ("block: make gendisk hold a reference to its queue") modified add_disk() and disk_release() but did not update any of the error paths that trigger a put_disk() call after disk->queue has been assigned. That introduced the following behavior in the pktcdvd driver if pkt_new_dev() fails: Kernel BUG at 00000000e98fd882 [verbose debug info unavailable] Since disk_release() calls blk_put_queue() anyway if disk->queue != NULL, fix this by removing the blk_cleanup_queue() call from the pkt_setup_dev() error path. Fixes: commit 523e1d399ce0 ("block: make gendisk hold a reference to its queue") Signed-off-by: Bart Van Assche Cc: Tejun Heo Cc: Maciej S. Szmigiero Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/block/pktcdvd.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 67974796c35094..2659b253407358 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) pd->pkt_dev = MKDEV(pktdev_major, idx); ret = pkt_new_dev(pd, dev); if (ret) - goto out_new_dev; + goto out_mem2; /* inherit events of the host device */ disk->events = pd->bdev->bd_disk->events; @@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) mutex_unlock(&ctl_mutex); return 0; -out_new_dev: - blk_cleanup_queue(disk->queue); out_mem2: put_disk(disk); out_mem: From c846868070d0426667902c55a89008333584af50 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 2 Jan 2018 11:39:48 -0800 Subject: [PATCH 1959/3326] pktcdvd: Fix a recently introduced NULL pointer dereference commit 882d4171a8950646413b1a3cbe0e4a6a612fe82e upstream. Call bdev_get_queue(bdev) after bdev->bd_disk has been initialized instead of just before that pointer has been initialized. This patch avoids that the following command pktsetup 1 /dev/sr0 triggers the following kernel crash: BUG: unable to handle kernel NULL pointer dereference at 0000000000000548 IP: pkt_setup_dev+0x2db/0x670 [pktcdvd] CPU: 2 PID: 724 Comm: pktsetup Not tainted 4.15.0-rc4-dbg+ #1 Call Trace: pkt_ctl_ioctl+0xce/0x1c0 [pktcdvd] do_vfs_ioctl+0x8e/0x670 SyS_ioctl+0x3c/0x70 entry_SYSCALL_64_fastpath+0x23/0x9a Reported-by: Maciej S. Szmigiero Fixes: commit ca18d6f769d2 ("block: Make most scsi_req_init() calls implicit") Signed-off-by: Bart Van Assche Tested-by: Maciej S. Szmigiero Cc: Maciej S. Szmigiero Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/block/pktcdvd.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 2659b253407358..531a0915066b31 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) bdev = bdget(dev); if (!bdev) return -ENOMEM; + ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); + if (ret) + return ret; if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) { WARN_ONCE(true, "Attempt to register a non-SCSI queue\n"); - bdput(bdev); + blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); return -EINVAL; } - ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); - if (ret) - return ret; /* This is safe, since we have a reference from open(). 
*/ __module_get(THIS_MODULE); From 392640fd18f8f7433820d618c02e7d3d696f3d64 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sat, 6 Jan 2018 16:27:37 +0800 Subject: [PATCH 1960/3326] blk-mq: quiesce queue before freeing queue commit c2856ae2f315d754a0b6a268e4c6745b332b42e7 upstream. After queue is frozen, dispatch still may happen, for example: 1) requests are submitted from several contexts 2) requests from all these contexts are inserted to queue, but may dispatch to LLD in one of these paths, but other paths sill need to move on even all these requests are completed(that means blk_mq_freeze_queue_wait() returns at that time) 3) dispatch after queue freezing still moves on and causes use-after-free, because request queue is freed This patch quiesces queue after it is frozen, and makes sure all in-progress dispatch are completed. This patch fixes the following kernel crash when running heavy IOs vs. deleting device: [ 36.719251] BUG: unable to handle kernel NULL pointer dereference at 0000000000000008 [ 36.720318] IP: kyber_has_work+0x14/0x40 [ 36.720847] PGD 254bf5067 P4D 254bf5067 PUD 255e6a067 PMD 0 [ 36.721584] Oops: 0000 [#1] PREEMPT SMP [ 36.722105] Dumping ftrace buffer: [ 36.722570] (ftrace buffer empty) [ 36.723057] Modules linked in: scsi_debug ebtable_filter ebtables ip6table_filter ip6_tables tcm_loop iscsi_target_mod target_core_file target_core_iblock target_core_pscsi target_core_mod xt_CHECKSUM iptable_mangle ipt_MASQUERADE nf_nat_masquerade_ipv4 iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack libcrc32c bridge stp llc fuse iptable_filter ip_tables sd_mod sg btrfs xor zstd_decompress zstd_compress xxhash raid6_pq mptsas mptscsih bcache crc32c_intel ahci mptbase libahci serio_raw scsi_transport_sas nvme libata shpchp lpc_ich virtio_scsi nvme_core binfmt_misc dm_mod iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi null_blk configs [ 36.733438] CPU: 2 PID: 2374 Comm: fio Not tainted 4.15.0-rc2.blk_mq_quiesce+ #714 [ 36.735143] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.9.3-1.fc25 04/01/2014 [ 36.736688] RIP: 0010:kyber_has_work+0x14/0x40 [ 36.737515] RSP: 0018:ffffc9000209bca0 EFLAGS: 00010202 [ 36.738431] RAX: 0000000000000008 RBX: ffff88025578bfc8 RCX: ffff880257bf4ed0 [ 36.739581] RDX: 0000000000000038 RSI: ffffffff81a98c6d RDI: ffff88025578bfc8 [ 36.740730] RBP: ffff880253cebfc8 R08: ffffc9000209bda0 R09: ffff8802554f3480 [ 36.741885] R10: ffffc9000209be60 R11: ffff880263f72538 R12: ffff88025573e9e8 [ 36.743036] R13: ffff88025578bfd0 R14: 0000000000000001 R15: 0000000000000000 [ 36.744189] FS: 00007f9b9bee67c0(0000) GS:ffff88027fc80000(0000) knlGS:0000000000000000 [ 36.746617] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 36.748483] CR2: 0000000000000008 CR3: 0000000254bf4001 CR4: 00000000003606e0 [ 36.750164] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 36.751455] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 36.752796] Call Trace: [ 36.753992] blk_mq_do_dispatch_sched+0x7f/0xe0 [ 36.755110] blk_mq_sched_dispatch_requests+0x119/0x190 [ 36.756179] __blk_mq_run_hw_queue+0x83/0x90 [ 36.757144] __blk_mq_delay_run_hw_queue+0xaf/0x110 [ 36.758046] blk_mq_run_hw_queue+0x24/0x70 [ 36.758845] blk_mq_flush_plug_list+0x1e7/0x270 [ 36.759676] blk_flush_plug_list+0xd6/0x240 [ 36.760463] blk_finish_plug+0x27/0x40 [ 36.761195] do_io_submit+0x19b/0x780 [ 36.761921] ? 
entry_SYSCALL_64_fastpath+0x1a/0x7d [ 36.762788] entry_SYSCALL_64_fastpath+0x1a/0x7d [ 36.763639] RIP: 0033:0x7f9b9699f697 [ 36.764352] RSP: 002b:00007ffc10f991b8 EFLAGS: 00000206 ORIG_RAX: 00000000000000d1 [ 36.765773] RAX: ffffffffffffffda RBX: 00000000008f6f00 RCX: 00007f9b9699f697 [ 36.766965] RDX: 0000000000a5e6c0 RSI: 0000000000000001 RDI: 00007f9b8462a000 [ 36.768377] RBP: 0000000000000000 R08: 0000000000000001 R09: 00000000008f6420 [ 36.769649] R10: 00007f9b846e5000 R11: 0000000000000206 R12: 00007f9b795d6a70 [ 36.770807] R13: 00007f9b795e4140 R14: 00007f9b795e3fe0 R15: 0000000100000000 [ 36.771955] Code: 83 c7 10 e9 3f 68 d1 ff 0f 1f 44 00 00 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 48 8b 97 b0 00 00 00 48 8d 42 08 48 83 c2 38 <48> 3b 00 74 06 b8 01 00 00 00 c3 48 3b 40 08 75 f4 48 83 c0 10 [ 36.775004] RIP: kyber_has_work+0x14/0x40 RSP: ffffc9000209bca0 [ 36.776012] CR2: 0000000000000008 [ 36.776690] ---[ end trace 4045cbce364ff2a4 ]--- [ 36.777527] Kernel panic - not syncing: Fatal exception [ 36.778526] Dumping ftrace buffer: [ 36.779313] (ftrace buffer empty) [ 36.780081] Kernel Offset: disabled [ 36.780877] ---[ end Kernel panic - not syncing: Fatal exception Reviewed-by: Christoph Hellwig Tested-by: Yi Zhang Signed-off-by: Ming Lei Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- block/blk-core.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/block/blk-core.c b/block/blk-core.c index 7b30bf10b1d4f4..f3750389e351e1 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -660,6 +660,15 @@ void blk_cleanup_queue(struct request_queue *q) queue_flag_set(QUEUE_FLAG_DEAD, q); spin_unlock_irq(lock); + /* + * make sure all in-progress dispatch are completed because + * blk_freeze_queue() can only complete all requests, and + * dispatch may still be in-progress since we dispatch requests + * from more than one contexts + */ + if (q->mq_ops) + blk_mq_quiesce_queue(q); + /* for synchronous bio-based driver finish in-flight integrity i/o */ blk_flush_integrity(); From c87806a8e565e1253cad5072577b759f76f79fdb Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 8 Jan 2018 14:28:50 +0100 Subject: [PATCH 1961/3326] clocksource/drivers/stm32: Fix kernel panic with multiple timers commit e0aeca3d8cbaea514eb98df1149faa918f9ec42d upstream. The current code hides a couple of bugs: - The global variable 'clock_event_ddata' is overwritten each time the init function is invoked. This is fixed with a kmemdup() instead of assigning the global variable. That prevents a memory corruption when several timers are defined in the DT. - The clockevent's event_handler is NULL if the time framework does not select the clockevent when registering it, this is fine but the init code generates in any case an interrupt leading to dereference this NULL pointer. The stm32 timer works with shadow registers, a mechanism to cache the registers. When a change is done in one buffered register, we need to artificially generate an event to force the timer to copy the content of the register to the shadowed register. The auto-reload register (ARR) is one of the shadowed register as well as the prescaler register (PSC), so in order to force the copy, we issue an event which in turn leads to an interrupt and the NULL dereference. This is fixed by inverting two lines where we clear the status register before enabling the update event interrupt. As this kernel crash is resulting from the combination of these two bugs, the fixes are grouped into a single patch. 
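The first half of the fix is a reusable pattern; a minimal sketch (illustrative only - struct foo_timer_data, its fields and the helper name are assumptions): each probed node gets its own kmemdup() copy of the static template instead of the template itself, so a second timer can no longer overwrite the first. The second half - clearing TIM_SR before enabling the update interrupt in TIM_DIER - is visible directly in the hunk below.

#include <linux/slab.h>

struct foo_timer_data {
        void __iomem *base;
        unsigned long periodic_top;
};

/* shared template holding defaults only - never handed out directly */
static const struct foo_timer_data foo_timer_template;

static struct foo_timer_data *foo_timer_alloc_instance(void)
{
        /* one private copy per probed device-tree node */
        return kmemdup(&foo_timer_template, sizeof(foo_timer_template),
                       GFP_KERNEL);
}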
Tested-by: Benjamin Gaignard Signed-off-by: Daniel Lezcano Acked-by: Benjamin Gaignard Cc: Alexandre Torgue Cc: Linus Torvalds Cc: Maxime Coquelin Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1515418139-23276-11-git-send-email-daniel.lezcano@linaro.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- drivers/clocksource/timer-stm32.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c index 8f2423789ba909..4bfeb9929ab2b0 100644 --- a/drivers/clocksource/timer-stm32.c +++ b/drivers/clocksource/timer-stm32.c @@ -106,6 +106,10 @@ static int __init stm32_clockevent_init(struct device_node *np) unsigned long rate, max_delta; int irq, ret, bits, prescaler = 1; + data = kmemdup(&clock_event_ddata, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + clk = of_clk_get(np, 0); if (IS_ERR(clk)) { ret = PTR_ERR(clk); @@ -156,8 +160,8 @@ static int __init stm32_clockevent_init(struct device_node *np) writel_relaxed(prescaler - 1, data->base + TIM_PSC); writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR); - writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER); writel_relaxed(0, data->base + TIM_SR); + writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER); data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ); @@ -184,6 +188,7 @@ static int __init stm32_clockevent_init(struct device_node *np) err_clk_enable: clk_put(clk); err_clk_get: + kfree(data); return ret; } From 5a5df7771064defa931d44ed28fec673fe66752f Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 6 Feb 2018 15:40:38 -0800 Subject: [PATCH 1962/3326] lib/ubsan.c: s/missaligned/misaligned/ commit b8fe1120b4ba342b4f156d24e952d6e686b20298 upstream. A vist from the spelling fairy. Cc: David Laight Cc: Andrey Ryabinin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- lib/ubsan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ubsan.c b/lib/ubsan.c index fb0409df1bcf84..1e2328fa002dde 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -281,7 +281,7 @@ static void handle_null_ptr_deref(struct type_mismatch_data *data) ubsan_epilogue(&flags); } -static void handle_missaligned_access(struct type_mismatch_data *data, +static void handle_misaligned_access(struct type_mismatch_data *data, unsigned long ptr) { unsigned long flags; @@ -322,7 +322,7 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, if (!ptr) handle_null_ptr_deref(data); else if (data->alignment && !IS_ALIGNED(ptr, data->alignment)) - handle_missaligned_access(data, ptr); + handle_misaligned_access(data, ptr); else handle_object_size_mismatch(data, ptr); } From 2617e62c2f120fe02a14e01232db4127f8e414a2 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Tue, 6 Feb 2018 15:40:42 -0800 Subject: [PATCH 1963/3326] lib/ubsan: add type mismatch handler for new GCC/Clang commit 42440c1f9911b4b7b8ba3dc4e90c1197bc561211 upstream. UBSAN=y fails to build with new GCC/clang: arch/x86/kernel/head64.o: In function `sanitize_boot_params': arch/x86/include/asm/bootparam_utils.h:37: undefined reference to `__ubsan_handle_type_mismatch_v1' because Clang and GCC 8 slightly changed ABI for 'type mismatch' errors. Compiler now uses new __ubsan_handle_type_mismatch_v1() function with slightly modified 'struct type_mismatch_data'. Let's add new 'struct type_mismatch_data_common' which is independent from compiler's layout of 'struct type_mismatch_data'. 
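Before any handler runs, both compiler layouts are funnelled into that one internal view; a standalone user-space sketch of the adapter idea (the member subset, the type_name field and the demo values are assumptions - only the log2-alignment conversion mirrors the patch):

/* abi_adapter_demo.c - cc -std=c99 -o abi_adapter_demo abi_adapter_demo.c */
#include <stdio.h>

struct type_mismatch_data_v1 {        /* new GCC 8 / Clang layout (sketch) */
        const char *type_name;
        unsigned char log_alignment;  /* log2 of the required alignment */
        unsigned char type_check_kind;
};

struct type_mismatch_data_common {    /* compiler-independent internal view */
        const char *type_name;
        unsigned long alignment;
        unsigned char type_check_kind;
};

int main(void)
{
        struct type_mismatch_data_v1 v1 = {
                .type_name = "int", .log_alignment = 2, .type_check_kind = 0,
        };
        struct type_mismatch_data_common common = {
                .type_name       = v1.type_name,
                /* the only real conversion: log2 value -> byte alignment */
                .alignment       = 1UL << v1.log_alignment,
                .type_check_kind = v1.type_check_kind,
        };

        printf("alignment = %lu bytes\n", common.alignment);
        return 0;
}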
And make __ubsan_handle_type_mismatch[_v1]() functions transform compiler-dependent type mismatch data to our internal representation. This way, we can support both old and new compilers with minimal amount of change. Link: http://lkml.kernel.org/r/20180119152853.16806-1-aryabinin@virtuozzo.com Signed-off-by: Andrey Ryabinin Reported-by: Sodagudi Prasad Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- lib/ubsan.c | 48 ++++++++++++++++++++++++++++++++++++++---------- lib/ubsan.h | 14 ++++++++++++++ 2 files changed, 52 insertions(+), 10 deletions(-) diff --git a/lib/ubsan.c b/lib/ubsan.c index 1e2328fa002dde..50d1d5c25deb89 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -265,14 +265,14 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, } EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); -static void handle_null_ptr_deref(struct type_mismatch_data *data) +static void handle_null_ptr_deref(struct type_mismatch_data_common *data) { unsigned long flags; - if (suppress_report(&data->location)) + if (suppress_report(data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(data->location, &flags); pr_err("%s null pointer of type %s\n", type_check_kinds[data->type_check_kind], @@ -281,15 +281,15 @@ static void handle_null_ptr_deref(struct type_mismatch_data *data) ubsan_epilogue(&flags); } -static void handle_misaligned_access(struct type_mismatch_data *data, +static void handle_misaligned_access(struct type_mismatch_data_common *data, unsigned long ptr) { unsigned long flags; - if (suppress_report(&data->location)) + if (suppress_report(data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(data->location, &flags); pr_err("%s misaligned address %p for type %s\n", type_check_kinds[data->type_check_kind], @@ -299,15 +299,15 @@ static void handle_misaligned_access(struct type_mismatch_data *data, ubsan_epilogue(&flags); } -static void handle_object_size_mismatch(struct type_mismatch_data *data, +static void handle_object_size_mismatch(struct type_mismatch_data_common *data, unsigned long ptr) { unsigned long flags; - if (suppress_report(&data->location)) + if (suppress_report(data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(data->location, &flags); pr_err("%s address %p with insufficient space\n", type_check_kinds[data->type_check_kind], (void *) ptr); @@ -315,7 +315,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data *data, ubsan_epilogue(&flags); } -void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, +static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, unsigned long ptr) { @@ -326,8 +326,36 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, else handle_object_size_mismatch(data, ptr); } + +void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, + unsigned long ptr) +{ + struct type_mismatch_data_common common_data = { + .location = &data->location, + .type = data->type, + .alignment = data->alignment, + .type_check_kind = data->type_check_kind + }; + + ubsan_type_mismatch_common(&common_data, ptr); +} EXPORT_SYMBOL(__ubsan_handle_type_mismatch); +void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data, + unsigned long ptr) +{ + + struct type_mismatch_data_common common_data = { + .location = &data->location, + .type = data->type, + .alignment = 1UL << data->log_alignment, + .type_check_kind = data->type_check_kind + }; + + 
ubsan_type_mismatch_common(&common_data, ptr); +} +EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); + void __ubsan_handle_nonnull_return(struct nonnull_return_data *data) { unsigned long flags; diff --git a/lib/ubsan.h b/lib/ubsan.h index 88f23557edbe16..7e30b26497e0cd 100644 --- a/lib/ubsan.h +++ b/lib/ubsan.h @@ -37,6 +37,20 @@ struct type_mismatch_data { unsigned char type_check_kind; }; +struct type_mismatch_data_v1 { + struct source_location location; + struct type_descriptor *type; + unsigned char log_alignment; + unsigned char type_check_kind; +}; + +struct type_mismatch_data_common { + struct source_location *location; + struct type_descriptor *type; + unsigned long alignment; + unsigned char type_check_kind; +}; + struct nonnull_arg_data { struct source_location location; struct source_location attr_location; From 4063cd5683ce4d4a18e89a61b9dbf8492c9faf31 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 5 Dec 2017 09:29:19 +0200 Subject: [PATCH 1964/3326] btrfs: Handle btrfs_set_extent_delalloc failure in fixup worker commit f3038ee3a3f1017a1cbe9907e31fa12d366c5dcb upstream. This function was introduced by 247e743cbe6e ("Btrfs: Use async helpers to deal with pages that have been improperly dirtied") and it didn't do any error handling then. This function might very well fail in ENOMEM situation, yet it's not handled, this could lead to inconsistent state. So let's handle the failure by setting the mapping error bit. Signed-off-by: Nikolay Borisov Reviewed-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/inode.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c71afd424900ea..5eaedff28a32d6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2101,8 +2101,15 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) goto out; } - btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state, - 0); + ret = btrfs_set_extent_delalloc(inode, page_start, page_end, + &cached_state, 0); + if (ret) { + mapping_set_error(page->mapping, ret); + end_extent_writepage(page, ret, page_start, page_end); + ClearPageChecked(page); + goto out; + } + ClearPageChecked(page); set_page_dirty(page); out: From 2a3d3015a73cff4a701db85b935f464492e49901 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 8 Feb 2018 14:02:32 +0100 Subject: [PATCH 1965/3326] objtool: Fix switch-table detection commit 99ce7962d52d1948ad6f2785e308d48e76e0a6ef upstream. Linus reported that GCC-7.3 generated a switch-table construct that confused objtool. It turns out that, in particular due to KASAN, it is possible to have unrelated .rodata usage in between the .rodata setup for the switch-table and the following indirect jump. The simple linear reverse search from the indirect jump would hit upon the KASAN .rodata usage first and fail to find a switch_table, resulting in a spurious 'sibling call with modified stack frame' warning. Fix this by creating a 'jump-stack' which we can 'unwind' during reversal, thereby skipping over much of the in-between code. This is not fool proof by any means, but is sufficient to make the known cases work. Future work would be to construct more comprehensive flow analysis code. 
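A toy user-space sketch of the back-pointer ("jump-stack") idea (purely illustrative; the three-instruction layout and the reduced field set are assumptions): while scanning backwards, an instruction that is the target of an earlier unconditional jump lets the walk hop straight to that jump and skip the unrelated code in between.

/* backtrack_demo.c - prints "visit 0x20" then "visit 0x00", skipping 0x10 */
#include <stdio.h>

struct insn {
        int offset;
        struct insn *prev;            /* linear predecessor */
        struct insn *first_jump_src;  /* earliest unconditional jump to us */
};

static struct insn *back_step(struct insn *insn)
{
        /* prefer the recorded jump source, else the plain predecessor */
        return insn->first_jump_src ? insn->first_jump_src : insn->prev;
}

int main(void)
{
        struct insn a = { 0x00, NULL, NULL };  /* jumps forward to c     */
        struct insn b = { 0x10, &a,   NULL };  /* unrelated .rodata user */
        struct insn c = { 0x20, &b,   &a   };  /* jump target / dynamic  */

        for (struct insn *i = &c; i; i = back_step(i))
                printf("visit 0x%02x\n", i->offset);
        return 0;
}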
Reported-and-tested-by: Linus Torvalds Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Cc: Borislav Petkov Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180208130232.GF25235@hirez.programming.kicks-ass.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/check.c | 41 +++++++++++++++++++++++++++++++++++++++-- tools/objtool/check.h | 1 + 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 9cd028aa150980..2e458eb45586c4 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -851,8 +851,14 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func, * This is a fairly uncommon pattern which is new for GCC 6. As of this * writing, there are 11 occurrences of it in the allmodconfig kernel. * + * As of GCC 7 there are quite a few more of these and the 'in between' code + * is significant. Esp. with KASAN enabled some of the code between the mov + * and jmpq uses .rodata itself, which can confuse things. + * * TODO: Once we have DWARF CFI and smarter instruction decoding logic, * ensure the same register is used in the mov and jump instructions. + * + * NOTE: RETPOLINE made it harder still to decode dynamic jumps. */ static struct rela *find_switch_table(struct objtool_file *file, struct symbol *func, @@ -874,12 +880,25 @@ static struct rela *find_switch_table(struct objtool_file *file, text_rela->addend + 4); if (!rodata_rela) return NULL; + file->ignore_unreachables = true; return rodata_rela; } /* case 3 */ - func_for_each_insn_continue_reverse(file, func, insn) { + /* + * Backward search using the @first_jump_src links, these help avoid + * much of the 'in between' code. Which avoids us getting confused by + * it. + */ + for (insn = list_prev_entry(insn, list); + + &insn->list != &file->insn_list && + insn->sec == func->sec && + insn->offset >= func->offset; + + insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { + if (insn->type == INSN_JUMP_DYNAMIC) break; @@ -909,14 +928,32 @@ static struct rela *find_switch_table(struct objtool_file *file, return NULL; } + static int add_func_switch_tables(struct objtool_file *file, struct symbol *func) { - struct instruction *insn, *prev_jump = NULL; + struct instruction *insn, *last = NULL, *prev_jump = NULL; struct rela *rela, *prev_rela = NULL; int ret; func_for_each_insn(file, func, insn) { + if (!last) + last = insn; + + /* + * Store back-pointers for unconditional forward jumps such + * that find_switch_table() can back-track using those and + * avoid some potentially confusing code. 
+ */ + if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && + insn->offset > last->offset && + insn->jump_dest->offset > insn->offset && + !insn->jump_dest->first_jump_src) { + + insn->jump_dest->first_jump_src = insn; + last = insn->jump_dest; + } + if (insn->type != INSN_JUMP_DYNAMIC) continue; diff --git a/tools/objtool/check.h b/tools/objtool/check.h index dbadb304a410af..23a1d065cae190 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -47,6 +47,7 @@ struct instruction { bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; struct symbol *call_dest; struct instruction *jump_dest; + struct instruction *first_jump_src; struct list_head alts; struct symbol *func; struct stack_op stack_op; From 812245b4dbf663aa8c4d5483e2e7c5d2c0fb01fb Mon Sep 17 00:00:00 2001 From: Yan Markman Date: Wed, 3 Jan 2018 16:18:52 +0100 Subject: [PATCH 1966/3326] arm64: dts: marvell: add Ethernet aliases commit 474c5885582c4a79c21bcf01ed98f98c935f1f4a upstream. This patch adds Ethernet aliases in the Marvell Armada 7040 DB, 8040 DB and 8040 mcbin device trees so that the bootloader setup the MAC addresses correctly. Signed-off-by: Yan Markman [Antoine: commit message, small fixes] Signed-off-by: Antoine Tenart Signed-off-by: Gregory CLEMENT Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/marvell/armada-7040-db.dts | 6 ++++++ arch/arm64/boot/dts/marvell/armada-8040-db.dts | 7 +++++++ arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts | 6 ++++++ 3 files changed, 19 insertions(+) diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts index 9c3bdf87e5433f..51327645b3fbe4 100644 --- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts @@ -61,6 +61,12 @@ reg = <0x0 0x0 0x0 0x80000000>; }; + aliases { + ethernet0 = &cpm_eth0; + ethernet1 = &cpm_eth1; + ethernet2 = &cpm_eth2; + }; + cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus { compatible = "regulator-fixed"; regulator-name = "usb3h0-vbus"; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts index 0d7b2ae4661002..a4f82f1efbbc3f 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts @@ -61,6 +61,13 @@ reg = <0x0 0x0 0x0 0x80000000>; }; + aliases { + ethernet0 = &cpm_eth0; + ethernet1 = &cpm_eth2; + ethernet2 = &cps_eth0; + ethernet3 = &cps_eth1; + }; + cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus { compatible = "regulator-fixed"; regulator-name = "cpm-usb3h0-vbus"; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts index acf5c7d16d79b2..2b6b792dab9325 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts @@ -62,6 +62,12 @@ reg = <0x0 0x0 0x0 0x80000000>; }; + aliases { + ethernet0 = &cpm_eth0; + ethernet1 = &cps_eth0; + ethernet2 = &cps_eth1; + }; + /* Regulator labels correspond with schematics */ v_3_3: regulator-3-3v { compatible = "regulator-fixed"; From a51421b4cb099ce36cbc85689cabc0b3525d0e29 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 29 Nov 2017 19:51:37 +0200 Subject: [PATCH 1967/3326] drm/i915: Avoid PPS HW/SW state mismatch due to rounding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5643205c6340b565a3be0fe0e7305dc4aa551c74 upstream. 
We store a SW state of the t11_t12 timing in 100usec units but have to program it in 100msec as required by HW. The rounding used during programming means there will be a mismatch between the SW and HW states of this value triggering a "PPS state mismatch" error. Avoid this by storing the already rounded-up value in the SW state. Note that we still calculate panel_power_cycle_delay with the finer 100usec granularity to avoid any needless waits using that version of the delay. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103903 Cc: joks Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171129175137.2889-1-imre.deak@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_dp.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 09f274419eea1c..76cf68745870be 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5340,6 +5340,12 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, */ final->t8 = 1; final->t9 = 1; + + /* + * HW has only a 100msec granularity for t11_t12 so round it up + * accordingly. + */ + final->t11_t12 = roundup(final->t11_t12, 100 * 10); } static void From f232bfdcdd768bb6ba5945f1f8f28faa94fc10b8 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 19 Jan 2018 10:06:03 +0100 Subject: [PATCH 1968/3326] ACPI: sbshc: remove raw pointer from printk() message commit 43cdd1b716b26f6af16da4e145b6578f98798bf6 upstream. There's no need to be printing a raw kernel pointer to the kernel log at every boot. So just remove it, and change the whole message to use the correct dev_info() call at the same time. Reported-by: Wang Qize Signed-off-by: Greg Kroah-Hartman Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/sbshc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 2fa8304171e09b..7a3431018e0ab4 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device) device->driver_data = hc; acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc); - printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n", - hc->ec, hc->offset, hc->query_bit); + dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n", + hc->offset, hc->query_bit); return 0; } From 9fc03876355ac0bbbb774d15a362e35805c9db85 Mon Sep 17 00:00:00 2001 From: Toshi Kani Date: Fri, 2 Feb 2018 14:00:36 -0700 Subject: [PATCH 1969/3326] acpi, nfit: fix register dimm error handling commit 23fbd7c70aec7600e3227eb24259fc55bf6e4881 upstream. A NULL pointer reference kernel bug was observed when acpi_nfit_add_dimm() called in acpi_nfit_register_dimms() failed. This error path does not set nfit_mem->nvdimm, but the 2nd list_for_each_entry() loop in the function assumes it's always set. Add a check to nfit_mem->nvdimm. Fixes: ba9c8dd3c222 ("acpi, nfit: add dimm device notification support") Signed-off-by: Toshi Kani Cc: "Rafael J. 
Wysocki" Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/nfit/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index dea0fb3d6f64a5..f14b4326e855bc 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1618,6 +1618,9 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) struct kernfs_node *nfit_kernfs; nvdimm = nfit_mem->nvdimm; + if (!nvdimm) + continue; + nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); if (nfit_kernfs) nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, From e822be7502707b994a4847f618b49f17d1c02dae Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 8 Nov 2017 09:39:46 +0200 Subject: [PATCH 1970/3326] ovl: fix failure to fsync lower dir MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d796e77f1dd541fe34481af2eee6454688d13982 upstream. As a writable mount, it is not expected for overlayfs to return EINVAL/EROFS for fsync, even if dir/file is not changed. This commit fixes the case of fsync of directory, which is easier to address, because overlayfs already implements fsync file operation for directories. The problem reported by Raphael is that new PostgreSQL 10.0 with a database in overlayfs where lower layer in squashfs fails to start. The failure is due to fsync error, when PostgreSQL does fsync on all existing db directories on startup and a specific directory exists lower layer with no changes. Reported-by: Raphael Hertzog Signed-off-by: Amir Goldstein Tested-by: Raphaël Hertzog Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/overlayfs/readdir.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index d94a51dc4e32da..f1e9b24c994516 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -751,10 +751,14 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, struct dentry *dentry = file->f_path.dentry; struct file *realfile = od->realfile; + /* Nothing to sync for lower */ + if (!OVL_TYPE_UPPER(ovl_path_type(dentry))) + return 0; + /* * Need to check if we started out being a lower dir, but got copied up */ - if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) { + if (!od->is_upper) { struct inode *inode = file_inode(file); realfile = READ_ONCE(od->upperfile); From c7aee3941e1e10a662e5b22f345933280f0caac8 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 3 Jan 2018 18:54:42 +0200 Subject: [PATCH 1971/3326] ovl: take mnt_want_write() for removing impure xattr commit a5a927a7c82e28ea76599dee4019c41e372c911f upstream. The optimization in ovl_cache_get_impure() that tries to remove an unneeded "impure" xattr needs to take mnt_want_write() on upper fs. 
Fixes: 4edb83bb1041 ("ovl: constant d_ino for non-merge dirs") Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Greg Kroah-Hartman --- fs/overlayfs/readdir.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index f1e9b24c994516..7fa7d68baa6d19 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -575,8 +575,15 @@ static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path) return ERR_PTR(res); } if (list_empty(&cache->entries)) { - /* Good oportunity to get rid of an unnecessary "impure" flag */ - ovl_do_removexattr(ovl_dentry_upper(dentry), OVL_XATTR_IMPURE); + /* + * A good opportunity to get rid of an unneeded "impure" flag. + * Removing the "impure" xattr is best effort. + */ + if (!ovl_want_write(dentry)) { + ovl_do_removexattr(ovl_dentry_upper(dentry), + OVL_XATTR_IMPURE); + ovl_drop_write(dentry); + } ovl_clear_flag(OVL_IMPURE, d_inode(dentry)); kfree(cache); return NULL; From dff5406432ffd805e6917d7b7005e1098354596b Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 1 Aug 2017 05:02:38 -0500 Subject: [PATCH 1972/3326] mn10300/misalignment: Use SIGSEGV SEGV_MAPERR to report a failed user copy commit 6ac1dc736b323011a55ecd1fc5897c24c4f77cbd upstream. Setting si_code to 0 is the same a setting si_code to SI_USER which is definitely not correct. With si_code set to SI_USER si_pid and si_uid will be copied to userspace instead of si_addr. Which is very wrong. So fix this by using a sensible si_code (SEGV_MAPERR) for this failure. Fixes: b920de1b77b7 ("mn10300: add the MN10300/AM33 architecture to the kernel") Cc: David Howells Cc: Masakazu Urade Cc: Koichi Yasutake Signed-off-by: "Eric W. Biederman" Signed-off-by: Greg Kroah-Hartman --- arch/mn10300/mm/misalignment.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index b39a388825ae33..8ace89617c1c91 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -437,7 +437,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) info.si_signo = SIGSEGV; info.si_errno = 0; - info.si_code = 0; + info.si_code = SEGV_MAPERR; info.si_addr = (void *) regs->pc; force_sig_info(SIGSEGV, &info, current); return; From d9ef4003744321041b8cd2495a576c0be83c4b91 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 31 Jan 2018 00:49:18 -0800 Subject: [PATCH 1973/3326] devpts: fix error handling in devpts_mntget() commit c9cc8d01fb04117928830449388512a5047569c9 upstream. If devpts_ptmx_path() returns an error code, then devpts_mntget() dereferences an ERR_PTR(): BUG: unable to handle kernel paging request at fffffffffffffff5 IP: devpts_mntget+0x13f/0x280 fs/devpts/inode.c:173 Fix it by returning early in the error paths. 
Reproducer: #define _GNU_SOURCE #include #include #include #define TIOCGPTPEER _IO('T', 0x41) int main() { for (;;) { int fd = open("/dev/ptmx", 0); unshare(CLONE_NEWNS); ioctl(fd, TIOCGPTPEER, 0); } } Fixes: 311fc65c9fb9 ("pty: Repair TIOCGPTPEER") Reported-by: syzbot Signed-off-by: Eric Biggers Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/devpts/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 7eae33ffa3fcc1..e31d6ed3ec3275 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -168,11 +168,11 @@ struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi) dput(path.dentry); if (err) { mntput(path.mnt); - path.mnt = ERR_PTR(err); + return ERR_PTR(err); } if (DEVPTS_SB(path.mnt->mnt_sb) != fsi) { mntput(path.mnt); - path.mnt = ERR_PTR(-ENODEV); + return ERR_PTR(-ENODEV); } return path.mnt; } From b470fb7b791039d8afea0e4ebcbd6f4a41723c47 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 5 Feb 2018 22:05:31 -0500 Subject: [PATCH 1974/3326] ftrace: Remove incorrect setting of glob search field commit 7b6586562708d2b3a04fe49f217ddbadbbbb0546 upstream. __unregister_ftrace_function_probe() will incorrectly parse the glob filter because it resets the search variable that was setup by filter_parse_regex(). Al Viro reported this: After that call of filter_parse_regex() we could have func_g.search not equal to glob only if glob started with '!' or '*'. In the former case we would've buggered off with -EINVAL (not = 1). In the latter we would've set func_g.search equal to glob + 1, calculated the length of that thing in func_g.len and proceeded to reset func_g.search back to glob. Suppose the glob is e.g. *foo*. We end up with func_g.type = MATCH_MIDDLE_ONLY; func_g.len = 3; func_g.search = "*foo"; Feeding that to ftrace_match_record() will not do anything sane - we will be looking for names containing "*foo" (->len is ignored for that one). Link: http://lkml.kernel.org/r/20180127031706.GE13338@ZenIV.linux.org.uk Fixes: 3ba009297149f ("ftrace: Introduce ftrace_glob structure") Reviewed-by: Dmitry Safonov <0x7f454c46@gmail.com> Reviewed-by: Masami Hiramatsu Reported-by: Al Viro Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/ftrace.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 8319e09e15b945..7379bcf3baa058 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -4488,7 +4488,6 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, func_g.type = filter_parse_regex(glob, strlen(glob), &func_g.search, ¬); func_g.len = strlen(func_g.search); - func_g.search = glob; /* we do not support '!' for function probes */ if (WARN_ON(not)) From c561093ed6843684690436dea034af53b462cfe5 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 4 Dec 2017 10:06:23 -0800 Subject: [PATCH 1975/3326] scsi: core: Ensure that the SCSI error handler gets woken up commit 3bd6f43f5cb3714f70c591514f344389df593501 upstream. If scsi_eh_scmd_add() is called concurrently with scsi_host_queue_ready() while shost->host_blocked > 0 then it can happen that neither function wakes up the SCSI error handler. Fix this by making every function that decreases the host_busy counter wake up the error handler if necessary and by protecting the host_failed checks with the SCSI host lock. 
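The interleaving that loses the wakeup is easier to see spelled out (illustrative and simplified from the functions changed below; scsi_eh_wakeup() only wakes the handler when host_failed == host_busy):

	CPU0: scsi_host_queue_ready()         CPU1: scsi_eh_scmd_add()
	-----------------------------         ------------------------
	atomic_inc(&shost->host_busy)
	host_blocked > 0  ->  out_dec
	                                      shost->host_failed++
	                                      scsi_eh_wakeup():
	                                        host_failed != host_busy,
	                                        no wakeup
	atomic_dec(&shost->host_busy)
	(old code: no wakeup check here)

At this point host_failed == host_busy, but nothing ever calls scsi_eh_wakeup() again, so the error handler sleeps forever. The change below funnels every host_busy decrement through scsi_dec_host_busy(), which re-checks the wakeup condition under the host lock inside an RCU read-side section, and defers the host_failed increment and wakeup to an RCU callback, so a concurrent decrement either completes before that callback runs or sees the recovery state set beforehand.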
Reported-by: Pavel Tikhomirov References: https://marc.info/?l=linux-kernel&m=150461610630736 Fixes: commit 746650160866 ("scsi: convert host_busy to atomic_t") Signed-off-by: Bart Van Assche Reviewed-by: Pavel Tikhomirov Tested-by: Stuart Hayes Cc: Konstantin Khorenko Cc: Stuart Hayes Cc: Pavel Tikhomirov Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Johannes Thumshirn Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/hosts.c | 6 ++++++ drivers/scsi/scsi_error.c | 18 ++++++++++++++++-- drivers/scsi/scsi_lib.c | 39 ++++++++++++++++++++++++++++----------- include/scsi/scsi_host.h | 2 ++ 4 files changed, 52 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index fe3a0da3ec9783..57bf43e34863ec 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev) scsi_proc_hostdir_rm(shost->hostt); + /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */ + rcu_barrier(); + if (shost->tmf_work_q) destroy_workqueue(shost->tmf_work_q); if (shost->ehandler) @@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev) if (shost->work_q) destroy_workqueue(shost->work_q); + destroy_rcu_head(&shost->rcu); + if (shost->shost_state == SHOST_CREATED) { /* * Free the shost_dev device name here if scsi_host_alloc() @@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) INIT_LIST_HEAD(&shost->starved_list); init_waitqueue_head(&shost->host_wait); mutex_init(&shost->scan_mutex); + init_rcu_head(&shost->rcu); index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL); if (index < 0) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index dab876c6547392..fa504ba83ade6f 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -220,6 +220,17 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd) } } +static void scsi_eh_inc_host_failed(struct rcu_head *head) +{ + struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + shost->host_failed++; + scsi_eh_wakeup(shost); + spin_unlock_irqrestore(shost->host_lock, flags); +} + /** * scsi_eh_scmd_add - add scsi cmd to error handling. * @scmd: scmd to run eh on. @@ -242,9 +253,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) scsi_eh_reset(scmd); list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); - shost->host_failed++; - scsi_eh_wakeup(shost); spin_unlock_irqrestore(shost->host_lock, flags); + /* + * Ensure that all tasks observe the host state change before the + * host_failed change. + */ + call_rcu(&shost->rcu, scsi_eh_inc_host_failed); } /** diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 635cfa1f2aced8..0d3696e9ddddbd 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) cmd->cmd_len = scsi_command_size(cmd->cmnd); } -void scsi_device_unbusy(struct scsi_device *sdev) +/* + * Decrement the host_busy counter and wake up the error handler if necessary. 
+ * Avoid as follows that the error handler is not woken up if shost->host_busy + * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination + * with an RCU read lock in this function to ensure that this function in its + * entirety either finishes before scsi_eh_scmd_add() increases the + * host_failed counter or that it notices the shost state change made by + * scsi_eh_scmd_add(). + */ +static void scsi_dec_host_busy(struct Scsi_Host *shost) { - struct Scsi_Host *shost = sdev->host; - struct scsi_target *starget = scsi_target(sdev); unsigned long flags; + rcu_read_lock(); atomic_dec(&shost->host_busy); - if (starget->can_queue > 0) - atomic_dec(&starget->target_busy); - - if (unlikely(scsi_host_in_recovery(shost) && - (shost->host_failed || shost->host_eh_scheduled))) { + if (unlikely(scsi_host_in_recovery(shost))) { spin_lock_irqsave(shost->host_lock, flags); - scsi_eh_wakeup(shost); + if (shost->host_failed || shost->host_eh_scheduled) + scsi_eh_wakeup(shost); spin_unlock_irqrestore(shost->host_lock, flags); } + rcu_read_unlock(); +} + +void scsi_device_unbusy(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct scsi_target *starget = scsi_target(sdev); + + scsi_dec_host_busy(shost); + + if (starget->can_queue > 0) + atomic_dec(&starget->target_busy); atomic_dec(&sdev->device_busy); } @@ -1532,7 +1549,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, list_add_tail(&sdev->starved_entry, &shost->starved_list); spin_unlock_irq(shost->host_lock); out_dec: - atomic_dec(&shost->host_busy); + scsi_dec_host_busy(shost); return 0; } @@ -1993,7 +2010,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; out_dec_host_busy: - atomic_dec(&shost->host_busy); + scsi_dec_host_busy(shost); out_dec_target_busy: if (scsi_target(sdev)->can_queue > 0) atomic_dec(&scsi_target(sdev)->target_busy); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index a8b7bf879cede4..1a1df0d21ee3f9 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -571,6 +571,8 @@ struct Scsi_Host { struct blk_mq_tag_set tag_set; }; + struct rcu_head rcu; + atomic_t host_busy; /* commands actually active on low-level */ atomic_t host_blocked; From ce6faf10fd6544febcf5c3efe453193def873c60 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 7 Dec 2017 09:40:38 -0800 Subject: [PATCH 1976/3326] rcu: Export init_rcu_head() and destroy_rcu_head() to GPL modules commit 156baec39732f025dc778e00da95fc10d6e45885 upstream. Use of init_rcu_head() and destroy_rcu_head() from modules results in the following build-time error with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y: ERROR: "init_rcu_head" [drivers/scsi/scsi_mod.ko] undefined! ERROR: "destroy_rcu_head" [drivers/scsi/scsi_mod.ko] undefined! This commit therefore adds EXPORT_SYMBOL_GPL() for each to allow them to be used by GPL-licensed kernel modules. Reported-by: Bart Van Assche Reported-by: Stephen Rothwell Signed-off-by: Paul E. McKenney Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- kernel/rcu/update.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 5033b66d27530f..7a577bd989a4a3 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -421,11 +421,13 @@ void init_rcu_head(struct rcu_head *head) { debug_object_init(head, &rcuhead_debug_descr); } +EXPORT_SYMBOL_GPL(init_rcu_head); void destroy_rcu_head(struct rcu_head *head) { debug_object_free(head, &rcuhead_debug_descr); } +EXPORT_SYMBOL_GPL(destroy_rcu_head); static bool rcuhead_is_static_object(void *addr) { From 5fc77964eaa97884e630487254e4c265644fb116 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 20 Nov 2017 16:00:31 -0800 Subject: [PATCH 1977/3326] scsi: lpfc: Fix crash after bad bar setup on driver attachment commit e4b9794efdce13242f4af6682f3ed48ce3864a87 upstream. In test cases where an instance of the driver is detached and reattached, the driver will crash on reattachment. There is a compound if statement that will skip over the bar setup if the pci_resource_start call is not successful. The driver erroneously returns success to its bar setup in this scenario even though the bars aren't properly configured. Rework the offending code segment for proper initialization steps. If the pci_resource_start call fails, -ENOMEM is now returned. Sample stack: rport-5:0-10: blocked FC remote port time out: removing rport BUG: unable to handle kernel NULL pointer dereference at (null) ... lpfc_sli4_wait_bmbx_ready+0x32/0x70 [lpfc] ... ... RIP: 0010:... ... lpfc_sli4_wait_bmbx_ready+0x32/0x70 [lpfc] Call Trace: ... lpfc_sli4_post_sync_mbox+0x106/0x4d0 [lpfc] ... ? __alloc_pages_nodemask+0x176/0x420 ... ? __kmalloc+0x2e/0x230 ... lpfc_sli_issue_mbox_s4+0x533/0x720 [lpfc] ... ? mempool_alloc+0x69/0x170 ... ? dma_generic_alloc_coherent+0x8f/0x140 ... lpfc_sli_issue_mbox+0xf/0x20 [lpfc] ... lpfc_sli4_driver_resource_setup+0xa6f/0x1130 [lpfc] ... ? lpfc_pci_probe_one+0x23e/0x16f0 [lpfc] ... lpfc_pci_probe_one+0x445/0x16f0 [lpfc] ... local_pci_probe+0x45/0xa0 ... work_for_cpu_fn+0x14/0x20 ... process_one_work+0x17a/0x440 Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_init.c | 84 +++++++++++++++++++++-------------- 1 file changed, 51 insertions(+), 33 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6acf1bb1d32001..25612ccf6ff28e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -9413,44 +9413,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) lpfc_sli4_bar0_register_memmap(phba, if_type); } - if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, PCI_64BIT_BAR2))) { - /* - * Map SLI4 if type 0 HBA Control Register base to a kernel - * virtual address and setup the registers. - */ - phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); - bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); - phba->sli4_hba.ctrl_regs_memmap_p = - ioremap(phba->pci_bar1_map, bar1map_len); - if (!phba->sli4_hba.ctrl_regs_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLI4 HBA control registers.\n"); + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { + /* + * Map SLI4 if type 0 HBA Control Register base to a + * kernel virtual address and setup the registers. 
+ */ + phba->pci_bar1_map = pci_resource_start(pdev, + PCI_64BIT_BAR2); + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); + phba->sli4_hba.ctrl_regs_memmap_p = + ioremap(phba->pci_bar1_map, + bar1map_len); + if (!phba->sli4_hba.ctrl_regs_memmap_p) { + dev_err(&pdev->dev, + "ioremap failed for SLI4 HBA " + "control registers.\n"); + error = -ENOMEM; + goto out_iounmap_conf; + } + phba->pci_bar2_memmap_p = + phba->sli4_hba.ctrl_regs_memmap_p; + lpfc_sli4_bar1_register_memmap(phba); + } else { + error = -ENOMEM; goto out_iounmap_conf; } - phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; - lpfc_sli4_bar1_register_memmap(phba); } - if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, PCI_64BIT_BAR4))) { - /* - * Map SLI4 if type 0 HBA Doorbell Register base to a kernel - * virtual address and setup the registers. - */ - phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); - bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); - phba->sli4_hba.drbl_regs_memmap_p = - ioremap(phba->pci_bar2_map, bar2map_len); - if (!phba->sli4_hba.drbl_regs_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLI4 HBA doorbell registers.\n"); - goto out_iounmap_ctrl; - } - phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; - error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); - if (error) + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { + /* + * Map SLI4 if type 0 HBA Doorbell Register base to + * a kernel virtual address and setup the registers. + */ + phba->pci_bar2_map = pci_resource_start(pdev, + PCI_64BIT_BAR4); + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); + phba->sli4_hba.drbl_regs_memmap_p = + ioremap(phba->pci_bar2_map, + bar2map_len); + if (!phba->sli4_hba.drbl_regs_memmap_p) { + dev_err(&pdev->dev, + "ioremap failed for SLI4 HBA" + " doorbell registers.\n"); + error = -ENOMEM; + goto out_iounmap_ctrl; + } + phba->pci_bar4_memmap_p = + phba->sli4_hba.drbl_regs_memmap_p; + error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); + if (error) + goto out_iounmap_all; + } else { + error = -ENOMEM; goto out_iounmap_all; + } } return 0; From 1dbdcf117b227eb966691818e850a2fde589f2a2 Mon Sep 17 00:00:00 2001 From: Uma Krishnan Date: Wed, 3 Jan 2018 16:54:02 -0600 Subject: [PATCH 1978/3326] scsi: cxlflash: Reset command ioasc commit 96cf727fe8f102bf92150b741db71ee39fb8c521 upstream. In the event of a command failure, cxlflash returns the failure to the upper layers to process. After processing the error, when the command is queued again, the private command structure will not be zeroed and the ioasc could be stale. Per the SISLite specification, the AFU only sets the ioasc in the presence of a failure. Thus, even though the original command succeeds the second time, the command is considered a failure due to stale ioasc. This cycle repeats indefinitely and can cause a hang or IO failure. To fix the issue, clear the ioasc before queuing any command. [mkp: added Cc: stable per request] Fixes: 479ad8e9d48c ("scsi: cxlflash: Remove zeroing of private command data") Signed-off-by: Uma Krishnan Acked-by: Matthew R. Ochs Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/cxlflash/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 76b8b7eed0c03b..0b6467206f8ea7 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) cmd->parent = afu; cmd->hwq_index = hwq_index; + cmd->sa.ioasc = 0; cmd->rcb.ctx_id = hwq->ctx_hndl; cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel); From 7e83b2ff485cacbf73d27f821e07a8c78ad8cc68 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 16 Feb 2018 20:23:12 +0100 Subject: [PATCH 1979/3326] Linux 4.14.20 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a7160d6f0f2d0a..33176140f1336e 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 19 +SUBLEVEL = 20 EXTRAVERSION = NAME = Petit Gorille From b6f2efb86440af16ecdf08e23924a4dc52c1bd4a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 15 Jan 2018 11:07:27 -0800 Subject: [PATCH 1980/3326] tracing: Prevent PROFILE_ALL_BRANCHES when FORTIFY_SOURCE=y commit 68e76e034b6b1c1ce2eece1ab8ae4008e14be470 upstream. I regularly get 50 MB - 60 MB files during kernel randconfig builds. These large files mostly contain (many repeats of; e.g., 124,594): In file included from ../include/linux/string.h:6:0, from ../include/linux/uuid.h:20, from ../include/linux/mod_devicetable.h:13, from ../scripts/mod/devicetable-offsets.c:3: ../include/linux/compiler.h:64:4: warning: '______f' is static but declared in inline function 'strcpy' which is not static [enabled by default] ______f = { \ ^ ../include/linux/compiler.h:56:23: note: in expansion of macro '__trace_if' ^ ../include/linux/string.h:425:2: note: in expansion of macro 'if' if (p_size == (size_t)-1 && q_size == (size_t)-1) ^ This only happens when CONFIG_FORTIFY_SOURCE=y and CONFIG_PROFILE_ALL_BRANCHES=y, so prevent PROFILE_ALL_BRANCHES if FORTIFY_SOURCE=y. Link: http://lkml.kernel.org/r/9199446b-a141-c0c3-9678-a3f9107f2750@infradead.org Signed-off-by: Randy Dunlap Signed-off-by: Steven Rostedt (VMware) Cc: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- kernel/trace/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 434c840e2d82f6..4ad6f6ca18c1ea 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -343,7 +343,7 @@ config PROFILE_ANNOTATED_BRANCHES on if you need to profile the system's use of these macros. config PROFILE_ALL_BRANCHES - bool "Profile all if conditionals" + bool "Profile all if conditionals" if !FORTIFY_SOURCE select TRACE_BRANCH_PROFILING help This tracer profiles all branch conditions. Every if () From e99306bb4f6741d5482f924585975db5a6934953 Mon Sep 17 00:00:00 2001 From: Steffen Weber Date: Tue, 2 Jan 2018 19:24:09 +0100 Subject: [PATCH 1981/3326] scsi: smartpqi: allow static build ("built-in") commit dc2db1dc5fb9ab3a43b305c2720fee5278dbee2a upstream. If CONFIG_SCSI_SMARTPQI=y then don't build this driver as a module. Signed-off-by: Steffen Weber Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/smartpqi/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile index 0f42a225a66461..e6b77993023095 100644 --- a/drivers/scsi/smartpqi/Makefile +++ b/drivers/scsi/smartpqi/Makefile @@ -1,3 +1,3 @@ ccflags-y += -I. -obj-m += smartpqi.o +obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o From 7a748f0bb2e4aca2c18223585523eb07199a4715 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Sun, 28 Jan 2018 11:25:29 +0200 Subject: [PATCH 1982/3326] IB/umad: Fix use of unprotected device pointer commit f23a5350e43c810ca36b26d4ed4ecd9a08686f47 upstream. The ib_write_umad() is protected by taking the umad file mutex. However, it accesses file->port->ib_dev -- which is protected only by the port's mutex (field file_mutex). The ib_umad_remove_one() calls ib_umad_kill_port() which sets port->ib_dev to NULL under the port mutex (NOT the file mutex). It then sets the mad agent to "dead" under the umad file mutex. This is a race condition -- because there is a window where port->ib_dev is NULL, while the agent is not "dead". As a result, we saw stack traces like: [16490.678059] BUG: unable to handle kernel NULL pointer dereference at 00000000000000b0 [16490.678246] IP: ib_umad_write+0x29c/0xa3a [ib_umad] [16490.678333] PGD 0 P4D 0 [16490.678404] Oops: 0000 [#1] SMP PTI [16490.678466] Modules linked in: rdma_ucm(OE) ib_ucm(OE) rdma_cm(OE) iw_cm(OE) ib_ipoib(OE) ib_cm(OE) ib_uverbs(OE) ib_umad(OE) mlx4_en(OE) ptp pps_core mlx4_ib(OE-) ib_core(OE) mlx4_core(OE) mlx_compat (OE) memtrack(OE) devlink mst_pciconf(OE) mst_pci(OE) netconsole nfsv3 nfs_acl nfs lockd grace fscache cfg80211 rfkill esp6_offload esp6 esp4_offload esp4 sunrpc kvm_intel kvm ppdev parport_pc irqbypass parport joydev i2c_piix4 virtio_balloon cirrus drm_kms_helper ttm drm e1000 serio_raw virtio_pci virtio_ring virtio ata_generic pata_acpi qemu_fw_cfg [last unloaded: mlxfw] [16490.679202] CPU: 4 PID: 3115 Comm: sminfo Tainted: G OE 4.14.13-300.fc27.x86_64 #1 [16490.679339] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu2 04/01/2014 [16490.679477] task: ffff9cf753890000 task.stack: ffffaf70c26b0000 [16490.679571] RIP: 0010:ib_umad_write+0x29c/0xa3a [ib_umad] [16490.679664] RSP: 0018:ffffaf70c26b3d90 EFLAGS: 00010202 [16490.679747] RAX: 0000000000000010 RBX: ffff9cf75610fd80 RCX: 0000000000000000 [16490.679856] RDX: 0000000000000001 RSI: 00007ffdf2bfd714 RDI: ffff9cf6bb2a9c00 In the above trace, ib_umad_write is trying to dereference the NULL file->port->ib_dev pointer. Fix this by using the agent's device pointer (the device field in struct ib_mad_agent) -- which IS protected by the umad file mutex. 
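To summarize the locking relationships described above (illustrative):

	file->port->ib_dev   cleared to NULL by ib_umad_kill_port() under the
	                     port's file_mutex, which ib_umad_write() does not
	                     hold, so it can become NULL at any time;
	agent                only marked dead under the umad file mutex that
	                     ib_umad_write() does hold, so agent->device stays
	                     valid for the duration of the call.

That is why the hunk below builds the AH attributes from agent->device instead of file->port->ib_dev.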
Fixes: 44c58487d51a ("IB/core: Define 'ib' and 'roce' rdma_ah_attr types") Signed-off-by: Jack Morgenstein Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/user_mad.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 603acaf91828ae..6511cb21f6e20b 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -500,7 +500,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, } memset(&ah_attr, 0, sizeof ah_attr); - ah_attr.type = rdma_ah_find_type(file->port->ib_dev, + ah_attr.type = rdma_ah_find_type(agent->device, file->port->port_num); rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid)); rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl); From 5a4255467cd8ce9e2c5f9a25b50f2fe692cf1bb8 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Tue, 14 Nov 2017 04:34:52 -0800 Subject: [PATCH 1983/3326] IB/qib: Fix comparison error with qperf compare/swap test commit 87b3524cb5058fdc7c2afdb92bdb2e079661ddc4 upstream. This failure exists with qib: ver_rc_compare_swap: mismatch, sequence 2, expected 123456789abcdef, got 0 The request builder was using the incorrect inlines to build the request header resulting in incorrect data in the atomic header. Fix by using the appropriate inlines to create the request. Fixes: 261a4351844b ("IB/qib,IB/hfi: Use core common header file") Reviewed-by: Michael J. Ruhl Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/qib/qib_rc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index e9a91736b12dbe..d80b61a71eb8ec 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -434,13 +434,13 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) qp->s_state = OP(COMPARE_SWAP); put_ib_ateth_swap(wqe->atomic_wr.swap, &ohdr->u.atomic_eth); - put_ib_ateth_swap(wqe->atomic_wr.compare_add, - &ohdr->u.atomic_eth); + put_ib_ateth_compare(wqe->atomic_wr.compare_add, + &ohdr->u.atomic_eth); } else { qp->s_state = OP(FETCH_ADD); put_ib_ateth_swap(wqe->atomic_wr.compare_add, &ohdr->u.atomic_eth); - put_ib_ateth_swap(0, &ohdr->u.atomic_eth); + put_ib_ateth_compare(0, &ohdr->u.atomic_eth); } put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr, &ohdr->u.atomic_eth); From ade57e9031fac0c972c488acfce58e9a50e2c8dc Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Fri, 12 Jan 2018 07:58:40 +0200 Subject: [PATCH 1984/3326] IB/mlx4: Fix incorrectly releasing steerable UD QPs when have only ETH ports commit 852f6927594d0d3e8632c889b2ab38cbc46476ad upstream. Allocating steerable UD QPs depends on having at least one IB port, while releasing those QPs does not. As a result, when there are only ETH ports, the IB (RoCE) driver requests releasing a qp range whose base qp is zero, with qp count zero. 
When SR-IOV is enabled, and the VF driver is running on a VM over a hypervisor which treats such qp release calls as errors (rather than NOPs), we see lines in the VM message log like: mlx4_core 0002:00:02.0: Failed to release qp range base:0 cnt:0 Fix this by adding a check for a zero count in mlx4_release_qp_range() (which thus treats releasing 0 qps as a nop), and eliminating the check for device managed flow steering when releasing steerable UD QPs. (Freeing ib_uc_qpns_bitmap unconditionally is also OK, since it remains NULL when steerable UD QPs are not allocated). Fixes: 4196670be786 ("IB/mlx4: Don't allocate range of steerable UD QPs for Ethernet-only device") Signed-off-by: Jack Morgenstein Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/mlx4/main.c | 13 +++++-------- drivers/net/ethernet/mellanox/mlx4/qp.c | 3 +++ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index c636842c5be0e0..8c681a36e6c7fb 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2972,9 +2972,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) kfree(ibdev->ib_uc_qpns_bitmap); err_steer_qp_release: - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) - mlx4_qp_release_range(dev, ibdev->steer_qpn_base, - ibdev->steer_qpn_count); + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, + ibdev->steer_qpn_count); err_counter: for (i = 0; i < ibdev->num_ports; ++i) mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); @@ -3079,11 +3078,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) ibdev->iboe.nb.notifier_call = NULL; } - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { - mlx4_qp_release_range(dev, ibdev->steer_qpn_base, - ibdev->steer_qpn_count); - kfree(ibdev->ib_uc_qpns_bitmap); - } + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, + ibdev->steer_qpn_count); + kfree(ibdev->ib_uc_qpns_bitmap); iounmap(ibdev->uar_map); for (p = 0; p < ibdev->num_ports; ++p) diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 728a2fb1f5c09a..22a3bfe1ed8f56 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -287,6 +287,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) u64 in_param = 0; int err; + if (!cnt) + return; + if (mlx4_is_mfunc(dev)) { set_param_l(&in_param, base_qpn); set_param_h(&in_param, cnt); From 18c0ee900c8d93ae48a99cc1f46d372897b8e9a6 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 3 Jan 2018 13:28:18 -0800 Subject: [PATCH 1985/3326] IB/core: Fix two kernel warnings triggered by rxe registration commit 02ee9da347873699603d9ce0112a80b5dd69dea1 upstream. 
Eliminate the WARN_ONs that create following two warnings when registering an rxe device: WARNING: CPU: 2 PID: 1005 at drivers/infiniband/core/device.c:449 ib_register_device+0x591/0x640 [ib_core] CPU: 2 PID: 1005 Comm: run_tests Not tainted 4.15.0-rc4-dbg+ #2 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.0.0-prebuilt.qemu-project.org 04/01/2014 RIP: 0010:ib_register_device+0x591/0x640 [ib_core] Call Trace: rxe_register_device+0x3c6/0x470 [rdma_rxe] rxe_add+0x543/0x5e0 [rdma_rxe] rxe_net_add+0x37/0xb0 [rdma_rxe] rxe_param_set_add+0x5a/0x120 [rdma_rxe] param_attr_store+0x5e/0xc0 module_attr_store+0x19/0x30 sysfs_kf_write+0x3d/0x50 kernfs_fop_write+0x116/0x1a0 __vfs_write+0x23/0x120 vfs_write+0xbe/0x1b0 SyS_write+0x44/0xa0 entry_SYSCALL_64_fastpath+0x23/0x9a WARNING: CPU: 2 PID: 1005 at drivers/infiniband/core/sysfs.c:1279 ib_device_register_sysfs+0x11d/0x160 [ib_core] CPU: 2 PID: 1005 Comm: run_tests Tainted: G W 4.15.0-rc4-dbg+ #2 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.0.0-prebuilt.qemu-project.org 04/01/2014 RIP: 0010:ib_device_register_sysfs+0x11d/0x160 [ib_core] Call Trace: ib_register_device+0x3f7/0x640 [ib_core] rxe_register_device+0x3c6/0x470 [rdma_rxe] rxe_add+0x543/0x5e0 [rdma_rxe] rxe_net_add+0x37/0xb0 [rdma_rxe] rxe_param_set_add+0x5a/0x120 [rdma_rxe] param_attr_store+0x5e/0xc0 module_attr_store+0x19/0x30 sysfs_kf_write+0x3d/0x50 kernfs_fop_write+0x116/0x1a0 __vfs_write+0x23/0x120 vfs_write+0xbe/0x1b0 SyS_write+0x44/0xa0 entry_SYSCALL_64_fastpath+0x23/0x9a The code should accept either a parent pointer or a fully specified DMA specification without producing warnings. Fixes: 99db9494035f ("IB/core: Remove ib_device.dma_device") Signed-off-by: Bart Van Assche Cc: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/device.c | 20 ++++++++++++++------ drivers/infiniband/core/sysfs.c | 1 - 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 84fc32a2c8b3e8..ebfdb5503701ef 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -446,7 +446,6 @@ int ib_register_device(struct ib_device *device, struct ib_udata uhw = {.outlen = 0, .inlen = 0}; struct device *parent = device->dev.parent; - WARN_ON_ONCE(!parent); WARN_ON_ONCE(device->dma_device); if (device->dev.dma_ops) { /* @@ -455,16 +454,25 @@ int ib_register_device(struct ib_device *device, * into device->dev. */ device->dma_device = &device->dev; - if (!device->dev.dma_mask) - device->dev.dma_mask = parent->dma_mask; - if (!device->dev.coherent_dma_mask) - device->dev.coherent_dma_mask = - parent->coherent_dma_mask; + if (!device->dev.dma_mask) { + if (parent) + device->dev.dma_mask = parent->dma_mask; + else + WARN_ON_ONCE(true); + } + if (!device->dev.coherent_dma_mask) { + if (parent) + device->dev.coherent_dma_mask = + parent->coherent_dma_mask; + else + WARN_ON_ONCE(true); + } } else { /* * The caller did not provide custom DMA operations. Use the * DMA mapping operations of the parent device. 
*/ + WARN_ON_ONCE(!parent); device->dma_device = parent; } diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index abc5ab581f8214..0a1e96c25ca36e 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -1262,7 +1262,6 @@ int ib_device_register_sysfs(struct ib_device *device, int ret; int i; - WARN_ON_ONCE(!device->dev.parent); ret = dev_set_name(class_dev, "%s", device->name); if (ret) return ret; From d40ad865704b0a3547ca199acba102020ebb947e Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Fri, 12 Jan 2018 07:58:41 +0200 Subject: [PATCH 1986/3326] IB/core: Fix ib_wc structure size to remain in 64 bytes boundary commit cd2a6e7d384b043d5d029e39663061cebc949385 upstream. The change of slid from u16 to u32 results in sizeof(struct ib_wc) cross 64B boundary, which causes more cache misses. This patch rearranges the fields and remain the size to 64B. Pahole output before this change: struct ib_wc { union { u64 wr_id; /* 8 */ struct ib_cqe * wr_cqe; /* 8 */ }; /* 0 8 */ enum ib_wc_status status; /* 8 4 */ enum ib_wc_opcode opcode; /* 12 4 */ u32 vendor_err; /* 16 4 */ u32 byte_len; /* 20 4 */ struct ib_qp * qp; /* 24 8 */ union { __be32 imm_data; /* 4 */ u32 invalidate_rkey; /* 4 */ } ex; /* 32 4 */ u32 src_qp; /* 36 4 */ int wc_flags; /* 40 4 */ u16 pkey_index; /* 44 2 */ /* XXX 2 bytes hole, try to pack */ u32 slid; /* 48 4 */ u8 sl; /* 52 1 */ u8 dlid_path_bits; /* 53 1 */ u8 port_num; /* 54 1 */ u8 smac[6]; /* 55 6 */ /* XXX 1 byte hole, try to pack */ u16 vlan_id; /* 62 2 */ /* --- cacheline 1 boundary (64 bytes) --- */ u8 network_hdr_type; /* 64 1 */ /* size: 72, cachelines: 2, members: 17 */ /* sum members: 62, holes: 2, sum holes: 3 */ /* padding: 7 */ /* last cacheline: 8 bytes */ }; Pahole output after this change: struct ib_wc { union { u64 wr_id; /* 8 */ struct ib_cqe * wr_cqe; /* 8 */ }; /* 0 8 */ enum ib_wc_status status; /* 8 4 */ enum ib_wc_opcode opcode; /* 12 4 */ u32 vendor_err; /* 16 4 */ u32 byte_len; /* 20 4 */ struct ib_qp * qp; /* 24 8 */ union { __be32 imm_data; /* 4 */ u32 invalidate_rkey; /* 4 */ } ex; /* 32 4 */ u32 src_qp; /* 36 4 */ u32 slid; /* 40 4 */ int wc_flags; /* 44 4 */ u16 pkey_index; /* 48 2 */ u8 sl; /* 50 1 */ u8 dlid_path_bits; /* 51 1 */ u8 port_num; /* 52 1 */ u8 smac[6]; /* 53 6 */ /* XXX 1 byte hole, try to pack */ u16 vlan_id; /* 60 2 */ u8 network_hdr_type; /* 62 1 */ /* size: 64, cachelines: 1, members: 17 */ /* sum members: 62, holes: 1, sum holes: 1 */ /* padding: 1 */ }; Fixes: 7db20ecd1d97 ("IB/core: Change wc.slid from 16 to 32 bits") Signed-off-by: Bodong Wang Reviewed-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- include/rdma/ib_verbs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e8608b2dc844fb..6533aa64f0098f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -971,9 +971,9 @@ struct ib_wc { u32 invalidate_rkey; } ex; u32 src_qp; + u32 slid; int wc_flags; u16 pkey_index; - u32 slid; u8 sl; u8 dlid_path_bits; u8 port_num; /* valid only for DR SMPs on switches */ From 1d6eb826e65d8b92d1f8cb081b33bb0786a5a4ab Mon Sep 17 00:00:00 2001 From: "Michael J. Ruhl" Date: Thu, 1 Feb 2018 12:31:06 -0800 Subject: [PATCH 1987/3326] IB/core: Avoid a potential OOPs for an unused optional parameter commit 2ff124d597c2df8696169ce0006fc974c49a4569 upstream. The ev_file is an optional parameter for CQ creation. 
If the parameter is not passed, the ev_file pointer will be NULL. Using that pointer to set the cq_context will result in an OOPs. Verify that ev_file is not NULL before using. Cc: # 4.14.x Fixes: 9ee79fce3642 ("IB/core: Add completion queue (cq) object actions") Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Signed-off-by: Michael J. Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/uverbs_std_types.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index 0a98579700ec27..5f9321eda1b70d 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -315,7 +315,7 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev, cq->uobject = &obj->uobject; cq->comp_handler = ib_uverbs_comp_handler; cq->event_handler = ib_uverbs_cq_event_handler; - cq->cq_context = &ev_file->ev_queue; + cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; obj->uobject.object = cq; obj->uobject.user_handle = user_handle; atomic_set(&cq->usecnt, 0); From 9c2e7a048d19dce8b82e0dc169feb1721d856d37 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Fri, 5 Jan 2018 17:31:18 +0100 Subject: [PATCH 1988/3326] selftests: seccomp: fix compile error seccomp_bpf commit 912ec316686df352028afb6efec59e47a958a24d upstream. aarch64-linux-gnu-gcc -Wl,-no-as-needed -Wall -lpthread seccomp_bpf.c -o seccomp_bpf seccomp_bpf.c: In function 'tracer_ptrace': seccomp_bpf.c:1720:12: error: '__NR_open' undeclared (first use in this function) if (nr == __NR_open) ^~~~~~~~~ seccomp_bpf.c:1720:12: note: each undeclared identifier is reported only once for each function it appears in In file included from seccomp_bpf.c:48:0: seccomp_bpf.c: In function 'TRACE_syscall_ptrace_syscall_dropped': seccomp_bpf.c:1795:39: error: '__NR_open' undeclared (first use in this function) EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_open)); ^ open(2) is a legacy syscall, replaced with openat(2) since 2.6.16. Thus new architectures in the kernel, such as arm64, don't implement these legacy syscalls. Fixes: a33b2d0359a0 ("selftests/seccomp: Add tests for basic ptrace actions") Signed-off-by: Anders Roxell Tested-by: Naresh Kamboju Cc: stable@vger.kernel.org Acked-by: Kees Cook Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/seccomp/seccomp_bpf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 24dbf634e2dd8c..0b457e8e0f0c10 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -1717,7 +1717,7 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, if (nr == __NR_getpid) change_syscall(_metadata, tracee, __NR_getppid); - if (nr == __NR_open) + if (nr == __NR_openat) change_syscall(_metadata, tracee, -1); } @@ -1792,7 +1792,7 @@ TEST_F(TRACE_syscall, ptrace_syscall_dropped) true); /* Tracer should skip the open syscall, resulting in EPERM. 
*/ - EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_open)); + EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat)); } TEST_F(TRACE_syscall, syscall_allowed) From 7dd2dbdd4606fc44da2cbb59fcd1c560c3f3fb94 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 9 Jan 2018 17:26:24 +0100 Subject: [PATCH 1989/3326] kselftest: fix OOM in memory compaction test commit 4c1baad223906943b595a887305f2e8124821dad upstream. Running the compaction_test sometimes results in out-of-memory failures. When I debugged this, it turned out that the code to reset the number of hugepages to the initial value is simply broken since we write into an open sysctl file descriptor multiple times without seeking back to the start. Adding the lseek here fixes the problem. Cc: stable@vger.kernel.org Reported-by: Naresh Kamboju Link: https://bugs.linaro.org/show_bug.cgi?id=3145 Signed-off-by: Arnd Bergmann Signed-off-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/vm/compaction_test.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c index a65b016d4c13a3..1097f04e4d80e6 100644 --- a/tools/testing/selftests/vm/compaction_test.c +++ b/tools/testing/selftests/vm/compaction_test.c @@ -137,6 +137,8 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size) printf("No of huge pages allocated = %d\n", (atoi(nr_hugepages))); + lseek(fd, 0, SEEK_SET); + if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages)) != strlen(initial_nr_hugepages)) { perror("Failed to write value to /proc/sys/vm/nr_hugepages\n"); From 7b4e8a46d4cf2592910275dee6036ceb1d351975 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 9 Jan 2018 11:23:40 -0800 Subject: [PATCH 1990/3326] RDMA/rxe: Fix a race condition related to the QP error state commit 6f301e06de4cf9ab7303f5acd43e64fcd4aa04be upstream. The following sequence: * Change queue pair state into IB_QPS_ERR. * Post a work request on the queue pair. Triggers the following race condition in the rdma_rxe driver: * rxe_qp_error() triggers an asynchronous call of rxe_completer(), the function that examines the QP send queue. * rxe_post_send() posts a work request on the QP send queue. If rxe_completer() runs prior to rxe_post_send(), it will drain the send queue and the driver will assume no further action is necessary. However, once we post the send to the send queue, because the queue is in error, no send completion will ever happen and the send will get stuck. In order to process the send, we need to make sure that rxe_completer() gets run after a send is posted to a queue pair in an error state. This patch ensures that happens. 
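The problematic schedule, spelled out (illustrative; simplified from the description above):

	CPU0: modify QP to IB_QPS_ERR          CPU1: ib_post_send()
	-----------------------------          --------------------
	rxe_qp_error() triggers an async
	run of rxe_completer()
	rxe_completer() finds the send
	queue empty, nothing to flush
	                                       posts the WQE to qp->sq.queue
	                                       (QP already in error, so no
	                                        completion will ever arrive)

The freshly posted WQE is stuck because nothing reschedules the completer. The change below makes rxe_post_send_kernel() kick the completer task whenever the QP is already in the error state, so the new WQE gets flushed instead of lingering forever.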
Signed-off-by: Bart Van Assche Cc: Moni Shoua Cc: # v4.8 Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/sw/rxe/rxe_verbs.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 0b362f49a10a96..afbf701dc9a7ce 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -813,6 +813,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr, (queue_count(qp->sq.queue) > 1); rxe_run_task(&qp->req.task, must_sched); + if (unlikely(qp->req.state == QP_STATE_ERROR)) + rxe_run_task(&qp->comp.task, 1); return err; } From 571cb36fac2f62bfe5fe7fa488f0ea7271db9c2f Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 12 Jan 2018 15:11:58 -0800 Subject: [PATCH 1991/3326] RDMA/rxe: Fix a race condition in rxe_requester() commit 65567e41219888feec72fee1de98ccf1efbbc16d upstream. The rxe driver works as follows: * The send queue, receive queue and completion queues are implemented as circular buffers. * ib_post_send() and ib_post_recv() calls are serialized through a spinlock. * Removing elements from various queues happens from tasklet context. Tasklets are guaranteed to run on at most one CPU. This serializes access to these queues. See also rxe_completer(), rxe_requester() and rxe_responder(). * rxe_completer() processes the skbs queued onto qp->resp_pkts. * rxe_requester() handles the send queue (qp->sq.queue). * rxe_responder() processes the skbs queued onto qp->req_pkts. Since rxe_drain_req_pkts() processes qp->req_pkts, calling rxe_drain_req_pkts() from rxe_requester() is racy. Hence this patch. Reported-by: Moni Shoua Signed-off-by: Bart Van Assche Cc: stable@vger.kernel.org Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/sw/rxe/rxe_loc.h | 1 - drivers/infiniband/sw/rxe/rxe_req.c | 9 +-------- drivers/infiniband/sw/rxe/rxe_resp.c | 2 +- 3 files changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 77b3ed0df93637..7f945f65d8cd24 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, void rxe_release(struct kref *kref); -void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify); int rxe_completer(void *arg); int rxe_requester(void *arg); int rxe_responder(void *arg); diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index d84222f9d5d2ed..44b838ec9420dc 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -594,15 +594,8 @@ int rxe_requester(void *arg) rxe_add_ref(qp); next_wqe: - if (unlikely(!qp->valid)) { - rxe_drain_req_pkts(qp, true); + if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) goto exit; - } - - if (unlikely(qp->req.state == QP_STATE_ERROR)) { - rxe_drain_req_pkts(qp, true); - goto exit; - } if (unlikely(qp->req.state == QP_STATE_RESET)) { qp->req.wqe_index = consumer_index(qp->sq.queue); diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 4240866a533192..01f926fd90296e 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -1210,7 +1210,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp) } } -void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) +static void rxe_drain_req_pkts(struct rxe_qp 
*qp, bool notify) { struct sk_buff *skb; From 75a3f11c7bad580dc5fbf4175f5dc39192f088d5 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 12 Jan 2018 15:11:59 -0800 Subject: [PATCH 1992/3326] RDMA/rxe: Fix rxe_qp_cleanup() commit bb3ffb7ad48a21e98a5c64eb21103a74fd9f03f6 upstream. rxe_qp_cleanup() can sleep so it must be run in thread context and not in atomic context. This patch avoids that the following bug is triggered: Kernel BUG at 00000000560033f3 [verbose debug info unavailable] BUG: sleeping function called from invalid context at net/core/sock.c:2761 in_atomic(): 1, irqs_disabled(): 0, pid: 7, name: ksoftirqd/0 INFO: lockdep is turned off. Preemption disabled at: [<00000000b6e69628>] __do_softirq+0x4e/0x540 CPU: 0 PID: 7 Comm: ksoftirqd/0 Not tainted 4.15.0-rc7-dbg+ #4 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.0.0-prebuilt.qemu-project.org 04/01/2014 Call Trace: dump_stack+0x85/0xbf ___might_sleep+0x177/0x260 lock_sock_nested+0x1d/0x90 inet_shutdown+0x2e/0xd0 rxe_qp_cleanup+0x107/0x140 [rdma_rxe] rxe_elem_release+0x18/0x80 [rdma_rxe] rxe_requester+0x1cf/0x11b0 [rdma_rxe] rxe_do_task+0x78/0xf0 [rdma_rxe] tasklet_action+0x99/0x270 __do_softirq+0xc0/0x540 run_ksoftirqd+0x1c/0x70 smpboot_thread_fn+0x1be/0x270 kthread+0x117/0x130 ret_from_fork+0x24/0x30 Signed-off-by: Bart Van Assche Cc: Moni Shoua Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/sw/rxe/rxe_qp.c | 12 ++++++++++-- drivers/infiniband/sw/rxe/rxe_verbs.h | 3 +++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 00bda9380a2e45..aeea994b04c437 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp) } /* called when the last reference to the qp is dropped */ -void rxe_qp_cleanup(struct rxe_pool_entry *arg) +static void rxe_qp_do_cleanup(struct work_struct *work) { - struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); + struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); rxe_drop_all_mcast_groups(qp); @@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg) kernel_sock_shutdown(qp->sk, SHUT_RDWR); sock_release(qp->sk); } + +/* called when the last reference to the qp is dropped */ +void rxe_qp_cleanup(struct rxe_pool_entry *arg) +{ + struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); + + execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work); +} diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index 0c2dbe45c729d9..1019f5e7dbddd2 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -35,6 +35,7 @@ #define RXE_VERBS_H #include +#include #include #include "rxe_pool.h" #include "rxe_task.h" @@ -281,6 +282,8 @@ struct rxe_qp { struct timer_list rnr_nak_timer; spinlock_t state_lock; /* guard requester and completer */ + + struct execute_work cleanup_work; }; enum rxe_mem_state { From 8e56a935a4b751bc8cb5288480b23827f8d2564d Mon Sep 17 00:00:00 2001 From: Shilpasri G Bhat Date: Fri, 12 Jan 2018 12:43:53 +0530 Subject: [PATCH 1993/3326] cpufreq: powernv: Dont assume distinct pstate values for nominal and pmin commit 3fa4680b860bf48b437d6a2c039789c4abe202ae upstream. Some OpenPOWER boxes can have same pstate values for nominal and pmin pstates. 
In these boxes the current code will not initialize 'powernv_pstate_info.min' variable and result in erroneous CPU frequency reporting. This patch fixes this problem. Fixes: 09ca4c9b5958 (cpufreq: powernv: Replacing pstate_id with frequency table index) Reported-by: Alvin Wang Signed-off-by: Shilpasri G Bhat Acked-by: Viresh Kumar Cc: 4.8+ # 4.8+ Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/cpufreq/powernv-cpufreq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 3ff5160451b436..7e1e5bbcf43042 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -287,9 +287,9 @@ static int init_powernv_pstates(void) if (id == pstate_max) powernv_pstate_info.max = i; - else if (id == pstate_nominal) + if (id == pstate_nominal) powernv_pstate_info.nominal = i; - else if (id == pstate_min) + if (id == pstate_min) powernv_pstate_info.min = i; if (powernv_pstate_info.wof_enabled && id == pstate_turbo) { From 37efa60e167036b3eaed20e70095bbdcb72a55ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 4 Jan 2018 14:24:19 +0100 Subject: [PATCH 1994/3326] swiotlb: suppress warning when __GFP_NOWARN is set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d0bc0c2a31c95002d37c3cc511ffdcab851b3256 upstream. TTM tries to allocate coherent memory in chunks of 2MB first to improve TLB efficiency and falls back to allocating 4K pages if that fails. Suppress the warning when the 2MB allocations fails since there is a valid fall back path. Signed-off-by: Christian König Reported-by: Mike Galbraith Acked-by: Konrad Rzeszutek Wilk Bug: https://bugs.freedesktop.org/show_bug.cgi?id=104082 Signed-off-by: Christoph Hellwig Signed-off-by: Greg Kroah-Hartman --- lib/swiotlb.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 8c6c83ef57a433..20df2fd9b15015 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -585,7 +585,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, not_found: spin_unlock_irqrestore(&io_tlb_lock, flags); - if (printk_ratelimit()) + if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size); return SWIOTLB_MAP_ERROR; found: @@ -712,6 +712,7 @@ void * swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { + bool warn = !(flags & __GFP_NOWARN); dma_addr_t dev_addr; void *ret; int order = get_order(size); @@ -737,8 +738,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, * GFP_DMA memory; fall back on map_single(), which * will grab memory from the lowest available address range. */ - phys_addr_t paddr = map_single(hwdev, 0, size, - DMA_FROM_DEVICE, 0); + phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE, + warn ? 
0 : DMA_ATTR_NO_WARN); if (paddr == SWIOTLB_MAP_ERROR) goto err_warn; @@ -768,9 +769,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, return ret; err_warn: - pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n", - dev_name(hwdev), size); - dump_stack(); + if (warn && printk_ratelimit()) { + pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n", + dev_name(hwdev), size); + dump_stack(); + } return NULL; } From 62def1d604a5473055b1cce23b55b7639302710a Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Sun, 5 Nov 2017 21:27:41 -0800 Subject: [PATCH 1995/3326] PM / devfreq: Propagate error from devfreq_add_device() commit d1bf2d30728f310f72296b54f0651ecdb09cbb12 upstream. Propagate the error of devfreq_add_device() in devm_devfreq_add_device() rather than statically returning ENOMEM. This makes it slightly faster to pinpoint the cause of a returned error. Fixes: 8cd84092d35e ("PM / devfreq: Add resource-managed function for devfreq device") Cc: stable@vger.kernel.org Acked-by: Chanwoo Choi Signed-off-by: Bjorn Andersson Signed-off-by: MyungJoo Ham Signed-off-by: Greg Kroah-Hartman --- drivers/devfreq/devfreq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index a1c4ee818614d2..202476fbbc4c02 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -676,7 +676,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev, devfreq = devfreq_add_device(dev, profile, governor_name, data); if (IS_ERR(devfreq)) { devres_free(ptr); - return ERR_PTR(-ENOMEM); + return devfreq; } *ptr = devfreq; From 1ec4c78e3444c317c724820f0f40324e65470654 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 12 Jan 2018 13:08:37 -0800 Subject: [PATCH 1996/3326] mwifiex: resolve reset vs. remove()/shutdown() deadlocks commit a64e7a79dd6030479caad603c8d78e6c9c14904f upstream. Commit b014e96d1abb ("PCI: Protect pci_error_handlers->reset_notify() usage with device_lock()") resolves races between driver reset and removal, but it introduces some new deadlock problems. If we see a timeout while we've already started suspending, removing, or shutting down the driver, we might see: (a) a worker thread, running mwifiex_pcie_work() -> mwifiex_pcie_card_reset_work() -> pci_reset_function() (b) a removal thread, running mwifiex_pcie_remove() -> mwifiex_free_adapter() -> mwifiex_unregister() -> mwifiex_cleanup_pcie() -> cancel_work_sync(&card->work) Unfortunately, mwifiex_pcie_remove() already holds the device lock that pci_reset_function() is now requesting, and so we see a deadlock. It's necessary to cancel and synchronize our outstanding work before tearing down the driver, so we can't have this work wait indefinitely for the lock. It's reasonable to only "try" to reset here, since this will mostly happen for cases where it's already difficult to reset the firmware anyway (e.g., while we're suspending or powering off the system). And if reset *really* needs to happen, we can always try again later. 
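The deadlock, spelled out (illustrative; these are the call chains (a) and (b) quoted above):

	worker thread                          removal thread
	-------------------------------        -------------------------------
	mwifiex_pcie_work()                    mwifiex_pcie_remove()
	-> mwifiex_pcie_card_reset_work()      (runs with the device lock held)
	-> pci_reset_function()                -> ... -> mwifiex_cleanup_pcie()
	   blocks waiting for the device       -> cancel_work_sync(&card->work)
	   lock                                   blocks waiting for the worker

Each side waits on the other indefinitely. pci_try_reset_function(), used in the hunk below, backs off instead of blocking on the device lock, breaking the cycle at the cost of occasionally skipping a reset attempt.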
Fixes: b014e96d1abb ("PCI: Protect pci_error_handlers->reset_notify() usage with device_lock()") Cc: Cc: Xinming Hu Signed-off-by: Brian Norris Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/marvell/mwifiex/pcie.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index cd314946452c13..9511f5fe62f435 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2781,7 +2781,10 @@ static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - pci_reset_function(card->dev); + /* We can't afford to wait here; remove() might be waiting on us. If we + * can't grab the device lock, maybe we'll get another chance later. + */ + pci_try_reset_function(card->dev); } static void mwifiex_pcie_work(struct work_struct *work) From 2e7e8bd8f19550c56ebc56158f60626546b7c564 Mon Sep 17 00:00:00 2001 From: Gang He Date: Wed, 31 Jan 2018 16:14:48 -0800 Subject: [PATCH 1997/3326] ocfs2: try a blocking lock before return AOP_TRUNCATED_PAGE commit ff26cc10aec128c3f86b5611fd5f59c71d49c0e3 upstream. If we can't get inode lock immediately in the function ocfs2_inode_lock_with_page() when reading a page, we should not return directly here, since this will lead to a softlockup problem when the kernel is configured with CONFIG_PREEMPT is not set. The method is to get a blocking lock and immediately unlock before returning, this can avoid CPU resource waste due to lots of retries, and benefits fairness in getting lock among multiple nodes, increase efficiency in case modifying the same file frequently from multiple nodes. The softlockup crash (when set /proc/sys/kernel/softlockup_panic to 1) looks like: Kernel panic - not syncing: softlockup: hung tasks CPU: 0 PID: 885 Comm: multi_mmap Tainted: G L 4.12.14-6.1-default #1 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 Call Trace: dump_stack+0x5c/0x82 panic+0xd5/0x21e watchdog_timer_fn+0x208/0x210 __hrtimer_run_queues+0xcc/0x200 hrtimer_interrupt+0xa6/0x1f0 smp_apic_timer_interrupt+0x34/0x50 apic_timer_interrupt+0x96/0xa0 RIP: 0010:unlock_page+0x17/0x30 RSP: 0000:ffffaf154080bc88 EFLAGS: 00000246 ORIG_RAX: ffffffffffffff10 RAX: dead000000000100 RBX: fffff21e009f5300 RCX: 0000000000000004 RDX: dead0000000000ff RSI: 0000000000000202 RDI: fffff21e009f5300 RBP: 0000000000000000 R08: 0000000000000000 R09: ffffaf154080bb00 R10: ffffaf154080bc30 R11: 0000000000000040 R12: ffff993749a39518 R13: 0000000000000000 R14: fffff21e009f5300 R15: fffff21e009f5300 ocfs2_inode_lock_with_page+0x25/0x30 [ocfs2] ocfs2_readpage+0x41/0x2d0 [ocfs2] filemap_fault+0x12b/0x5c0 ocfs2_fault+0x29/0xb0 [ocfs2] __do_fault+0x1a/0xa0 __handle_mm_fault+0xbe8/0x1090 handle_mm_fault+0xaa/0x1f0 __do_page_fault+0x235/0x4b0 trace_do_page_fault+0x3c/0x110 async_page_fault+0x28/0x30 RIP: 0033:0x7fa75ded638e RSP: 002b:00007ffd6657db18 EFLAGS: 00010287 RAX: 000055c7662fb700 RBX: 0000000000000001 RCX: 000055c7662fb700 RDX: 0000000000001770 RSI: 00007fa75e909000 RDI: 000055c7662fb700 RBP: 0000000000000003 R08: 000000000000000e R09: 0000000000000000 R10: 0000000000000483 R11: 00007fa75ded61b0 R12: 00007fa75e90a770 R13: 000000000000000e R14: 0000000000001770 R15: 0000000000000000 About performance improvement, we can see the testing time is reduced, and CPU utilization decreases, the detailed data is as follows. 
I ran multi_mmap test case in ocfs2-test package in a three nodes cluster. Before applying this patch: PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 2754 ocfs2te+ 20 0 170248 6980 4856 D 80.73 0.341 0:18.71 multi_mmap 1505 root rt 0 222236 123060 97224 S 2.658 6.015 0:01.44 corosync 5 root 20 0 0 0 0 S 1.329 0.000 0:00.19 kworker/u8:0 95 root 20 0 0 0 0 S 1.329 0.000 0:00.25 kworker/u8:1 2728 root 20 0 0 0 0 S 0.997 0.000 0:00.24 jbd2/sda1-33 2721 root 20 0 0 0 0 S 0.664 0.000 0:00.07 ocfs2dc-3C8CFD4 2750 ocfs2te+ 20 0 142976 4652 3532 S 0.664 0.227 0:00.28 mpirun ocfs2test@tb-node2:~>multiple_run.sh -i ens3 -k ~/linux-4.4.21-69.tar.gz -o ~/ocfs2mullog -C hacluster -s pcmk -n tb-node2,tb-node1,tb-node3 -d /dev/sda1 -b 4096 -c 32768 -t multi_mmap /mnt/shared Tests with "-b 4096 -C 32768" Thu Dec 28 14:44:52 CST 2017 multi_mmap..................................................Passed. Runtime 783 seconds. After apply this patch: PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 2508 ocfs2te+ 20 0 170248 6804 4680 R 54.00 0.333 0:55.37 multi_mmap 155 root 20 0 0 0 0 S 2.667 0.000 0:01.20 kworker/u8:3 95 root 20 0 0 0 0 S 2.000 0.000 0:01.58 kworker/u8:1 2504 ocfs2te+ 20 0 142976 4604 3480 R 1.667 0.225 0:01.65 mpirun 5 root 20 0 0 0 0 S 1.000 0.000 0:01.36 kworker/u8:0 2482 root 20 0 0 0 0 S 1.000 0.000 0:00.86 jbd2/sda1-33 299 root 0 -20 0 0 0 S 0.333 0.000 0:00.13 kworker/2:1H 335 root 0 -20 0 0 0 S 0.333 0.000 0:00.17 kworker/1:1H 535 root 20 0 12140 7268 1456 S 0.333 0.355 0:00.34 haveged 1282 root rt 0 222284 123108 97224 S 0.333 6.017 0:01.33 corosync ocfs2test@tb-node2:~>multiple_run.sh -i ens3 -k ~/linux-4.4.21-69.tar.gz -o ~/ocfs2mullog -C hacluster -s pcmk -n tb-node2,tb-node1,tb-node3 -d /dev/sda1 -b 4096 -c 32768 -t multi_mmap /mnt/shared Tests with "-b 4096 -C 32768" Thu Dec 28 15:04:12 CST 2017 multi_mmap..................................................Passed. Runtime 487 seconds. Link: http://lkml.kernel.org/r/1514447305-30814-1-git-send-email-ghe@suse.com Fixes: 1cce4df04f37 ("ocfs2: do not lock/unlock() inode DLM lock") Signed-off-by: Gang He Reviewed-by: Eric Ren Acked-by: alex chen Acked-by: piaojun Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Joseph Qi Cc: Changwei Ge Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/ocfs2/dlmglue.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 4689940a953c2f..5193218f588982 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2486,6 +2486,15 @@ int ocfs2_inode_lock_with_page(struct inode *inode, ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK); if (ret == -EAGAIN) { unlock_page(page); + /* + * If we can't get inode lock immediately, we should not return + * directly here, since this will lead to a softlockup problem. + * The method is to get a blocking lock and immediately unlock + * before returning, this can avoid CPU resource waste due to + * lots of retries, and benefits fairness in getting lock. + */ + if (ocfs2_inode_lock(inode, ret_bh, ex) == 0) + ocfs2_inode_unlock(inode, ex); ret = AOP_TRUNCATED_PAGE; } From 5b98d31481f9e216e6df6b31e5a959db259c055e Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Thu, 30 Nov 2017 14:35:54 +0530 Subject: [PATCH 1998/3326] powerpc/radix: Remove trace_tlbie call from radix__flush_tlb_all commit 8d81296cfcce89013a714feb8d25004a156f8181 upstream. 
radix__flush_tlb_all() is called only in kexec path in real mode and any tracepoints at this stage will make kexec to fail if enabled. To verify enable tlbie trace before kexec. $ echo 1 > /sys/kernel/debug/tracing/events/powerpc/tlbie/enable == kexec into new kernel and kexec fails. Fix this by not calling trace_tlbie from radix__flush_tlb_all(). Fixes: 0428491cba92 ("powerpc/mm: Trace tlbie(l) instructions") Cc: stable@vger.kernel.org # v4.13+ Signed-off-by: Mahesh Salgaonkar Acked-by: Balbir Singh Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/tlb-radix.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index d304028641a233..4b295cfd5f7e88 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -453,14 +453,12 @@ void radix__flush_tlb_all(void) */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory"); - trace_tlbie(0, 0, rb, rs, ric, prs, r); /* * now flush host entires by passing PRS = 0 and LPID == 0 */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory"); asm volatile("eieio; tlbsync; ptesync": : :"memory"); - trace_tlbie(0, 0, rb, 0, ric, prs, r); } void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm, From 4386f223b41868197080bd74882c2d31aca92a1a Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Fri, 26 Jan 2018 13:41:59 -0600 Subject: [PATCH 1999/3326] powerpc/numa: Invalidate numa_cpu_lookup_table on cpu remove commit 1d9a090783bef19fe8cdec878620d22f05191316 upstream. When DLPAR removing a CPU, the unmapping of the cpu from a node in unmap_cpu_from_node() should also invalidate the CPUs entry in the numa_cpu_lookup_table. There is not a guarantee that on a subsequent DLPAR add of the CPU the associativity will be the same and thus could be in a different node. Invalidating the entry in the numa_cpu_lookup_table causes the associativity to be read from the device tree at the time of the add. The current behavior of not invalidating the CPUs entry in the numa_cpu_lookup_table can result in scenarios where the the topology layout of CPUs in the partition does not match the device tree or the topology reported by the HMC. This bug looks like it was introduced in 2004 in the commit titled "ppc64: cpu hotplug notifier for numa", which is 6b15e4e87e32 in the linux-fullhist tree. Hence tag it for all stable releases. 
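For context, a minimal sketch (simplified, not the actual pseries hotplug path) of the remove-side behaviour this change restores: the cached CPU-to-node entry is invalidated on removal, so a later DLPAR add re-reads the associativity from the device tree instead of reusing a stale node.

/* Sketch only: invalidate the cached CPU->node mapping on remove. */
static void my_remove_cpu(unsigned int cpu)
{
	set_cpu_present(cpu, false);
	set_hard_smp_processor_id(cpu, -1);
	update_numa_cpu_lookup_table(cpu, -1);	/* forget the stale node */
}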
Cc: stable@vger.kernel.org Signed-off-by: Nathan Fontenot Reviewed-by: Tyrel Datwyler Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/topology.h | 5 +++++ arch/powerpc/mm/numa.c | 5 ----- arch/powerpc/platforms/pseries/hotplug-cpu.c | 2 ++ 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 023ff9f17501fa..d5f2ee882f7485 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid); extern void sysfs_remove_device_from_node(struct device *dev, int nid); extern int numa_update_cpu_topology(bool cpus_locked); +static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) +{ + numa_cpu_lookup_table[cpu] = node; +} + static inline int early_cpu_to_node(int cpu) { int nid; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index a51df9ef529d93..a81279249bfb14 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -142,11 +142,6 @@ static void reset_numa_cpu_lookup_table(void) numa_cpu_lookup_table[cpu] = -1; } -static void update_numa_cpu_lookup_table(unsigned int cpu, int node) -{ - numa_cpu_lookup_table[cpu] = node; -} - static void map_cpu_to_node(int cpu, int node) { update_numa_cpu_lookup_table(cpu, node); diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fadb95efbb9e18..b1ac8ac38434b8 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "pseries.h" #include "offline_states.h" @@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np) BUG_ON(cpu_online(cpu)); set_cpu_present(cpu, false); set_hard_smp_processor_id(cpu, -1); + update_numa_cpu_lookup_table(cpu, -1); break; } if (cpu >= nr_cpu_ids) From 892674b5058a2b15d35d1185f5d09d8f4c257a7e Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 1 Feb 2018 16:09:44 +1100 Subject: [PATCH 2000/3326] powerpc/mm: Flush radix process translations when setting MMU type commit 62e984ddfd6b056d399e24113f5e6a7145e579d8 upstream. Radix guests do normally invalidate process-scoped translations when a new pid is allocated but migrated guests do not invalidate these so migrated guests crash sometime, especially easy to reproduce with migration happening within first 10 seconds after the guest boot start on the same machine. This adds the "Invalidate process-scoped translations" flush to fix radix guests migration. 
Fixes: 2ee13be34b13 ("KVM: PPC: Book3S HV: Update kvmppc_set_arch_compat() for ISA v3.00") Cc: stable@vger.kernel.org # v4.10+ Signed-off-by: Alexey Kardashevskiy Tested-by: Laurent Vivier Tested-by: Daniel Henrique Barboza Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/pgtable_64.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index ac0717a90ca6bd..12f95b1f7d078a 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -483,6 +483,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0, if (old & PATB_HR) { asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); + asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1); } else { asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : From 3b09911d3beb326a117bfb05c717c3021e5458ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Tue, 13 Feb 2018 09:47:12 +0100 Subject: [PATCH 2001/3326] powerpc/xive: Use hw CPU ids when configuring the CPU queues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 8e036c8d30a2cd9d8fc7442fbf6824e0a3e986e7 upstream. The CPU event notification queues on sPAPR should be configured using a hardware CPU identifier. The problem did not show up on the Power Hypervisor because pHyp supports 8 threads per core which keeps CPU number contiguous. This is not the case on all sPAPR virtual machines, some use SMT=1. Also improve error logging by adding the CPU number. Fixes: eac1e731b59e ("powerpc/xive: guest exploitation of the XIVE interrupt controller") Cc: stable@vger.kernel.org # v4.14+ Signed-off-by: Cédric Le Goater Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/sysdev/xive/spapr.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index d9c4c936604918..091f1d0d0af190 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size); if (rc) { - pr_err("Error %lld getting queue info prio %d\n", rc, prio); + pr_err("Error %lld getting queue info CPU %d prio %d\n", rc, + target, prio); rc = -EIO; goto fail; } @@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, /* Configure and enable the queue in HW */ rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order); if (rc) { - pr_err("Error %lld setting queue for prio %d\n", rc, prio); + pr_err("Error %lld setting queue for CPU %d prio %d\n", rc, + target, prio); rc = -EIO; } else { q->qpage = qpage; @@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc, if (IS_ERR(qpage)) return PTR_ERR(qpage); - return xive_spapr_configure_queue(cpu, q, prio, qpage, - xive_queue_shift); + return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu), + q, prio, qpage, xive_queue_shift); } static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, @@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, struct xive_q *q = &xc->queue[prio]; unsigned int alloc_order; long rc; + int hw_cpu = 
get_hard_smp_processor_id(cpu); - rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0); + rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0); if (rc) - pr_err("Error %ld setting queue for prio %d\n", rc, prio); + pr_err("Error %ld setting queue for CPU %d prio %d\n", rc, + hw_cpu, prio); alloc_order = xive_alloc_order(xive_queue_shift); free_pages((unsigned long)q->qpage, alloc_order); From 40be210c830e80642ce5f4e3d726864ec22d5f50 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 10 Nov 2017 12:15:00 +1100 Subject: [PATCH 2002/3326] powerpc: Fix DABR match on hash based systems commit f23ab3efb1b30cc5c5ef5ae4ef294ed467f30675 upstream. Commit 398a719d34a1 ("powerpc/mm: Update bits used to skip hash_page") mistakenly dropped the DSISR_DABRMATCH bit from the mask of bit tested to skip trying to hash a page. As a result, the DABR matches would no longer be detected. This adds it back. We open code it in the 2 places where it matters rather than fold it into DSISR_BAD_FAULT_32S/64S because this isn't technically a bad fault and while we would never hit it with the current code, I prefer if page_fault_is_bad() didn't trigger on these. Fixes: 398a719d34a1 ("powerpc/mm: Update bits used to skip hash_page") Cc: stable@vger.kernel.org # v4.14 Tested-by: Pedro Miraglia Franco de Carvalho Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Naveen N. Rao Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/exceptions-64s.S | 2 +- arch/powerpc/kernel/head_32.S | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index e9f72abc52b72b..e91b40aa541739 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1617,7 +1617,7 @@ USE_TEXT_SECTION() .balign IFETCH_ALIGN_BYTES do_hash_page: #ifdef CONFIG_PPC_STD_MMU_64 - lis r0,DSISR_BAD_FAULT_64S@h + lis r0,(DSISR_BAD_FAULT_64S|DSISR_DABRMATCH)@h ori r0,r0,DSISR_BAD_FAULT_64S@l and. r0,r4,r0 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 8c54166491e7cf..29b2fed9328973 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -388,7 +388,7 @@ DataAccess: EXCEPTION_PROLOG mfspr r10,SPRN_DSISR stw r10,_DSISR(r11) - andis. r0,r10,DSISR_BAD_FAULT_32S@h + andis. r0,r10,(DSISR_BAD_FAULT_32S|DSISR_DABRMATCH)@h bne 1f /* if not, try to put a PTE */ mfspr r4,SPRN_DAR /* into the hash table */ rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ From f25dfc9359cc5e173a5b94e407c6417f5889c9c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 22 Jan 2018 21:00:03 +0100 Subject: [PATCH 2003/3326] dma-buf: fix reservation_object_wait_timeout_rcu once more v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5bffee867df7494ecd32c1e6ec4e8fc934c521b7 upstream. We need to set shared_count even if we already have a fence to wait for. 
v2: init i to -1 as well Signed-off-by: Christian König Cc: stable@vger.kernel.org Tested-by: Lyude Paul Reviewed-by: Lyude Paul Reviewed-by: Chris Wilson Signed-off-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20180122200003.6665-1-christian.koenig@amd.com Signed-off-by: Greg Kroah-Hartman --- drivers/dma-buf/reservation.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index b44d9d7db347b4..012fa3d1f407f4 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -455,13 +455,15 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, unsigned long timeout) { struct dma_fence *fence; - unsigned seq, shared_count, i = 0; + unsigned seq, shared_count; long ret = timeout ? timeout : 1; + int i; retry: shared_count = 0; seq = read_seqcount_begin(&obj->seq); rcu_read_lock(); + i = -1; fence = rcu_dereference(obj->fence_excl); if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { @@ -477,14 +479,14 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, fence = NULL; } - if (!fence && wait_all) { + if (wait_all) { struct reservation_object_list *fobj = rcu_dereference(obj->fence); if (fobj) shared_count = fobj->shared_count; - for (i = 0; i < shared_count; ++i) { + for (i = 0; !fence && i < shared_count; ++i) { struct dma_fence *lfence = rcu_dereference(fobj->shared[i]); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, From 27f97375fcb86750cdf82a65dab8174bb991c238 Mon Sep 17 00:00:00 2001 From: Eugene Syromiatnikov Date: Mon, 15 Jan 2018 20:38:17 +0100 Subject: [PATCH 2004/3326] s390: fix handling of -1 in set{,fs}[gu]id16 syscalls commit 6dd0d2d22aa363fec075cb2577ba273ac8462e94 upstream. For some reason, the implementation of some 16-bit ID system calls (namely, setuid16/setgid16 and setfsuid16/setfsgid16) used type cast instead of low2highgid/low2highuid macros for converting [GU]IDs, which led to incorrect handling of value of -1 (which ought to be considered invalid). Discovered by strace test suite. 
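To illustrate why the macro matters (a simplified model with a hypothetical name, not the verbatim definition from include/linux/highuid.h): the 16-bit "invalid/unchanged" value -1 has to be widened to the 32-bit -1, whereas a plain cast turns 0xffff into 65535 and the setuid() family then treats it as a real ID.

/* Simplified model of the conversion (illustrative only): */
typedef unsigned short old_uid_t;

static inline uid_t my_low2highuid(old_uid_t uid)
{
	/* 0xffff means "invalid/unchanged" and must stay -1 ... */
	return uid == (old_uid_t)-1 ? (uid_t)-1 : (uid_t)uid;
}
/* ... whereas (uid_t)uid alone would hand 65535 to sys_setuid(). */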
Cc: stable@vger.kernel.org Signed-off-by: Eugene Syromiatnikov Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/compat_linux.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 59eea9c65d3e9e..79b7a3438d549e 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid) COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid) { - return sys_setgid((gid_t)gid); + return sys_setgid(low2highgid(gid)); } COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) @@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid) { - return sys_setuid((uid_t)uid); + return sys_setuid(low2highuid(uid)); } COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid) @@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp, COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid) { - return sys_setfsuid((uid_t)uid); + return sys_setfsuid(low2highuid(uid)); } COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid) { - return sys_setfsgid((gid_t)gid); + return sys_setfsgid(low2highgid(gid)); } static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info) From bd420093636759d081bb31b343807efc5bbc58d3 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Thu, 30 Nov 2017 21:27:32 -0800 Subject: [PATCH 2005/3326] arm64: dts: msm8916: Correct ipc references for smsm commit 566bd8902e7fa20bd412ed753e09e89c1c96939c upstream. SMSM is not symmetrical, the incoming bits from WCNSS are available at index 6, but the outgoing host id for WCNSS is 3. Further more, upstream references the base of APCS (in contrast to downstream), so the register offset of 8 must be included. Fixes: 1fb47e0a9ba4 ("arm64: dts: qcom: msm8916: Add smsm and smp2p nodes") Cc: stable@vger.kernel.org Reported-by: Ramon Fried Signed-off-by: Bjorn Andersson Signed-off-by: Andy Gross Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/qcom/msm8916.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index dc3817593e1447..8e5421a0b43d8b 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -1430,8 +1430,8 @@ #address-cells = <1>; #size-cells = <0>; - qcom,ipc-1 = <&apcs 0 13>; - qcom,ipc-6 = <&apcs 0 19>; + qcom,ipc-1 = <&apcs 8 13>; + qcom,ipc-3 = <&apcs 8 19>; apps_smsm: apps@0 { reg = <0>; From 34d58f7ccb65c9a2d2c9221b282e556e08e5a801 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 10 Jan 2018 17:22:00 +0100 Subject: [PATCH 2006/3326] ARM: lpc3250: fix uda1380 gpio numbers commit ca32e0c4bf9ca4b87089c5a5ce945e5f2ec890da upstream. 
dtc warns about obviously incorrect GPIO numbers for the audio codec on both lpc32xx boards: arch/arm/boot/dts/lpc3250-phy3250.dtb: Warning (gpios_property): reset-gpio property size (12) too small for cell size 3 in /ahb/apb/i2c@400A0000/uda1380@18 arch/arm/boot/dts/lpc3250-phy3250.dtb: Warning (gpios_property): power-gpio property size (12) too small for cell size 3 in /ahb/apb/i2c@400A0000/uda1380@18 arch/arm/boot/dts/lpc3250-ea3250.dtb: Warning (gpios_property): reset-gpio property size (12) too small for cell size 3 in /ahb/apb/i2c@400A0000/uda1380@18 arch/arm/boot/dts/lpc3250-ea3250.dtb: Warning (gpios_property): power-gpio property size (12) too small for cell size 3 in /ahb/apb/i2c@400A0000/uda1380@18 It looks like the nodes are written for a different binding that combines the GPIO number into a single number rather than a bank/number pair. I found the right numbers on stackexchange.com, so this patch fixes the warning and has a reasonable chance of getting things to actually work. Cc: stable@vger.kernel.org Link: https://unix.stackexchange.com/questions/59497/alsa-asoc-how-to-correctly-load-devices-drivers/62217#62217 Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/lpc3250-ea3250.dts | 4 ++-- arch/arm/boot/dts/lpc3250-phy3250.dts | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts index 52b3ed10283a7a..e2bc731079be8d 100644 --- a/arch/arm/boot/dts/lpc3250-ea3250.dts +++ b/arch/arm/boot/dts/lpc3250-ea3250.dts @@ -156,8 +156,8 @@ uda1380: uda1380@18 { compatible = "nxp,uda1380"; reg = <0x18>; - power-gpio = <&gpio 0x59 0>; - reset-gpio = <&gpio 0x51 0>; + power-gpio = <&gpio 3 10 0>; + reset-gpio = <&gpio 3 2 0>; dac-clk = "wspll"; }; diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts index fd95e2b1035775..b7bd3a110a8dd0 100644 --- a/arch/arm/boot/dts/lpc3250-phy3250.dts +++ b/arch/arm/boot/dts/lpc3250-phy3250.dts @@ -81,8 +81,8 @@ uda1380: uda1380@18 { compatible = "nxp,uda1380"; reg = <0x18>; - power-gpio = <&gpio 0x59 0>; - reset-gpio = <&gpio 0x51 0>; + power-gpio = <&gpio 3 10 0>; + reset-gpio = <&gpio 3 2 0>; dac-clk = "wspll"; }; From 9be4b2f6d024c89affe63360475fba9b167d7230 Mon Sep 17 00:00:00 2001 From: Patrice Chotard Date: Wed, 10 Jan 2018 09:21:02 +0100 Subject: [PATCH 2007/3326] ARM: dts: STi: Add gpio polarity for "hdmi,hpd-gpio" property commit 7ac1f59c09a61e6af6622df6809e003b0af07f3d upstream. 
The GPIO polarity is missing in the hdmi,hpd-gpio property, this fixes the following DT warnings: arch/arm/boot/dts/stih410-b2120.dtb: Warning (gpios_property): hdmi,hpd-gpio property size (8) too small for cell size 2 in /soc/sti-display-subsystem/sti-hdmi@8d04000 arch/arm/boot/dts/stih407-b2120.dtb: Warning (gpios_property): hdmi,hpd-gpio property size (8) too small for cell size 2 in /soc/sti-display-subsystem/sti-hdmi@8d04000 arch/arm/boot/dts/stih410-b2260.dtb: Warning (gpios_property): hdmi,hpd-gpio property size (8) too small for cell size 2 in /soc/sti-display-subsystem/sti-hdmi@8d04000 [arnd: marked Cc:stable since this warning shows up with the latest dtc by default, and is more likely to actually cause problems than the other patches from this series] Cc: stable@vger.kernel.org Signed-off-by: Patrice Chotard Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/stih407.dtsi | 3 ++- arch/arm/boot/dts/stih410.dtsi | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi index fa149837df14a0..11fdecd9312e1b 100644 --- a/arch/arm/boot/dts/stih407.dtsi +++ b/arch/arm/boot/dts/stih407.dtsi @@ -8,6 +8,7 @@ */ #include "stih407-clock.dtsi" #include "stih407-family.dtsi" +#include / { soc { sti-display-subsystem { @@ -122,7 +123,7 @@ <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>; - hdmi,hpd-gpio = <&pio5 3>; + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; reset-names = "hdmi"; resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; ddc = <&hdmiddc>; diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi index 21fe72b183d87c..96eed0dc08b8fe 100644 --- a/arch/arm/boot/dts/stih410.dtsi +++ b/arch/arm/boot/dts/stih410.dtsi @@ -9,6 +9,7 @@ #include "stih410-clock.dtsi" #include "stih407-family.dtsi" #include "stih410-pinctrl.dtsi" +#include / { aliases { bdisp0 = &bdisp0; @@ -213,7 +214,7 @@ <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>; - hdmi,hpd-gpio = <&pio5 3>; + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; reset-names = "hdmi"; resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; ddc = <&hdmiddc>; From be1965486122417edcd95893e8414902696e8e11 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 15 Jan 2018 17:37:08 +0100 Subject: [PATCH 2008/3326] ARM: dts: nomadik: add interrupt-parent for clcd commit e8bfa0422469cdfc86be3f525f621b1d44d2481b upstream. The clcd device is lacking an interrupt-parent property, which makes the interrupt unusable and shows up as a warning with the latest dtc version: arch/arm/boot/dts/ste-nomadik-s8815.dtb: Warning (interrupts_property): Missing interrupt-parent for /amba/clcd@10120000 arch/arm/boot/dts/ste-nomadik-nhk15.dtb: Warning (interrupts_property): Missing interrupt-parent for /amba/clcd@10120000 I looked up the old board files and found that this interrupt has the same irqchip as all the other on-chip device, it just needs one extra line. 
Fixes: 17470b7da11c ("ARM: dts: add the CLCD LCD display to the NHK15") Reviewed-by: Linus Walleij Cc: stable@vger.kernel.org Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/ste-nomadik-stn8815.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index 68aab50a73abc4..733678b75b88b7 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -750,6 +750,7 @@ reg = <0x10120000 0x1000>; interrupt-names = "combined"; interrupts = <14>; + interrupt-parent = <&vica>; clocks = <&clcdclk>, <&hclkclcd>; clock-names = "clcdclk", "apb_pclk"; status = "disabled"; From cbcc2ff13af5664804f0c2622aff9159f79f249f Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Thu, 7 Dec 2017 14:43:24 +0800 Subject: [PATCH 2009/3326] arm: dts: mt7623: fix card detection issue on bananapi-r2 commit b96a696fb2a8dd1ee4e6cfee48dcac270fc53287 upstream. Fix that bananapi-r2 booting from SD-card would fail since incorrect polarity is applied to the previous setup with GPIO_ACTIVE_HIGH. Cc: stable@vger.kernel.org Fixes: 0eed8d097612 ("arm: dts: mt7623: Add SD-card and EMMC to bananapi-r2") Signed-off-by: Sean Wang Tested-by: Matthias Brugger Signed-off-by: Matthias Brugger Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts index 688a86378ceef8..7bf5aa2237c9d1 100644 --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts @@ -204,7 +204,7 @@ bus-width = <4>; max-frequency = <50000000>; cap-sd-highspeed; - cd-gpios = <&pio 261 0>; + cd-gpios = <&pio 261 GPIO_ACTIVE_LOW>; vmmc-supply = <&mt6323_vmch_reg>; vqmmc-supply = <&mt6323_vio18_reg>; }; From b1637c1236d0e0240277b616f94882022cd2a55e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 11 Jan 2018 11:28:51 +0530 Subject: [PATCH 2010/3326] arm: spear600: Add missing interrupt-parent of rtc commit 6ffb5b4f248fe53e0361b8cbc2a523b432566442 upstream. The interrupt-parent of rtc was missing, add it. Fixes: 8113ba917dfa ("ARM: SPEAr: DT: Update device nodes") Cc: stable@vger.kernel.org # v3.8+ Reported-by: Arnd Bergmann Signed-off-by: Viresh Kumar Signed-off-by: Olof Johansson Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/spear600.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi index 6b32d20acc9f4b..00166eb9be86f0 100644 --- a/arch/arm/boot/dts/spear600.dtsi +++ b/arch/arm/boot/dts/spear600.dtsi @@ -194,6 +194,7 @@ rtc: rtc@fc900000 { compatible = "st,spear600-rtc"; reg = <0xfc900000 0x1000>; + interrupt-parent = <&vic0>; interrupts = <10>; status = "disabled"; }; From 349d03cb5378e97e6bfd4fed5c78d3bab115b36e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 11 Jan 2018 11:28:52 +0530 Subject: [PATCH 2011/3326] arm: spear13xx: Fix dmas cells commit cdd10409914184c7eee5ae3e11beb890c9c16c61 upstream. The "dmas" cells for the designware DMA controller need to have only 3 properties apart from the phandle: request line, src master and destination master. But the commit 6e8887f60f60 updated it incorrectly while moving from platform code to DT. Fix it. 
Cc: stable@vger.kernel.org # v3.10+ Fixes: 6e8887f60f60 ("ARM: SPEAr13xx: Pass generic DW DMAC platform data from DT") Reported-by: Arnd Bergmann Signed-off-by: Viresh Kumar Signed-off-by: Olof Johansson Signed-off-by: Greg Kroah-Hartman --- Documentation/devicetree/bindings/dma/snps-dma.txt | 2 +- arch/arm/boot/dts/spear1340.dtsi | 4 ++-- arch/arm/boot/dts/spear13xx.dtsi | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt index a122723907acb9..99acc712f83aca 100644 --- a/Documentation/devicetree/bindings/dma/snps-dma.txt +++ b/Documentation/devicetree/bindings/dma/snps-dma.txt @@ -64,6 +64,6 @@ Example: reg = <0xe0000000 0x1000>; interrupts = <0 35 0x4>; dmas = <&dmahost 12 0 1>, - <&dmahost 13 0 1 0>; + <&dmahost 13 1 0>; dma-names = "rx", "rx"; }; diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index 5f347054527def..d4dbc4098653e1 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi @@ -142,8 +142,8 @@ reg = <0xb4100000 0x1000>; interrupts = <0 105 0x4>; status = "disabled"; - dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */ - <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */ + dmas = <&dwdma0 12 0 1>, + <&dwdma0 13 1 0>; dma-names = "tx", "rx"; }; diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index 17ea0abcdbd7c5..086b4b3332494b 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -100,7 +100,7 @@ reg = <0xb2800000 0x1000>; interrupts = <0 29 0x4>; status = "disabled"; - dmas = <&dwdma0 0 0 0 0>; + dmas = <&dwdma0 0 0 0>; dma-names = "data"; }; @@ -290,8 +290,8 @@ #size-cells = <0>; interrupts = <0 31 0x4>; status = "disabled"; - dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */ - <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */ + dmas = <&dwdma0 4 0 0>, + <&dwdma0 5 0 0>; dma-names = "tx", "rx"; }; From ce6ec5b8dc5a15b830022dd6ee5a040ead0debb9 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 11 Jan 2018 11:28:53 +0530 Subject: [PATCH 2012/3326] arm: spear13xx: Fix spics gpio controller's warning commit f8975cb1b8a36d0839b6365235778dd9df1d04ca upstream. This fixes the following warning by also sending the flags argument for gpio controllers: Property 'cs-gpios', cell 6 is not a phandle reference in /ahb/apb/spi@e0100000 Fixes: 8113ba917dfa ("ARM: SPEAr: DT: Update device nodes") Cc: stable@vger.kernel.org # v3.8+ Reported-by: Arnd Bergmann Signed-off-by: Viresh Kumar Signed-off-by: Olof Johansson Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/spear1310-evb.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts index 84101e4eebbf88..0f5f379323a8e5 100644 --- a/arch/arm/boot/dts/spear1310-evb.dts +++ b/arch/arm/boot/dts/spear1310-evb.dts @@ -349,7 +349,7 @@ spi0: spi@e0100000 { status = "okay"; num-cs = <3>; - cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>; + cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>; stmpe610@0 { compatible = "st,stmpe610"; From cc2759b7bb7c55a4bed674a8223c89a8b88fb3d7 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Wed, 30 Aug 2017 17:12:05 +0100 Subject: [PATCH 2013/3326] drm/i915: add GT number to intel_device_info commit 0890540e21cf1156b4cf960a4c1c734db4e816f9 upstream. Up to Coffeelake we could deduce this GT number from the device ID. This doesn't seem to be the case anymore. 
This change reorders pciids per GT and adds a gt field to intel_device_info. We set this field on the following platforms : - SNB/IVB/HSW/BDW/SKL/KBL/CFL/CNL Before & After : $ modinfo drivers/gpu/drm/i915/i915.ko | grep ^alias | wc -l 209 v2: Add SNB & IVB (Chris) v3: Fix compilation error in early-quirks (Lionel) v4: Fix inconsistency between FEATURE/PLATFORM macros (Ville) Signed-off-by: Lionel Landwerlin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20170830161208.29221-2-lionel.g.landwerlin@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_pci.c | 193 +++++++++++++++++++++++++------- include/drm/i915_pciids.h | 152 +++++++++++++++---------- 3 files changed, 246 insertions(+), 100 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 18d9da53282b67..3f818412765c85 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -842,6 +842,7 @@ struct intel_device_info { u8 gen; u16 gen_mask; enum intel_platform platform; + u8 gt; /* GT number, 0 if undefined */ u8 ring_mask; /* Rings supported by the HW */ u8 num_rings; #define DEFINE_FLAG(name) u8 name:1 diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 09d97e0990b787..2985f1e418ad62 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -224,15 +224,34 @@ static const struct intel_device_info intel_ironlake_m_info = { GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_sandybridge_d_info = { - GEN6_FEATURES, - .platform = INTEL_SANDYBRIDGE, +#define SNB_D_PLATFORM \ + GEN6_FEATURES, \ + .platform = INTEL_SANDYBRIDGE + +static const struct intel_device_info intel_sandybridge_d_gt1_info = { + SNB_D_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_sandybridge_m_info = { - GEN6_FEATURES, - .platform = INTEL_SANDYBRIDGE, - .is_mobile = 1, +static const struct intel_device_info intel_sandybridge_d_gt2_info = { + SNB_D_PLATFORM, + .gt = 2, +}; + +#define SNB_M_PLATFORM \ + GEN6_FEATURES, \ + .platform = INTEL_SANDYBRIDGE, \ + .is_mobile = 1 + + +static const struct intel_device_info intel_sandybridge_m_gt1_info = { + SNB_M_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_sandybridge_m_gt2_info = { + SNB_M_PLATFORM, + .gt = 2, }; #define GEN7_FEATURES \ @@ -249,22 +268,41 @@ static const struct intel_device_info intel_sandybridge_m_info = { GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS -static const struct intel_device_info intel_ivybridge_d_info = { - GEN7_FEATURES, - .platform = INTEL_IVYBRIDGE, - .has_l3_dpf = 1, +#define IVB_D_PLATFORM \ + GEN7_FEATURES, \ + .platform = INTEL_IVYBRIDGE, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_ivybridge_d_gt1_info = { + IVB_D_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_ivybridge_m_info = { - GEN7_FEATURES, - .platform = INTEL_IVYBRIDGE, - .is_mobile = 1, - .has_l3_dpf = 1, +static const struct intel_device_info intel_ivybridge_d_gt2_info = { + IVB_D_PLATFORM, + .gt = 2, +}; + +#define IVB_M_PLATFORM \ + GEN7_FEATURES, \ + .platform = INTEL_IVYBRIDGE, \ + .is_mobile = 1, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_ivybridge_m_gt1_info = { + IVB_M_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_ivybridge_m_gt2_info = { + IVB_M_PLATFORM, + .gt = 2, }; static const struct intel_device_info intel_ivybridge_q_info = 
{ GEN7_FEATURES, .platform = INTEL_IVYBRIDGE, + .gt = 2, .num_pipes = 0, /* legal, last one wins */ .has_l3_dpf = 1, }; @@ -299,10 +337,24 @@ static const struct intel_device_info intel_valleyview_info = { .has_rc6p = 0 /* RC6p removed-by HSW */, \ .has_runtime_pm = 1 -static const struct intel_device_info intel_haswell_info = { - HSW_FEATURES, - .platform = INTEL_HASWELL, - .has_l3_dpf = 1, +#define HSW_PLATFORM \ + HSW_FEATURES, \ + .platform = INTEL_HASWELL, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_haswell_gt1_info = { + HSW_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_haswell_gt2_info = { + HSW_PLATFORM, + .gt = 2, +}; + +static const struct intel_device_info intel_haswell_gt3_info = { + HSW_PLATFORM, + .gt = 3, }; #define BDW_FEATURES \ @@ -318,12 +370,27 @@ static const struct intel_device_info intel_haswell_info = { .gen = 8, \ .platform = INTEL_BROADWELL -static const struct intel_device_info intel_broadwell_info = { +static const struct intel_device_info intel_broadwell_gt1_info = { + BDW_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_broadwell_gt2_info = { BDW_PLATFORM, + .gt = 2, +}; + +static const struct intel_device_info intel_broadwell_rsvd_info = { + BDW_PLATFORM, + .gt = 3, + /* According to the device ID those devices are GT3, they were + * previously treated as not GT3, keep it like that. + */ }; static const struct intel_device_info intel_broadwell_gt3_info = { BDW_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; @@ -358,13 +425,29 @@ static const struct intel_device_info intel_cherryview_info = { .has_guc = 1, \ .ddb_size = 896 -static const struct intel_device_info intel_skylake_info = { +static const struct intel_device_info intel_skylake_gt1_info = { SKL_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_skylake_gt3_info = { +static const struct intel_device_info intel_skylake_gt2_info = { SKL_PLATFORM, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .gt = 2, +}; + +#define SKL_GT3_PLUS_PLATFORM \ + SKL_PLATFORM, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING + + +static const struct intel_device_info intel_skylake_gt3_info = { + SKL_GT3_PLUS_PLATFORM, + .gt = 3, +}; + +static const struct intel_device_info intel_skylake_gt4_info = { + SKL_GT3_PLUS_PLATFORM, + .gt = 4, }; #define GEN9_LP_FEATURES \ @@ -416,12 +499,19 @@ static const struct intel_device_info intel_geminilake_info = { .has_guc = 1, \ .ddb_size = 896 -static const struct intel_device_info intel_kabylake_info = { +static const struct intel_device_info intel_kabylake_gt1_info = { KBL_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_kabylake_gt2_info = { + KBL_PLATFORM, + .gt = 2, }; static const struct intel_device_info intel_kabylake_gt3_info = { KBL_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; @@ -434,20 +524,28 @@ static const struct intel_device_info intel_kabylake_gt3_info = { .has_guc = 1, \ .ddb_size = 896 -static const struct intel_device_info intel_coffeelake_info = { +static const struct intel_device_info intel_coffeelake_gt1_info = { + CFL_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_coffeelake_gt2_info = { CFL_PLATFORM, + .gt = 2, }; static const struct intel_device_info intel_coffeelake_gt3_info = { CFL_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | 
BSD2_RING, }; -static const struct intel_device_info intel_cannonlake_info = { +static const struct intel_device_info intel_cannonlake_gt2_info = { BDW_FEATURES, .is_alpha_support = 1, .platform = INTEL_CANNONLAKE, .gen = 10, + .gt = 2, .ddb_size = 1024, .has_csr = 1, .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } @@ -476,31 +574,40 @@ static const struct pci_device_id pciidlist[] = { INTEL_PINEVIEW_IDS(&intel_pineview_info), INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), - INTEL_SNB_D_IDS(&intel_sandybridge_d_info), - INTEL_SNB_M_IDS(&intel_sandybridge_m_info), + INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info), + INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info), + INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info), + INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info), INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ - INTEL_IVB_M_IDS(&intel_ivybridge_m_info), - INTEL_IVB_D_IDS(&intel_ivybridge_d_info), - INTEL_HSW_IDS(&intel_haswell_info), + INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info), + INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info), + INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info), + INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info), + INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info), + INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info), + INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info), INTEL_VLV_IDS(&intel_valleyview_info), - INTEL_BDW_GT12_IDS(&intel_broadwell_info), + INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info), + INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info), INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info), - INTEL_BDW_RSVD_IDS(&intel_broadwell_info), + INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info), INTEL_CHV_IDS(&intel_cherryview_info), - INTEL_SKL_GT1_IDS(&intel_skylake_info), - INTEL_SKL_GT2_IDS(&intel_skylake_info), + INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info), + INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info), INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), - INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), + INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info), INTEL_BXT_IDS(&intel_broxton_info), INTEL_GLK_IDS(&intel_geminilake_info), - INTEL_KBL_GT1_IDS(&intel_kabylake_info), - INTEL_KBL_GT2_IDS(&intel_kabylake_info), + INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info), + INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info), INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), - INTEL_CFL_S_IDS(&intel_coffeelake_info), - INTEL_CFL_H_IDS(&intel_coffeelake_info), - INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info), - INTEL_CNL_IDS(&intel_cannonlake_info), + INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), + INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), + INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info), + INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 34c8f5600ce054..1257e15c1a034c 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -118,92 +118,125 @@ #define INTEL_IRONLAKE_M_IDS(info) \ INTEL_VGA_DEVICE(0x0046, info) -#define INTEL_SNB_D_IDS(info) \ +#define INTEL_SNB_D_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x0102, info), \ - INTEL_VGA_DEVICE(0x0112, info), \ - INTEL_VGA_DEVICE(0x0122, info), \ INTEL_VGA_DEVICE(0x010A, info) -#define INTEL_SNB_M_IDS(info) \ - INTEL_VGA_DEVICE(0x0106, info), \ +#define INTEL_SNB_D_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0112, info), \ + 
INTEL_VGA_DEVICE(0x0122, info) + +#define INTEL_SNB_D_IDS(info) \ + INTEL_SNB_D_GT1_IDS(info), \ + INTEL_SNB_D_GT2_IDS(info) + +#define INTEL_SNB_M_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0106, info) + +#define INTEL_SNB_M_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x0116, info), \ INTEL_VGA_DEVICE(0x0126, info) +#define INTEL_SNB_M_IDS(info) \ + INTEL_SNB_M_GT1_IDS(info), \ + INTEL_SNB_M_GT2_IDS(info) + +#define INTEL_IVB_M_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */ + +#define INTEL_IVB_M_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */ + #define INTEL_IVB_M_IDS(info) \ - INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \ - INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */ + INTEL_IVB_M_GT1_IDS(info), \ + INTEL_IVB_M_GT2_IDS(info) -#define INTEL_IVB_D_IDS(info) \ +#define INTEL_IVB_D_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \ + INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */ + +#define INTEL_IVB_D_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \ - INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \ INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */ +#define INTEL_IVB_D_IDS(info) \ + INTEL_IVB_D_GT1_IDS(info), \ + INTEL_IVB_D_GT2_IDS(info) + #define INTEL_IVB_Q_IDS(info) \ INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */ -#define INTEL_HSW_IDS(info) \ +#define INTEL_HSW_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \ INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \ - INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \ - INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \ INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \ - INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \ - INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \ INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \ - INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \ - INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \ INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \ INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \ - INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \ - INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \ INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \ INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \ INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \ INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \ - INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \ - INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \ INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \ INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \ - INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \ - INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \ INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \ - INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \ - INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 
server */ \ INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \ INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \ - INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \ - INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \ INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \ + INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */ + +#define INTEL_HSW_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \ + INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \ + INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \ + INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \ + INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \ + INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \ + INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \ + INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \ + INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \ INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \ INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \ - INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \ INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \ - INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \ - INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \ - INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ - INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \ INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \ + INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */ + +#define INTEL_HSW_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \ + INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \ + INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \ + INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \ + INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \ + INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \ + INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \ + INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \ + INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \ + INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \ - INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \ - INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \ INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */ +#define INTEL_HSW_IDS(info) \ + INTEL_HSW_GT1_IDS(info), \ + INTEL_HSW_GT2_IDS(info), \ + INTEL_HSW_GT3_IDS(info) + #define INTEL_VLV_IDS(info) \ INTEL_VGA_DEVICE(0x0f30, info), \ 
INTEL_VGA_DEVICE(0x0f31, info), \ @@ -212,17 +245,19 @@ INTEL_VGA_DEVICE(0x0157, info), \ INTEL_VGA_DEVICE(0x0155, info) -#define INTEL_BDW_GT12_IDS(info) \ +#define INTEL_BDW_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \ INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \ INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \ INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \ - INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \ + INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \ + INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */ + +#define INTEL_BDW_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \ INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \ INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \ - INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \ - INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \ - INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \ + INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \ INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \ INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */ @@ -243,7 +278,8 @@ INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ #define INTEL_BDW_IDS(info) \ - INTEL_BDW_GT12_IDS(info), \ + INTEL_BDW_GT1_IDS(info), \ + INTEL_BDW_GT2_IDS(info), \ INTEL_BDW_GT3_IDS(info), \ INTEL_BDW_RSVD_IDS(info) @@ -335,20 +371,22 @@ INTEL_KBL_GT4_IDS(info) /* CFL S */ -#define INTEL_CFL_S_IDS(info) \ +#define INTEL_CFL_S_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x3E93, info) /* SRV GT1 */ + +#define INTEL_CFL_S_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */ /* CFL H */ -#define INTEL_CFL_H_IDS(info) \ +#define INTEL_CFL_H_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \ INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */ /* CFL U */ -#define INTEL_CFL_U_IDS(info) \ +#define INTEL_CFL_U_GT3_IDS(info) \ INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \ From ba86431b7b588d887aca002596a42890edee9bd0 Mon Sep 17 00:00:00 2001 From: Anuj Phogat Date: Wed, 20 Sep 2017 13:31:26 -0700 Subject: [PATCH 2014/3326] drm/i915/kbl: Change a KBL pci id to GT2 from GT1.5 commit 41693fd5237397d3c61b311af0fda1f6f39297c2 upstream. 
See Mesa commit 9c588ff Cc: Matt Turner Cc: Rodrigo Vivi Signed-off-by: Anuj Phogat Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20170920203126.1323-1-anuj.phogat@gmail.com Signed-off-by: Greg Kroah-Hartman --- include/drm/i915_pciids.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 1257e15c1a034c..972a25633525d9 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -339,7 +339,6 @@ #define INTEL_KBL_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ - INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \ INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ @@ -349,6 +348,7 @@ #define INTEL_KBL_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \ INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \ INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \ INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ From 0fe1e5ec1c27115afdaf245cc1a8b42765448381 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 13 Dec 2017 12:04:25 -0800 Subject: [PATCH 2015/3326] x86/gpu: add CFL to early quirks commit 33aa69ed8aacd92dea12671e52eb3ca6ac2d7a49 upstream. CFL was missing from intel_early_ids[]. The PCI ID needs to be there to allow the memory region to be stolen, otherwise we could have RAM being arbitrarily overwritten if for example we keep using the UEFI framebuffer, depending on how BIOS has set up the e820 map. Fixes: b056f8f3d6b9 ("drm/i915/cfl: Add Coffee Lake PCI IDs for S Skus.") Signed-off-by: Lucas De Marchi Cc: Rodrigo Vivi Cc: Anusha Srivatsa Cc: Jani Nikula Cc: Joonas Lahtinen Cc: David Airlie Cc: intel-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org Cc: Ingo Molnar Cc: H. 
Peter Anvin Cc: Thomas Gleixner Cc: x86@kernel.org Cc: # v4.13+ 0890540e21cf drm/i915: add GT number to intel_device_info Cc: # v4.13+ 41693fd52373 drm/i915/kbl: Change a KBL pci id to GT2 from GT1.5 Cc: # v4.13+ Reviewed-by: Rodrigo Vivi Acked-by: Jani Nikula Acked-by: Ingo Molnar Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20171213200425.2954-1-lucas.demarchi@intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/early-quirks.c | 1 + include/drm/i915_pciids.h | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 1e82f787c1603b..c87560e1e3ef47 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -527,6 +527,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_SKL_IDS(&gen9_early_ops), INTEL_BXT_IDS(&gen9_early_ops), INTEL_KBL_IDS(&gen9_early_ops), + INTEL_CFL_IDS(&gen9_early_ops), INTEL_GLK_IDS(&gen9_early_ops), INTEL_CNL_IDS(&gen9_early_ops), }; diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 972a25633525d9..c65e4489006d30 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -392,6 +392,12 @@ INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */ +#define INTEL_CFL_IDS(info) \ + INTEL_CFL_S_GT1_IDS(info), \ + INTEL_CFL_S_GT2_IDS(info), \ + INTEL_CFL_H_GT2_IDS(info), \ + INTEL_CFL_U_GT3_IDS(info) + /* CNL U 2+2 */ #define INTEL_CNL_U_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x5A52, info), \ From 04f048fad32e191d48c1a260d6aa74e27cfe846e Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 29 Jan 2018 14:08:45 +0300 Subject: [PATCH 2016/3326] x86/kexec: Make kexec (mostly) work in 5-level paging mode commit 5bf30316991d5bcda046343ee77d823cf16fdd03 upstream. Currently kexec() will crash when switching into a 5-level paging enabled kernel. I missed that we need to change relocate_kernel() to set CR4.LA57 flag if the kernel has 5-level paging enabled. I avoided using #ifdef CONFIG_X86_5LEVEL here and inferred if we need to enable 5-level paging from previous CR4 value. This way the code is ready for boot-time switching between paging modes. With this patch applied, in addition to kexec 4-to-4 which always worked, we can kexec 4-to-5 and 5-to-5 - while 5-to-4 will need more work. Reported-by: Baoquan He Signed-off-by: Kirill A. Shutemov Tested-by: Baoquan He Cc: # v4.14+ Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Fixes: 77ef56e4f0fb ("x86: Enable 5-level paging support via CONFIG_X86_5LEVEL=y") Link: http://lkml.kernel.org/r/20180129110845.26633-1-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/relocate_kernel_64.S | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 307d3bac5f04ec..11eda21eb697df 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -68,6 +68,9 @@ relocate_kernel: movq %cr4, %rax movq %rax, CR4(%r11) + /* Save CR4. Required to enable the right paging mode later. 
*/ + movq %rax, %r13 + /* zero out flags, and disable interrupts */ pushq $0 popfq @@ -126,8 +129,13 @@ identity_mapped: /* * Set cr4 to a known state: * - physical address extension enabled + * - 5-level paging, if it was enabled before */ movl $X86_CR4_PAE, %eax + testq $X86_CR4_LA57, %r13 + jz 1f + orl $X86_CR4_LA57, %eax +1: movq %rax, %cr4 jmp 1f From 41fd295d90a7d8518f3d3d705884fb05df9596f0 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 1 Feb 2018 13:40:19 +0100 Subject: [PATCH 2017/3326] x86/xen: init %gs very early to avoid page faults with stack protector commit 4f277295e54c5b7340e48efea3fc5cc21a2872b7 upstream. When running as Xen pv guest %gs is initialized some time after C code is started. Depending on stack protector usage this might be too late, resulting in page faults. So setup %gs and MSR_GS_BASE in assembly code already. Cc: stable@vger.kernel.org Signed-off-by: Juergen Gross Reviewed-by: Boris Ostrovsky Tested-by: Chris Patterson Signed-off-by: Juergen Gross Signed-off-by: Greg Kroah-Hartman --- arch/x86/xen/xen-head.S | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 497cc55a0c16cc..96f26e026783d8 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -9,7 +9,9 @@ #include #include +#include #include +#include #include #include @@ -35,6 +37,20 @@ ENTRY(startup_xen) mov %_ASM_SI, xen_start_info mov $init_thread_union+THREAD_SIZE, %_ASM_SP +#ifdef CONFIG_X86_64 + /* Set up %gs. + * + * The base of %gs always points to the bottom of the irqstack + * union. If the stack protector canary is enabled, it is + * located at %gs:40. Note that, on SMP, the boot cpu uses + * init data section till per cpu areas are set up. + */ + movl $MSR_GS_BASE,%ecx + movq $INIT_PER_CPU_VAR(irq_stack_union),%rax + cdq + wrmsr +#endif + jmp xen_start_kernel END(startup_xen) __FINIT From 43d38b079ca7466a9eebf2936584e091f8c20349 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 6 Feb 2018 18:55:12 +0100 Subject: [PATCH 2018/3326] x86: PM: Make APM idle driver initialize polling state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit f859422075165e32c00c8d75d63f300015cc07ae upstream. Update the APM driver overlooked by commit 1b39e3f813b4 (cpuidle: Make drivers initialize polling state) to initialize the polling state like the other cpuidle drivers modified by that commit to prevent cpuidle from crashing. Fixes: 1b39e3f813b4 (cpuidle: Make drivers initialize polling state) Reported-by: Ville Syrjälä Tested-by: Ville Syrjälä Cc: 4.14+ # 4.14+ Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/apm_32.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index e4b0d92b3ae067..2a7fd56e67b364 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -2389,6 +2389,7 @@ static int __init apm_init(void) if (HZ != 100) idle_period = (idle_period * HZ) / 100; if (idle_threshold < 100) { + cpuidle_poll_state_init(&apm_idle_driver); if (!cpuidle_register_driver(&apm_idle_driver)) if (cpuidle_register_device(&apm_cpuidle_device)) cpuidle_unregister_driver(&apm_idle_driver); From 2bc19b518cb25150572ae4fe34ea6d623d733e2e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Feb 2018 17:18:05 -0800 Subject: [PATCH 2019/3326] x86/entry/64: Clear extra registers beyond syscall arguments, to reduce speculation attack surface commit 8e1eb3fa009aa7c0b944b3c8b26b07de0efb3200 upstream. At entry userspace may have (maliciously) populated the extra registers outside the syscall calling convention with arbitrary values that could be useful in a speculative execution (Spectre style) attack. Clear these registers to minimize the kernel's attack surface. Note, this only clears the extra registers and not the unused registers for syscalls less than 6 arguments, since those registers are likely to be clobbered well before their values could be put to use under speculation. Note, Linus found that the XOR instructions can be executed with minimized cost if interleaved with the PUSH instructions, and Ingo's analysis found that R10 and R11 should be included in the register clearing beyond the typical 'extra' syscall calling convention registers. Suggested-by: Linus Torvalds Reported-by: Andi Kleen Signed-off-by: Dan Williams Cc: Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/151787988577.7847.16733592218894189003.stgit@dwillia2-desk3.amr.corp.intel.com [ Made small improvements to the changelog and the code comments. ] Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 16e2d72e79a097..5771ae24ba5869 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -231,13 +231,26 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) pushq %r8 /* pt_regs->r8 */ pushq %r9 /* pt_regs->r9 */ pushq %r10 /* pt_regs->r10 */ + /* + * Clear extra registers that a speculation attack might + * otherwise want to exploit. Interleave XOR with PUSH + * for better uop scheduling: + */ + xorq %r10, %r10 /* nospec r10 */ pushq %r11 /* pt_regs->r11 */ + xorq %r11, %r11 /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp */ + xorl %ebp, %ebp /* nospec rbp */ pushq %r12 /* pt_regs->r12 */ + xorq %r12, %r12 /* nospec r12 */ pushq %r13 /* pt_regs->r13 */ + xorq %r13, %r13 /* nospec r13 */ pushq %r14 /* pt_regs->r14 */ + xorq %r14, %r14 /* nospec r14 */ pushq %r15 /* pt_regs->r15 */ + xorq %r15, %r15 /* nospec r15 */ UNWIND_HINT_REGS TRACE_IRQS_OFF From ec35f83de1c80e4decdbda0d61cd1f8924ae606f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Feb 2018 17:18:17 -0800 Subject: [PATCH 2020/3326] x86/entry/64/compat: Clear registers for compat syscalls, to reduce speculation attack surface commit 6b8cf5cc9965673951f1ab3f0e3cf23d06e3e2ee upstream. 
At entry userspace may have populated registers with values that could otherwise be useful in a speculative execution attack. Clear them to minimize the kernel's attack surface. Originally-From: Andi Kleen Signed-off-by: Dan Williams Cc: Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/151787989697.7847.4083702787288600552.stgit@dwillia2-desk3.amr.corp.intel.com [ Made small improvements to the changelog. ] Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64_compat.S | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 98d5358e4041a7..fd65e016e4133f 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -85,15 +85,25 @@ ENTRY(entry_SYSENTER_compat) pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorq %r8, %r8 /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorq %r9, %r9 /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorq %r10, %r10 /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorq %r11, %r11 /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ + xorl %ebp, %ebp /* nospec rbp */ pushq $0 /* pt_regs->r12 = 0 */ + xorq %r12, %r12 /* nospec r12 */ pushq $0 /* pt_regs->r13 = 0 */ + xorq %r13, %r13 /* nospec r13 */ pushq $0 /* pt_regs->r14 = 0 */ + xorq %r14, %r14 /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ + xorq %r15, %r15 /* nospec r15 */ cld /* @@ -214,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) pushq %rbp /* pt_regs->cx (stashed in bp) */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorq %r8, %r8 /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorq %r9, %r9 /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorq %r10, %r10 /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorq %r11, %r11 /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ + xorl %ebp, %ebp /* nospec rbp */ pushq $0 /* pt_regs->r12 = 0 */ + xorq %r12, %r12 /* nospec r12 */ pushq $0 /* pt_regs->r13 = 0 */ + xorq %r13, %r13 /* nospec r13 */ pushq $0 /* pt_regs->r14 = 0 */ + xorq %r14, %r14 /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ + xorq %r15, %r15 /* nospec r15 */ /* * User mode is traced as though IRQs are on, and SYSENTER @@ -338,15 +358,25 @@ ENTRY(entry_INT80_compat) pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorq %r8, %r8 /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorq %r9, %r9 /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorq %r10, %r10 /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorq %r11, %r11 /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp */ + xorl %ebp, %ebp /* nospec rbp */ pushq %r12 /* pt_regs->r12 */ + xorq %r12, %r12 /* nospec r12 */ pushq %r13 /* pt_regs->r13 */ + xorq %r13, %r13 /* nospec r13 */ pushq %r14 /* pt_regs->r14 */ + xorq %r14, %r14 /* nospec r14 */ pushq %r15 /* pt_regs->r15 */ + xorq %r15, %r15 /* nospec r15 */ cld /* From 045e5161abfcfefce5b2777788e5913d85bb5c6f Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 1 Feb 2018 11:21:58 +0100 Subject: [PATCH 2021/3326] 
compiler-gcc.h: Introduce __optimize function attribute commit df5d45aa08f848b79caf395211b222790534ccc7 upstream. Create a new function attribute __optimize, which allows to specify an optimization level on a per-function basis. Signed-off-by: Geert Uytterhoeven Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- include/linux/compiler-gcc.h | 4 ++++ include/linux/compiler.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 2272ded07496d6..7bba8e28c52916 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -196,6 +196,10 @@ #endif /* __CHECKER__ */ #endif /* GCC_VERSION >= 40300 */ +#if GCC_VERSION >= 40400 +#define __optimize(level) __attribute__((__optimize__(level))) +#endif /* GCC_VERSION >= 40400 */ + #if GCC_VERSION >= 40500 #ifndef __CHECKER__ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index fab5dc250c61a8..e8c9cd18bb054a 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -266,6 +266,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #endif /* __ASSEMBLY__ */ +#ifndef __optimize +# define __optimize(level) +#endif + /* Compile time object size, -1 for unknown */ #ifndef __compiletime_object_size # define __compiletime_object_size(obj) -1 From fff8ad7c18a238a32a56056980fccd40ed8cce83 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 1 Feb 2018 11:21:59 +0100 Subject: [PATCH 2022/3326] compiler-gcc.h: __nostackprotector needs gcc-4.4 and up MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d9afaaa4ff7af8b87d4a205e48cb8a6f666d7f01 upstream. Gcc versions before 4.4 do not recognize the __optimize__ compiler attribute: warning: ‘__optimize__’ attribute directive ignored Fixes: 7375ae3a0b79ea07 ("compiler-gcc.h: Introduce __nostackprotector function attribute") Signed-off-by: Geert Uytterhoeven Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- include/linux/compiler-gcc.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 7bba8e28c52916..bf09213895f7a9 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -167,8 +167,6 @@ #if GCC_VERSION >= 40100 # define __compiletime_object_size(obj) __builtin_object_size(obj, 0) - -#define __nostackprotector __attribute__((__optimize__("no-stack-protector"))) #endif #if GCC_VERSION >= 40300 @@ -198,6 +196,7 @@ #if GCC_VERSION >= 40400 #define __optimize(level) __attribute__((__optimize__(level))) +#define __nostackprotector __optimize("no-stack-protector") #endif /* GCC_VERSION >= 40400 */ #if GCC_VERSION >= 40500 From b3d33c5f296b36a05e24828f96c8ea5e02a15b06 Mon Sep 17 00:00:00 2001 From: Artem Savkov Date: Tue, 6 Feb 2018 22:20:21 +0100 Subject: [PATCH 2023/3326] crypto: sun4i_ss_prng - fix return value of sun4i_ss_prng_generate commit dd78c832ffaf86eb6434e56de4bc3bc31f03f771 upstream. According to crypto/rng.h generate function should return 0 on success and < 0 on error. 
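For reference, the contract crypto/rng.h defines for a .generate callback looks roughly like the sketch below (kernel headers such as <crypto/rng.h> and <linux/string.h> assumed; the body is a placeholder fill, not the sun4i-ss hardware path). Only the return convention matters for this fix:

        static int example_rng_generate(struct crypto_rng *tfm,
                                        const u8 *src, unsigned int slen,
                                        u8 *dst, unsigned int dlen)
        {
                /* Produce dlen bytes of output into dst (placeholder only). */
                memset(dst, 0, dlen);

                /* Report success or failure, not the number of bytes. */
                return 0;
        }

The diff below applies exactly this convention by returning 0 instead of dlen.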
Fixes: b8ae5c7387ad ("crypto: sun4i-ss - support the Security System PRNG") Signed-off-by: Artem Savkov Acked-by: Corentin Labbe Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/sunxi-ss/sun4i-ss-prng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c index 0d01d16242527c..5754e0b92fb0cc 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c @@ -52,5 +52,5 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, writel(0, ss->base + SS_CTL); spin_unlock(&ss->slock); - return dlen; + return 0; } From 14d87b710338145e46940d0f751c5b7dd5b4c38f Mon Sep 17 00:00:00 2001 From: Artem Savkov Date: Tue, 6 Feb 2018 22:20:22 +0100 Subject: [PATCH 2024/3326] crypto: sun4i_ss_prng - convert lock to _bh in sun4i_ss_prng_generate commit 2e7d1d61ea6c0f1c4da5eb82cafac750d55637a7 upstream. Lockdep detects a possible deadlock in sun4i_ss_prng_generate() and throws an "inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage" warning. Disabling softirqs to fix this. Fixes: b8ae5c7387ad ("crypto: sun4i-ss - support the Security System PRNG") Signed-off-by: Artem Savkov Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/sunxi-ss/sun4i-ss-prng.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c index 5754e0b92fb0cc..63d636424161dc 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c @@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); ss = algt->ss; - spin_lock(&ss->slock); + spin_lock_bh(&ss->slock); writel(mode, ss->base + SS_CTL); @@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, } writel(0, ss->base + SS_CTL); - spin_unlock(&ss->slock); + spin_unlock_bh(&ss->slock); return 0; } From b973685e70c3b418ad46a03d6a9c81554c5c4ba2 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Wed, 7 Feb 2018 17:35:51 +1100 Subject: [PATCH 2025/3326] powerpc/mm/radix: Split linear mapping on hot-unplug commit 4dd5f8a99e791a8c6500e3592f3ce81ae7edcde1 upstream. This patch splits the linear mapping if the hot-unplug range is smaller than the mapping size. The code detects if the mapping needs to be split into a smaller size and if so, uses the stop machine infrastructure to clear the existing mapping and then remap the remaining range using a smaller page size. The code will skip any region of the mapping that overlaps with kernel text and warn about it once. We don't want to remove a mapping where the kernel text and the LMB we intend to remove overlap in the same TLB mapping as it may affect the currently executing code. I've tested these changes under a kvm guest with 2 vcpus, from a split mapping point of view, some of the caveats mentioned above applied to the testing I did. 
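In outline, the splitting decision described above amounts to the sketch below; clear_large_mapping() and remap_with_small_pages() are hypothetical stand-ins for the radix helpers added by the diff, and the real code additionally skips ranges overlapping kernel text and performs the remap under stop_machine():

        static void split_mapping_sketch(unsigned long addr, unsigned long end,
                                         unsigned long mapping_size)
        {
                unsigned long map_start = addr & ~(mapping_size - 1);
                unsigned long map_end   = map_start + mapping_size;

                if ((end - addr) >= mapping_size)
                        return; /* caller clears the whole mapping, no split */

                /* Clear the large mapping, then remap what must survive. */
                clear_large_mapping(map_start);                 /* hypothetical */
                remap_with_small_pages(map_start, addr);        /* head piece */
                remap_with_small_pages(end, map_end);           /* tail piece */
        }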
Fixes: 4b5d62ca17a1 ("powerpc/mm: add radix__remove_section_mapping()") Signed-off-by: Balbir Singh [mpe: Tweak change log to match updated behaviour] Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/pgtable-radix.c | 95 +++++++++++++++++++++++++-------- 1 file changed, 74 insertions(+), 21 deletions(-) diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index cfbbee941a76b1..17ae5c15a9e066 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -671,6 +672,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud) pud_clear(pud); } +struct change_mapping_params { + pte_t *pte; + unsigned long start; + unsigned long end; + unsigned long aligned_start; + unsigned long aligned_end; +}; + +static int stop_machine_change_mapping(void *data) +{ + struct change_mapping_params *params = + (struct change_mapping_params *)data; + + if (!data) + return -1; + + spin_unlock(&init_mm.page_table_lock); + pte_clear(&init_mm, params->aligned_start, params->pte); + create_physical_mapping(params->aligned_start, params->start); + create_physical_mapping(params->end, params->aligned_end); + spin_lock(&init_mm.page_table_lock); + return 0; +} + static void remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end) { @@ -699,6 +724,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr, } } +/* + * clear the pte and potentially split the mapping helper + */ +static void split_kernel_mapping(unsigned long addr, unsigned long end, + unsigned long size, pte_t *pte) +{ + unsigned long mask = ~(size - 1); + unsigned long aligned_start = addr & mask; + unsigned long aligned_end = addr + size; + struct change_mapping_params params; + bool split_region = false; + + if ((end - addr) < size) { + /* + * We're going to clear the PTE, but not flushed + * the mapping, time to remap and flush. The + * effects if visible outside the processor or + * if we are running in code close to the + * mapping we cleared, we are in trouble. 
+ */ + if (overlaps_kernel_text(aligned_start, addr) || + overlaps_kernel_text(end, aligned_end)) { + /* + * Hack, just return, don't pte_clear + */ + WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel " + "text, not splitting\n", addr, end); + return; + } + split_region = true; + } + + if (split_region) { + params.pte = pte; + params.start = addr; + params.end = end; + params.aligned_start = addr & ~(size - 1); + params.aligned_end = min_t(unsigned long, aligned_end, + (unsigned long)__va(memblock_end_of_DRAM())); + stop_machine(stop_machine_change_mapping, ¶ms, NULL); + return; + } + + pte_clear(&init_mm, addr, pte); +} + static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end) { @@ -714,13 +785,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, continue; if (pmd_huge(*pmd)) { - if (!IS_ALIGNED(addr, PMD_SIZE) || - !IS_ALIGNED(next, PMD_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pmd); + split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd); continue; } @@ -745,13 +810,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr, continue; if (pud_huge(*pud)) { - if (!IS_ALIGNED(addr, PUD_SIZE) || - !IS_ALIGNED(next, PUD_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pud); + split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud); continue; } @@ -777,13 +836,7 @@ static void remove_pagetable(unsigned long start, unsigned long end) continue; if (pgd_huge(*pgd)) { - if (!IS_ALIGNED(addr, PGDIR_SIZE) || - !IS_ALIGNED(next, PGDIR_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pgd); + split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd); continue; } From 372ed448263dcc733f4ef3d24d704d2f016d734c Mon Sep 17 00:00:00 2001 From: Nadav Amit Date: Fri, 9 Feb 2018 09:06:38 -0800 Subject: [PATCH 2026/3326] x86/mm/pti: Fix PTI comment in entry_SYSCALL_64() commit 14b1fcc62043729d12e8ae00f8297ab2ffe9fa91 upstream. The comment is confusing since the path is taken when CONFIG_PAGE_TABLE_ISOLATION=y is disabled (while the comment says it is not taken). Signed-off-by: Nadav Amit Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: nadav.amit@gmail.com Link: http://lkml.kernel.org/r/20180209170638.15161-1-namit@vmware.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 5771ae24ba5869..62b381daadd29d 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -209,7 +209,7 @@ ENTRY(entry_SYSCALL_64) swapgs /* - * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it + * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it * is not required to switch CR3. */ movq %rsp, PER_CPU_VAR(rsp_scratch) From d5a1b559235a38d527e8e363a325705fa0325602 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 10 Feb 2018 23:39:22 +0000 Subject: [PATCH 2027/3326] x86/speculation: Update Speculation Control microcode blacklist commit 1751342095f0d2b36fa8114d8e12c5688c455ac4 upstream. 
Intel have retroactively blessed the 0xc2 microcode on Skylake mobile and desktop parts, and the Gemini Lake 0x22 microcode is apparently fine too. We blacklisted the latter purely because it was present with all the other problematic ones in the 2018-01-08 release, but now it's explicitly listed as OK. We still list 0x84 for the various Kaby Lake / Coffee Lake parts, as that appeared in one version of the blacklist and then reverted to 0x80 again. We can change it if 0x84 is actually announced to be safe. Signed-off-by: David Woodhouse Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: arjan.van.de.ven@intel.com Cc: jmattson@google.com Cc: karahmed@amazon.de Cc: kvm@vger.kernel.org Cc: pbonzini@redhat.com Cc: rkrcmar@redhat.com Cc: sironi@amazon.de Link: http://lkml.kernel.org/r/1518305967-31356-2-git-send-email-dwmw@amazon.co.uk Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/intel.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 4cf4f8cbc69da1..c05591f82f22ac 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -123,8 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = { { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 }, { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, - { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 }, - { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, @@ -136,8 +134,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = { { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, - /* Updated in the 20180108 release; blacklist until we know otherwise */ - { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 }, /* Observed in the wild */ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, From b7451cb6159a573736cf674e586c75daabd62ae5 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 12 Feb 2018 15:27:34 +0000 Subject: [PATCH 2028/3326] x86/speculation: Correct Speculation Control microcode blacklist again commit d37fc6d360a404b208547ba112e7dabb6533c7fc upstream. Arjan points out that the Intel document only clears the 0xc2 microcode on *some* parts with CPUID 506E3 (INTEL_FAM6_SKYLAKE_DESKTOP stepping 3). For the Skylake H/S platform it's OK but for Skylake E3 which has the same CPUID it isn't (yet) cleared. So removing it from the blacklist was premature. Put it back for now. Also, Arjan assures me that the 0x84 microcode for Kaby Lake which was featured in one of the early revisions of the Intel document was never released to the public, and won't be until/unless it is also validated as safe. So those can change to 0x80 which is what all *other* versions of the doc have identified. Once the retrospective testing of existing public microcodes is done, we should be back into a mode where new microcodes are only released in batches and we shouldn't even need to update the blacklist for those anyway, so this tweaking of the list isn't expected to be a thing which keeps happening. 
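For context, the table being edited here is consulted with a model/stepping lookup along these lines (a simplified sketch, not the literal helper in intel.c; names are illustrative, and revisions at or below the listed one are the ones treated as bad):

        static bool has_bad_spectre_microcode(u8 model, u8 stepping, u32 rev)
        {
                int i;

                for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                        if (model == spectre_bad_microcodes[i].model &&
                            stepping == spectre_bad_microcodes[i].stepping)
                                return rev <= spectre_bad_microcodes[i].microcode;
                }

                return false;
        }

So adjusting a revision in the table changes which microcode levels are treated as bad for that particular model/stepping pair, nothing more.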
Requested-by: Arjan van de Ven Signed-off-by: David Woodhouse Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: arjan.van.de.ven@intel.com Cc: dave.hansen@intel.com Cc: kvm@vger.kernel.org Cc: pbonzini@redhat.com Link: http://lkml.kernel.org/r/1518449255-2182-1-git-send-email-dwmw@amazon.co.uk Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/intel.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index c05591f82f22ac..f5347173157c38 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -116,13 +116,14 @@ struct sku_microcode { u32 microcode; }; static const struct sku_microcode spectre_bad_microcodes[] = { - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 }, - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 }, - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 }, - { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 }, - { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 }, + { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, + { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, From bdc69a2ffa0105c1e736fbe4bfaa0b53bd3e915f Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 10 Feb 2018 23:39:23 +0000 Subject: [PATCH 2029/3326] Revert "x86/speculation: Simplify indirect_branch_prediction_barrier()" commit f208820a321f9b23d77d7eed89945d862d62a3ed upstream. This reverts commit 64e16720ea0879f8ab4547e3b9758936d483909b. We cannot call C functions like that, without marking all the call-clobbered registers as, well, clobbered. We might have got away with it for now because the __ibp_barrier() function was *fairly* unlikely to actually use any other registers. But no. Just no. 
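The rule the revert is enforcing can be illustrated with a minimal, hypothetical example: an asm statement that transfers control to a C function (my_barrier_func below is a made-up stand-in) must declare every register the x86-64 SysV ABI lets that function clobber, otherwise the compiler may keep live values in them across the call:

        static inline void call_from_asm_sketch(void)
        {
                asm volatile("call my_barrier_func"
                             : /* no outputs */
                             : /* no inputs */
                             : "rax", "rcx", "rdx", "rsi", "rdi",
                               "r8", "r9", "r10", "r11", "cc", "memory");
        }

The replacement code in the diff below sidesteps the problem entirely by doing the WRMSR inline and naming only the three registers it actually touches.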
Signed-off-by: David Woodhouse Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: arjan.van.de.ven@intel.com Cc: dave.hansen@intel.com Cc: jmattson@google.com Cc: karahmed@amazon.de Cc: kvm@vger.kernel.org Cc: pbonzini@redhat.com Cc: rkrcmar@redhat.com Cc: sironi@amazon.de Link: http://lkml.kernel.org/r/1518305967-31356-3-git-send-email-dwmw@amazon.co.uk Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 13 +++++++++---- arch/x86/include/asm/processor.h | 3 --- arch/x86/kernel/cpu/bugs.c | 6 ------ 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 4d57894635f242..300cc159b4a0a4 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -164,10 +164,15 @@ static inline void vmexit_fill_RSB(void) static inline void indirect_branch_prediction_barrier(void) { - alternative_input("", - "call __ibp_barrier", - X86_FEATURE_USE_IBPB, - ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory")); + asm volatile(ALTERNATIVE("", + "movl %[msr], %%ecx\n\t" + "movl %[val], %%eax\n\t" + "movl $0, %%edx\n\t" + "wrmsr", + X86_FEATURE_USE_IBPB) + : : [msr] "i" (MSR_IA32_PRED_CMD), + [val] "i" (PRED_CMD_IBPB) + : "eax", "ecx", "edx", "memory"); } #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c57c6e77c29ffb..db3e68bcfd9b1d 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -968,7 +968,4 @@ bool xen_set_default_idle(void); void stop_this_cpu(void *dummy); void df_debug(struct pt_regs *regs, long error_code); - -void __ibp_barrier(void); - #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 71949bf2de5ad3..61152aa533772d 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -337,9 +337,3 @@ ssize_t cpu_show_spectre_v2(struct device *dev, spectre_v2_module_string()); } #endif - -void __ibp_barrier(void) -{ - __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0); -} -EXPORT_SYMBOL_GPL(__ibp_barrier); From 474bd0b82e24e8b928910aba876c20c49098ae6d Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 10 Feb 2018 23:39:24 +0000 Subject: [PATCH 2030/3326] KVM/x86: Reduce retpoline performance impact in slot_handle_level_range(), by always inlining iterator helper methods MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 928a4c39484281f8ca366f53a1db79330d058401 upstream. With retpoline, tight loops of "call this function for every XXX" are very much pessimised by taking a prediction miss *every* time. This one is by far the biggest contributor to the guest launch time with retpoline. By marking the iterator slot_handle_…() functions always_inline, we can ensure that the indirect function call can be optimised away into a direct call and it actually generates slightly smaller code because some of the other conditionals can get optimised away too. Performance is now pretty close to what we see with nospectre_v2 on the command line. 
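The effect described above can be seen in miniature in this sketch (generic kernel-style code, not the KVM iterators): once the helper is forced inline at a call site whose callback is known at compile time, the per-iteration call becomes direct, or disappears, instead of going through a retpoline thunk:

        typedef bool (*item_handler)(unsigned long item);

        static __always_inline bool
        for_each_item(unsigned long start, unsigned long end, item_handler fn)
        {
                bool flush = false;
                unsigned long i;

                /*
                 * With the helper inlined, 'fn' is a compile-time constant
                 * and the compiler can emit a direct call per iteration.
                 */
                for (i = start; i < end; i++)
                        flush |= fn(i);

                return flush;
        }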
Suggested-by: Linus Torvalds Tested-by: Filippo Sironi Signed-off-by: David Woodhouse Reviewed-by: Filippo Sironi Acked-by: Paolo Bonzini Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: arjan.van.de.ven@intel.com Cc: dave.hansen@intel.com Cc: jmattson@google.com Cc: karahmed@amazon.de Cc: kvm@vger.kernel.org Cc: rkrcmar@redhat.com Link: http://lkml.kernel.org/r/1518305967-31356-4-git-send-email-dwmw@amazon.co.uk Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/mmu.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index beb7f8795bc1cc..ca000fc644bcc3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5063,7 +5063,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm) typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); /* The caller should hold mmu-lock before calling this function. */ -static bool +static __always_inline bool slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) @@ -5093,7 +5093,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, return flush; } -static bool +static __always_inline bool slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, bool lock_flush_tlb) @@ -5104,7 +5104,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5112,7 +5112,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5120,7 +5120,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { From 07a3a99ed7f99d207328fae0da73c83be0799722 Mon Sep 17 00:00:00 2001 From: KarimAllah Ahmed Date: Sat, 10 Feb 2018 23:39:25 +0000 Subject: [PATCH 2031/3326] X86/nVMX: Properly set spec_ctrl and pred_cmd before merging MSRs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 206587a9fb764d71f035dc7f6d3b6488f5d5b304 upstream. These two variables should check whether SPEC_CTRL and PRED_CMD are supposed to be passed through to L2 guests or not. While msr_write_intercepted_l01 would return 'true' if it is not passed through. So just invert the result of msr_write_intercepted_l01 to implement the correct semantics. 
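One way to read the corrected logic (this is only a rephrasing of the fix shown in the diff below, with locals renamed to state what they assert):

        /*
         * msr_write_intercepted_l01() returns true when writes are trapped,
         * so "passed through to L2" is its negation.
         */
        bool spec_ctrl_passthrough =
                !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
        bool pred_cmd_passthrough =
                !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);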
Signed-off-by: KarimAllah Ahmed Signed-off-by: David Woodhouse Reviewed-by: Jim Mattson Acked-by: Paolo Bonzini Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Radim Krčmář Cc: Thomas Gleixner Cc: arjan.van.de.ven@intel.com Cc: dave.hansen@intel.com Cc: kvm@vger.kernel.org Cc: sironi@amazon.de Fixes: 086e7d4118cc ("KVM: VMX: Allow direct access to MSR_IA32_SPEC_CTRL") Link: http://lkml.kernel.org/r/1518305967-31356-5-git-send-email-dwmw@amazon.co.uk Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0ea909ca45c2e3..9d03bca5cd1c9d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -10216,8 +10216,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, * updated to reflect this when L1 (or its L2s) actually write to * the MSR. */ - bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); - bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); + bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); + bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && !pred_cmd && !spec_ctrl) From a15bdf6579b23f62bb0130941158dd2f9b94f079 Mon Sep 17 00:00:00 2001 From: KarimAllah Ahmed Date: Sat, 10 Feb 2018 23:39:26 +0000 Subject: [PATCH 2032/3326] KVM/nVMX: Set the CPU_BASED_USE_MSR_BITMAPS if we have a valid L02 MSR bitmap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 3712caeb14dcb33fb4d5114f14c0beef10aca101 upstream. We either clear the CPU_BASED_USE_MSR_BITMAPS and end up intercepting all MSR accesses or create a valid L02 MSR bitmap and use that. This decision has to be made every time we evaluate whether we are going to generate the L02 MSR bitmap. Before commit: d28b387fb74d ("KVM/VMX: Allow direct access to MSR_IA32_SPEC_CTRL") ... this was probably OK since the decision was always identical. This is no longer the case now since the MSR bitmap might actually change once we decide to not intercept SPEC_CTRL and PRED_CMD. 
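Condensed, the decision (taken afresh on every evaluation, as the diff below shows) is symmetric: a successfully merged L02 bitmap turns the control bit on, anything else turns it off. This sketch omits the cpu_has_vmx_msr_bitmap() and nested_cpu_has() guards that the real code keeps:

        if (nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
                vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
                              CPU_BASED_USE_MSR_BITMAPS);
        else
                vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
                                CPU_BASED_USE_MSR_BITMAPS);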
Signed-off-by: KarimAllah Ahmed Signed-off-by: David Woodhouse Acked-by: Paolo Bonzini Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Radim Krčmář Cc: Thomas Gleixner Cc: arjan.van.de.ven@intel.com Cc: dave.hansen@intel.com Cc: jmattson@google.com Cc: kvm@vger.kernel.org Cc: sironi@amazon.de Link: http://lkml.kernel.org/r/1518305967-31356-6-git-send-email-dwmw@amazon.co.uk Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9d03bca5cd1c9d..dd35c6c505162d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -10127,7 +10127,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, if (cpu_has_vmx_msr_bitmap() && nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) && nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) - ; + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_USE_MSR_BITMAPS); else vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_USE_MSR_BITMAPS); From 2efd067aa4ef25386d4abbe2fe9b2c7f22cd9302 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 13 Feb 2018 09:03:08 +0100 Subject: [PATCH 2033/3326] x86/speculation: Clean up various Spectre related details commit 21e433bdb95bdf3aa48226fd3d33af608437f293 upstream. Harmonize all the Spectre messages so that a: dmesg | grep -i spectre ... gives us most Spectre related kernel boot messages. Also fix a few other details: - clarify a comment about firmware speculation control - s/KPTI/PTI - remove various line-breaks that made the code uglier Acked-by: David Woodhouse Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 61152aa533772d..4acf16a76d1e45 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) return SPECTRE_V2_CMD_NONE; else { - ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, - sizeof(arg)); + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); if (ret < 0) return SPECTRE_V2_CMD_AUTO; @@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) } if (i >= ARRAY_SIZE(mitigation_options)) { - pr_err("unknown option (%s). Switching to AUTO select\n", - mitigation_options[i].option); + pr_err("unknown option (%s). Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } } @@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && !IS_ENABLED(CONFIG_RETPOLINE)) { - pr_err("%s selected but not compiled in. Switching to AUTO select\n", - mitigation_options[i].option); + pr_err("%s selected but not compiled in. 
Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } @@ -256,14 +253,14 @@ static void __init spectre_v2_select_mitigation(void) goto retpoline_auto; break; } - pr_err("kernel not compiled with retpoline; no mitigation available!"); + pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); return; retpoline_auto: if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { retpoline_amd: if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { - pr_err("LFENCE not serializing. Switching to generic retpoline\n"); + pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); goto retpoline_generic; } mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : @@ -281,7 +278,7 @@ static void __init spectre_v2_select_mitigation(void) pr_info("%s\n", spectre_v2_strings[mode]); /* - * If neither SMEP or KPTI are available, there is a risk of + * If neither SMEP nor PTI are available, there is a risk of * hitting userspace addresses in the RSB after a context switch * from a shallow call stack to a deeper one. To prevent this fill * the entire RSB, even when using IBRS. @@ -295,21 +292,20 @@ static void __init spectre_v2_select_mitigation(void) if ((!boot_cpu_has(X86_FEATURE_PTI) && !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); - pr_info("Filling RSB on context switch\n"); + pr_info("Spectre v2 mitigation: Filling RSB on context switch\n"); } /* Initialize Indirect Branch Prediction Barrier if supported */ if (boot_cpu_has(X86_FEATURE_IBPB)) { setup_force_cpu_cap(X86_FEATURE_USE_IBPB); - pr_info("Enabling Indirect Branch Prediction Barrier\n"); + pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); } } #undef pr_fmt #ifdef CONFIG_SYSFS -ssize_t cpu_show_meltdown(struct device *dev, - struct device_attribute *attr, char *buf) +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) return sprintf(buf, "Not affected\n"); @@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device *dev, return sprintf(buf, "Vulnerable\n"); } -ssize_t cpu_show_spectre_v1(struct device *dev, - struct device_attribute *attr, char *buf) +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) return sprintf(buf, "Not affected\n"); return sprintf(buf, "Mitigation: __user pointer sanitization\n"); } -ssize_t cpu_show_spectre_v2(struct device *dev, - struct device_attribute *attr, char *buf) +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) return sprintf(buf, "Not affected\n"); From c59cce7943dff0fbafcaa4e71149ee1da879d03f Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 10 Feb 2018 19:13:58 +0100 Subject: [PATCH 2034/3326] PM / runtime: Update links_count also if !CONFIG_SRCU commit 433986c2c265d106d6a8e88006e0131fefc92b7b upstream. Commit baa8809f6097 (PM / runtime: Optimize the use of device links) added an invocation of pm_runtime_drop_link() to __device_link_del(). However there are two variants of that function, one for CONFIG_SRCU and another for !CONFIG_SRCU, and the commit only modified the former. Fixes: baa8809f6097 (PM / runtime: Optimize the use of device links) Cc: v4.10+ # v4.10+ Signed-off-by: Lukas Wunner Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/base/core.c b/drivers/base/core.c index 12ebd055724cd6..c8501cdb95f49b 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -313,6 +313,9 @@ static void __device_link_del(struct device_link *link) dev_info(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); + if (link->flags & DL_FLAG_PM_RUNTIME) + pm_runtime_drop_link(link->consumer); + list_del(&link->s_node); list_del(&link->c_node); device_link_free(link); From e578fedba81e72e195fe71f893828d4597714db5 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 12 Feb 2018 11:34:22 +0100 Subject: [PATCH 2035/3326] PM: cpuidle: Fix cpuidle_poll_state_init() prototype commit d7212cfb05ba802bea4dd6c90d61cfe6366ea224 upstream. Commit f85942207516 (x86: PM: Make APM idle driver initialize polling state) made apm_init() call cpuidle_poll_state_init(), but that only is defined for CONFIG_CPU_IDLE set, so make the empty stub of it available for CONFIG_CPU_IDLE unset too to fix the resulting build issue. Fixes: f85942207516 (x86: PM: Make APM idle driver initialize polling state) Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- include/linux/cpuidle.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 8f7788d23b5732..a6989e02d0a06b 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -225,7 +225,7 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, } #endif -#ifdef CONFIG_ARCH_HAS_CPU_RELAX +#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX) void cpuidle_poll_state_init(struct cpuidle_driver *drv); #else static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {} From d10d0bb86d974c41ccbbf1f22044e700597a84d0 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Feb 2018 17:18:11 -0800 Subject: [PATCH 2036/3326] x86/entry/64: Clear registers for exceptions/interrupts, to reduce speculation attack surface commit 3ac6d8c787b835b997eb23e43e09aa0895ef7d58 upstream. Clear the 'extra' registers on entering the 64-bit kernel for exceptions and interrupts. The common registers are not cleared since they are likely clobbered well before they can be exploited in a speculative execution attack. Originally-From: Andi Kleen Signed-off-by: Dan Williams Cc: Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/151787989146.7847.15749181712358213254.stgit@dwillia2-desk3.amr.corp.intel.com [ Made small improvements to the changelog and the code comments. ] Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 19 +++++++++++++++++++ arch/x86/entry/entry_64.S | 6 +++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 3f48f695d5e6ac..f4b129d4af42ff 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -147,6 +147,25 @@ For 32-bit we have the following conventions - kernel is built with UNWIND_HINT_REGS offset=\offset .endm + /* + * Sanitize registers of values that a speculation attack + * might otherwise want to exploit. 
The lower registers are + * likely clobbered well before they could be put to use in + * a speculative execution gadget: + */ + .macro CLEAR_REGS_NOSPEC + xorl %ebp, %ebp + xorl %ebx, %ebx + xorq %r8, %r8 + xorq %r9, %r9 + xorq %r10, %r10 + xorq %r11, %r11 + xorq %r12, %r12 + xorq %r13, %r13 + xorq %r14, %r14 + xorq %r15, %r15 + .endm + .macro POP_EXTRA_REGS popq %r15 popq %r14 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 62b381daadd29d..33737a709538a4 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -571,6 +571,7 @@ END(irq_entries_start) ALLOC_PT_GPREGS_ON_STACK SAVE_C_REGS SAVE_EXTRA_REGS + CLEAR_REGS_NOSPEC ENCODE_FRAME_POINTER testb $3, CS(%rsp) @@ -1127,6 +1128,7 @@ ENTRY(xen_failsafe_callback) ALLOC_PT_GPREGS_ON_STACK SAVE_C_REGS SAVE_EXTRA_REGS + CLEAR_REGS_NOSPEC ENCODE_FRAME_POINTER jmp error_exit END(xen_failsafe_callback) @@ -1172,6 +1174,7 @@ ENTRY(paranoid_entry) cld SAVE_C_REGS 8 SAVE_EXTRA_REGS 8 + CLEAR_REGS_NOSPEC ENCODE_FRAME_POINTER 8 movl $1, %ebx movl $MSR_GS_BASE, %ecx @@ -1224,8 +1227,8 @@ ENTRY(error_entry) cld SAVE_C_REGS 8 SAVE_EXTRA_REGS 8 + CLEAR_REGS_NOSPEC ENCODE_FRAME_POINTER 8 - xorl %ebx, %ebx testb $3, CS+8(%rsp) jz .Lerror_kernelspace @@ -1422,6 +1425,7 @@ ENTRY(nmi) pushq %r14 /* pt_regs->r14 */ pushq %r15 /* pt_regs->r15 */ UNWIND_HINT_REGS + CLEAR_REGS_NOSPEC ENCODE_FRAME_POINTER /* From 51209eec238ffd1b4edec999a024159bbae51a3d Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 11 Feb 2018 11:49:42 +0100 Subject: [PATCH 2037/3326] x86/entry/64: Merge SAVE_C_REGS and SAVE_EXTRA_REGS, remove unused extensions commit 2e3f0098bc45f710a2f4cbcc94b80a1fae7a99a1 upstream. All current code paths call SAVE_C_REGS and then immediately SAVE_EXTRA_REGS. Therefore, merge these two macros and order the MOV sequeneces properly. While at it, remove the macros to save all except specific registers, as these macros have been unused for a long time. Suggested-by: Linus Torvalds Signed-off-by: Dominik Brodowski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dan.j.williams@intel.com Link: http://lkml.kernel.org/r/20180211104949.12992-2-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 57 +++++++++++---------------------------- arch/x86/entry/entry_64.S | 12 +++------ 2 files changed, 19 insertions(+), 50 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index f4b129d4af42ff..8907a6593b42a2 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -101,49 +101,22 @@ For 32-bit we have the following conventions - kernel is built with addq $-(15*8), %rsp .endm - .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 - .if \r11 - movq %r11, 6*8+\offset(%rsp) - .endif - .if \r8910 - movq %r10, 7*8+\offset(%rsp) - movq %r9, 8*8+\offset(%rsp) - movq %r8, 9*8+\offset(%rsp) - .endif - .if \rax - movq %rax, 10*8+\offset(%rsp) - .endif - .if \rcx - movq %rcx, 11*8+\offset(%rsp) - .endif - movq %rdx, 12*8+\offset(%rsp) - movq %rsi, 13*8+\offset(%rsp) + .macro SAVE_REGS offset=0 movq %rdi, 14*8+\offset(%rsp) - UNWIND_HINT_REGS offset=\offset extra=0 - .endm - .macro SAVE_C_REGS offset=0 - SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 - .endm - .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0 - SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1 - .endm - .macro SAVE_C_REGS_EXCEPT_R891011 - SAVE_C_REGS_HELPER 0, 1, 1, 0, 0 - .endm - .macro SAVE_C_REGS_EXCEPT_RCX_R891011 - SAVE_C_REGS_HELPER 0, 1, 0, 0, 0 - .endm - .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11 - SAVE_C_REGS_HELPER 0, 0, 0, 1, 0 - .endm - - .macro SAVE_EXTRA_REGS offset=0 - movq %r15, 0*8+\offset(%rsp) - movq %r14, 1*8+\offset(%rsp) - movq %r13, 2*8+\offset(%rsp) - movq %r12, 3*8+\offset(%rsp) - movq %rbp, 4*8+\offset(%rsp) + movq %rsi, 13*8+\offset(%rsp) + movq %rdx, 12*8+\offset(%rsp) + movq %rcx, 11*8+\offset(%rsp) + movq %rax, 10*8+\offset(%rsp) + movq %r8, 9*8+\offset(%rsp) + movq %r9, 8*8+\offset(%rsp) + movq %r10, 7*8+\offset(%rsp) + movq %r11, 6*8+\offset(%rsp) movq %rbx, 5*8+\offset(%rsp) + movq %rbp, 4*8+\offset(%rsp) + movq %r12, 3*8+\offset(%rsp) + movq %r13, 2*8+\offset(%rsp) + movq %r14, 1*8+\offset(%rsp) + movq %r15, 0*8+\offset(%rsp) UNWIND_HINT_REGS offset=\offset .endm @@ -197,7 +170,7 @@ For 32-bit we have the following conventions - kernel is built with * is just setting the LSB, which makes it an invalid stack address and is also * a signal to the unwinder that it's a pt_regs pointer in disguise. * - * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts + * NOTE: This macro must be used *after* SAVE_REGS because it corrupts * the original rbp. 
*/ .macro ENCODE_FRAME_POINTER ptregs_offset=0 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 33737a709538a4..acd912cbd2e5d4 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -569,8 +569,7 @@ END(irq_entries_start) 1: ALLOC_PT_GPREGS_ON_STACK - SAVE_C_REGS - SAVE_EXTRA_REGS + SAVE_REGS CLEAR_REGS_NOSPEC ENCODE_FRAME_POINTER @@ -1126,8 +1125,7 @@ ENTRY(xen_failsafe_callback) UNWIND_HINT_IRET_REGS pushq $-1 /* orig_ax = -1 => not a system call */ ALLOC_PT_GPREGS_ON_STACK - SAVE_C_REGS - SAVE_EXTRA_REGS + SAVE_REGS CLEAR_REGS_NOSPEC ENCODE_FRAME_POINTER jmp error_exit @@ -1172,8 +1170,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 ENTRY(paranoid_entry) UNWIND_HINT_FUNC cld - SAVE_C_REGS 8 - SAVE_EXTRA_REGS 8 + SAVE_REGS 8 CLEAR_REGS_NOSPEC ENCODE_FRAME_POINTER 8 movl $1, %ebx @@ -1225,8 +1222,7 @@ END(paranoid_exit) ENTRY(error_entry) UNWIND_HINT_FUNC cld - SAVE_C_REGS 8 - SAVE_EXTRA_REGS 8 + SAVE_REGS 8 CLEAR_REGS_NOSPEC ENCODE_FRAME_POINTER 8 testb $3, CS+8(%rsp) From bb68735527648cc3439bcacab5e6a30ab8219c46 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 11 Feb 2018 11:49:43 +0100 Subject: [PATCH 2038/3326] x86/entry/64: Merge the POP_C_REGS and POP_EXTRA_REGS macros into a single POP_REGS macro commit 502af0d70843c2a9405d7ba1f79b4b0305aaf5f5 upstream. The two special, opencoded cases for POP_C_REGS can be handled by ASM macros. Signed-off-by: Dominik Brodowski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dan.j.williams@intel.com Link: http://lkml.kernel.org/r/20180211104949.12992-3-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 15 +++++++++++---- arch/x86/entry/entry_64.S | 26 ++++---------------------- 2 files changed, 15 insertions(+), 26 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 8907a6593b42a2..3bda31736a7b4c 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -139,25 +139,32 @@ For 32-bit we have the following conventions - kernel is built with xorq %r15, %r15 .endm - .macro POP_EXTRA_REGS + .macro POP_REGS pop_rdi=1 skip_r11rcx=0 popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx - .endm - - .macro POP_C_REGS + .if \skip_r11rcx + popq %rsi + .else popq %r11 + .endif popq %r10 popq %r9 popq %r8 popq %rax + .if \skip_r11rcx + popq %rsi + .else popq %rcx + .endif popq %rdx popq %rsi + .if \pop_rdi popq %rdi + .endif .endm .macro icebp diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index acd912cbd2e5d4..f4d72b81d9c32d 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -330,15 +330,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ UNWIND_HINT_EMPTY - POP_EXTRA_REGS - popq %rsi /* skip r11 */ - popq %r10 - popq %r9 - popq %r8 - popq %rax - popq %rsi /* skip rcx */ - popq %rdx - popq %rsi + POP_REGS pop_rdi=0 skip_r11rcx=1 /* * Now all regs are restored except RSP and RDI. @@ -631,15 +623,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) ud2 1: #endif - POP_EXTRA_REGS - popq %r11 - popq %r10 - popq %r9 - popq %r8 - popq %rax - popq %rcx - popq %rdx - popq %rsi + POP_REGS pop_rdi=0 /* * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. 
@@ -697,8 +681,7 @@ GLOBAL(restore_regs_and_return_to_kernel) ud2 1: #endif - POP_EXTRA_REGS - POP_C_REGS + POP_REGS addq $8, %rsp /* skip regs->orig_ax */ INTERRUPT_RETURN @@ -1655,8 +1638,7 @@ end_repeat_nmi: nmi_swapgs: SWAPGS_UNSAFE_STACK nmi_restore: - POP_EXTRA_REGS - POP_C_REGS + POP_REGS /* * Skip orig_ax and the "outermost" frame to point RSP at the "iret" From 47d9c905ae7af1c16b58a41be627a74027f5b601 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 11 Feb 2018 11:49:44 +0100 Subject: [PATCH 2039/3326] x86/entry/64: Interleave XOR register clearing with PUSH instructions commit f7bafa2b05ef25eda1d9179fd930b0330cf2b7d1 upstream. Same as is done for syscalls, interleave XOR with PUSH instructions for exceptions/interrupts, in order to minimize the cost of the additional instructions required for register clearing. Signed-off-by: Dominik Brodowski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dan.j.williams@intel.com Link: http://lkml.kernel.org/r/20180211104949.12992-4-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 40 +++++++++++++++++++-------------------- arch/x86/entry/entry_64.S | 30 ++++++++++++++++++++--------- 2 files changed, 40 insertions(+), 30 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 3bda31736a7b4c..a05cbb81268d9b 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -101,44 +101,42 @@ For 32-bit we have the following conventions - kernel is built with addq $-(15*8), %rsp .endm - .macro SAVE_REGS offset=0 + .macro SAVE_AND_CLEAR_REGS offset=0 + /* + * Save registers and sanitize registers of values that a + * speculation attack might otherwise want to exploit. The + * lower registers are likely clobbered well before they + * could be put to use in a speculative execution gadget. + * Interleave XOR with PUSH for better uop scheduling: + */ movq %rdi, 14*8+\offset(%rsp) movq %rsi, 13*8+\offset(%rsp) movq %rdx, 12*8+\offset(%rsp) movq %rcx, 11*8+\offset(%rsp) movq %rax, 10*8+\offset(%rsp) movq %r8, 9*8+\offset(%rsp) + xorq %r8, %r8 /* nospec r8 */ movq %r9, 8*8+\offset(%rsp) + xorq %r9, %r9 /* nospec r9 */ movq %r10, 7*8+\offset(%rsp) + xorq %r10, %r10 /* nospec r10 */ movq %r11, 6*8+\offset(%rsp) + xorq %r11, %r11 /* nospec r11 */ movq %rbx, 5*8+\offset(%rsp) + xorl %ebx, %ebx /* nospec rbx */ movq %rbp, 4*8+\offset(%rsp) + xorl %ebp, %ebp /* nospec rbp */ movq %r12, 3*8+\offset(%rsp) + xorq %r12, %r12 /* nospec r12 */ movq %r13, 2*8+\offset(%rsp) + xorq %r13, %r13 /* nospec r13 */ movq %r14, 1*8+\offset(%rsp) + xorq %r14, %r14 /* nospec r14 */ movq %r15, 0*8+\offset(%rsp) + xorq %r15, %r15 /* nospec r15 */ UNWIND_HINT_REGS offset=\offset .endm - /* - * Sanitize registers of values that a speculation attack - * might otherwise want to exploit. 
The lower registers are - * likely clobbered well before they could be put to use in - * a speculative execution gadget: - */ - .macro CLEAR_REGS_NOSPEC - xorl %ebp, %ebp - xorl %ebx, %ebx - xorq %r8, %r8 - xorq %r9, %r9 - xorq %r10, %r10 - xorq %r11, %r11 - xorq %r12, %r12 - xorq %r13, %r13 - xorq %r14, %r14 - xorq %r15, %r15 - .endm - .macro POP_REGS pop_rdi=1 skip_r11rcx=0 popq %r15 popq %r14 @@ -177,7 +175,7 @@ For 32-bit we have the following conventions - kernel is built with * is just setting the LSB, which makes it an invalid stack address and is also * a signal to the unwinder that it's a pt_regs pointer in disguise. * - * NOTE: This macro must be used *after* SAVE_REGS because it corrupts + * NOTE: This macro must be used *after* SAVE_AND_CLEAR_REGS because it corrupts * the original rbp. */ .macro ENCODE_FRAME_POINTER ptregs_offset=0 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index f4d72b81d9c32d..3a48fa6b65535a 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -561,8 +561,7 @@ END(irq_entries_start) 1: ALLOC_PT_GPREGS_ON_STACK - SAVE_REGS - CLEAR_REGS_NOSPEC + SAVE_AND_CLEAR_REGS ENCODE_FRAME_POINTER testb $3, CS(%rsp) @@ -1108,8 +1107,7 @@ ENTRY(xen_failsafe_callback) UNWIND_HINT_IRET_REGS pushq $-1 /* orig_ax = -1 => not a system call */ ALLOC_PT_GPREGS_ON_STACK - SAVE_REGS - CLEAR_REGS_NOSPEC + SAVE_AND_CLEAR_REGS ENCODE_FRAME_POINTER jmp error_exit END(xen_failsafe_callback) @@ -1153,8 +1151,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 ENTRY(paranoid_entry) UNWIND_HINT_FUNC cld - SAVE_REGS 8 - CLEAR_REGS_NOSPEC + SAVE_AND_CLEAR_REGS 8 ENCODE_FRAME_POINTER 8 movl $1, %ebx movl $MSR_GS_BASE, %ecx @@ -1205,8 +1202,7 @@ END(paranoid_exit) ENTRY(error_entry) UNWIND_HINT_FUNC cld - SAVE_REGS 8 - CLEAR_REGS_NOSPEC + SAVE_AND_CLEAR_REGS 8 ENCODE_FRAME_POINTER 8 testb $3, CS+8(%rsp) jz .Lerror_kernelspace @@ -1393,18 +1389,34 @@ ENTRY(nmi) pushq (%rdx) /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq %rax /* pt_regs->ax */ + /* + * Sanitize registers of values that a speculation attack + * might otherwise want to exploit. The lower registers are + * likely clobbered well before they could be put to use in + * a speculative execution gadget. Interleave XOR with PUSH + * for better uop scheduling: + */ pushq %r8 /* pt_regs->r8 */ + xorq %r8, %r8 /* nospec r8 */ pushq %r9 /* pt_regs->r9 */ + xorq %r9, %r9 /* nospec r9 */ pushq %r10 /* pt_regs->r10 */ + xorq %r10, %r10 /* nospec r10 */ pushq %r11 /* pt_regs->r11 */ + xorq %r11, %r11 /* nospec r11*/ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx*/ pushq %rbp /* pt_regs->rbp */ + xorl %ebp, %ebp /* nospec rbp*/ pushq %r12 /* pt_regs->r12 */ + xorq %r12, %r12 /* nospec r12*/ pushq %r13 /* pt_regs->r13 */ + xorq %r13, %r13 /* nospec r13*/ pushq %r14 /* pt_regs->r14 */ + xorq %r14, %r14 /* nospec r14*/ pushq %r15 /* pt_regs->r15 */ + xorq %r15, %r15 /* nospec r15*/ UNWIND_HINT_REGS - CLEAR_REGS_NOSPEC ENCODE_FRAME_POINTER /* From 3ce4863a44ff4e7e8d48d480853eea79cfd27bf6 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 11 Feb 2018 11:49:45 +0100 Subject: [PATCH 2040/3326] x86/entry/64: Introduce the PUSH_AND_CLEAN_REGS macro commit 3f01daecd545e818098d84fd1ad43e19a508d705 upstream. Those instances where ALLOC_PT_GPREGS_ON_STACK is called just before SAVE_AND_CLEAR_REGS can trivially be replaced by PUSH_AND_CLEAN_REGS. This macro uses PUSH instead of MOV and should therefore be faster, at least on newer CPUs. 
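[ For illustration only, not part of the patch: the two save sequences differ as sketched below in GNU as. The slot offsets and the interleaved "nospec" clears mirror the macros in the diffs; truncating the sequence to a few registers is ours. ]

	/* MOV-based save: pre-allocate pt_regs, then store into fixed slots */
	addq	$-(15*8), %rsp			/* ALLOC_PT_GPREGS_ON_STACK */
	movq	%rdi, 14*8(%rsp)
	movq	%rsi, 13*8(%rsp)
	/* ...remaining GP registers... */
	movq	%r8, 9*8(%rsp)
	xorq	%r8, %r8			/* nospec r8 */

	/* PUSH-based save: each push allocates its slot and stores in one step */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	/* ...remaining GP registers... */
	pushq	%r8				/* pt_regs->r8 */
	xorq	%r8, %r8			/* nospec r8 */
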
Suggested-by: Linus Torvalds Signed-off-by: Dominik Brodowski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dan.j.williams@intel.com Link: http://lkml.kernel.org/r/20180211104949.12992-5-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 36 ++++++++++++++++++++++++++++++++++++ arch/x86/entry/entry_64.S | 6 ++---- 2 files changed, 38 insertions(+), 4 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index a05cbb81268d9b..57b1b87a04f05f 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -137,6 +137,42 @@ For 32-bit we have the following conventions - kernel is built with UNWIND_HINT_REGS offset=\offset .endm + .macro PUSH_AND_CLEAR_REGS + /* + * Push registers and sanitize registers of values that a + * speculation attack might otherwise want to exploit. The + * lower registers are likely clobbered well before they + * could be put to use in a speculative execution gadget. + * Interleave XOR with PUSH for better uop scheduling: + */ + pushq %rdi /* pt_regs->di */ + pushq %rsi /* pt_regs->si */ + pushq %rdx /* pt_regs->dx */ + pushq %rcx /* pt_regs->cx */ + pushq %rax /* pt_regs->ax */ + pushq %r8 /* pt_regs->r8 */ + xorq %r8, %r8 /* nospec r8 */ + pushq %r9 /* pt_regs->r9 */ + xorq %r9, %r9 /* nospec r9 */ + pushq %r10 /* pt_regs->r10 */ + xorq %r10, %r10 /* nospec r10 */ + pushq %r11 /* pt_regs->r11 */ + xorq %r11, %r11 /* nospec r11*/ + pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx*/ + pushq %rbp /* pt_regs->rbp */ + xorl %ebp, %ebp /* nospec rbp*/ + pushq %r12 /* pt_regs->r12 */ + xorq %r12, %r12 /* nospec r12*/ + pushq %r13 /* pt_regs->r13 */ + xorq %r13, %r13 /* nospec r13*/ + pushq %r14 /* pt_regs->r14 */ + xorq %r14, %r14 /* nospec r14*/ + pushq %r15 /* pt_regs->r15 */ + xorq %r15, %r15 /* nospec r15*/ + UNWIND_HINT_REGS + .endm + .macro POP_REGS pop_rdi=1 skip_r11rcx=0 popq %r15 popq %r14 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 3a48fa6b65535a..cf8119c46c2a29 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -560,8 +560,7 @@ END(irq_entries_start) call switch_to_thread_stack 1: - ALLOC_PT_GPREGS_ON_STACK - SAVE_AND_CLEAR_REGS + PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER testb $3, CS(%rsp) @@ -1106,8 +1105,7 @@ ENTRY(xen_failsafe_callback) addq $0x30, %rsp UNWIND_HINT_IRET_REGS pushq $-1 /* orig_ax = -1 => not a system call */ - ALLOC_PT_GPREGS_ON_STACK - SAVE_AND_CLEAR_REGS + PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER jmp error_exit END(xen_failsafe_callback) From bd25388691c041903c084c68dc7da0fc45265d98 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 11 Feb 2018 11:49:46 +0100 Subject: [PATCH 2041/3326] x86/entry/64: Use PUSH_AND_CLEAN_REGS in more cases commit 30907fd13bb593202574bb20af58d67c70a1ee14 upstream. entry_SYSCALL_64_after_hwframe() and nmi() can be converted to use PUSH_AND_CLEAN_REGS instead of opencoded variants thereof. Due to the interleaving, the additional XOR-based clearing of R8 and R9 in entry_SYSCALL_64_after_hwframe() should not have any noticeable negative implications. Suggested-by: Linus Torvalds Signed-off-by: Dominik Brodowski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dan.j.williams@intel.com Link: http://lkml.kernel.org/r/20180211104949.12992-6-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 6 ++-- arch/x86/entry/entry_64.S | 65 ++------------------------------------- 2 files changed, 6 insertions(+), 65 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 57b1b87a04f05f..d6a97e2945ee7b 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -137,7 +137,7 @@ For 32-bit we have the following conventions - kernel is built with UNWIND_HINT_REGS offset=\offset .endm - .macro PUSH_AND_CLEAR_REGS + .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax /* * Push registers and sanitize registers of values that a * speculation attack might otherwise want to exploit. The @@ -147,9 +147,9 @@ For 32-bit we have the following conventions - kernel is built with */ pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ - pushq %rdx /* pt_regs->dx */ + pushq \rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ - pushq %rax /* pt_regs->ax */ + pushq \rax /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ xorq %r8, %r8 /* nospec r8 */ pushq %r9 /* pt_regs->r9 */ diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index cf8119c46c2a29..02c9b54203b677 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -223,35 +223,8 @@ ENTRY(entry_SYSCALL_64) pushq %rcx /* pt_regs->ip */ GLOBAL(entry_SYSCALL_64_after_hwframe) pushq %rax /* pt_regs->orig_ax */ - pushq %rdi /* pt_regs->di */ - pushq %rsi /* pt_regs->si */ - pushq %rdx /* pt_regs->dx */ - pushq %rcx /* pt_regs->cx */ - pushq $-ENOSYS /* pt_regs->ax */ - pushq %r8 /* pt_regs->r8 */ - pushq %r9 /* pt_regs->r9 */ - pushq %r10 /* pt_regs->r10 */ - /* - * Clear extra registers that a speculation attack might - * otherwise want to exploit. Interleave XOR with PUSH - * for better uop scheduling: - */ - xorq %r10, %r10 /* nospec r10 */ - pushq %r11 /* pt_regs->r11 */ - xorq %r11, %r11 /* nospec r11 */ - pushq %rbx /* pt_regs->rbx */ - xorl %ebx, %ebx /* nospec rbx */ - pushq %rbp /* pt_regs->rbp */ - xorl %ebp, %ebp /* nospec rbp */ - pushq %r12 /* pt_regs->r12 */ - xorq %r12, %r12 /* nospec r12 */ - pushq %r13 /* pt_regs->r13 */ - xorq %r13, %r13 /* nospec r13 */ - pushq %r14 /* pt_regs->r14 */ - xorq %r14, %r14 /* nospec r14 */ - pushq %r15 /* pt_regs->r15 */ - xorq %r15, %r15 /* nospec r15 */ - UNWIND_HINT_REGS + + PUSH_AND_CLEAR_REGS rax=$-ENOSYS TRACE_IRQS_OFF @@ -1382,39 +1355,7 @@ ENTRY(nmi) pushq 1*8(%rdx) /* pt_regs->rip */ UNWIND_HINT_IRET_REGS pushq $-1 /* pt_regs->orig_ax */ - pushq %rdi /* pt_regs->di */ - pushq %rsi /* pt_regs->si */ - pushq (%rdx) /* pt_regs->dx */ - pushq %rcx /* pt_regs->cx */ - pushq %rax /* pt_regs->ax */ - /* - * Sanitize registers of values that a speculation attack - * might otherwise want to exploit. The lower registers are - * likely clobbered well before they could be put to use in - * a speculative execution gadget. 
Interleave XOR with PUSH - * for better uop scheduling: - */ - pushq %r8 /* pt_regs->r8 */ - xorq %r8, %r8 /* nospec r8 */ - pushq %r9 /* pt_regs->r9 */ - xorq %r9, %r9 /* nospec r9 */ - pushq %r10 /* pt_regs->r10 */ - xorq %r10, %r10 /* nospec r10 */ - pushq %r11 /* pt_regs->r11 */ - xorq %r11, %r11 /* nospec r11*/ - pushq %rbx /* pt_regs->rbx */ - xorl %ebx, %ebx /* nospec rbx*/ - pushq %rbp /* pt_regs->rbp */ - xorl %ebp, %ebp /* nospec rbp*/ - pushq %r12 /* pt_regs->r12 */ - xorq %r12, %r12 /* nospec r12*/ - pushq %r13 /* pt_regs->r13 */ - xorq %r13, %r13 /* nospec r13*/ - pushq %r14 /* pt_regs->r14 */ - xorq %r14, %r14 /* nospec r14*/ - pushq %r15 /* pt_regs->r15 */ - xorq %r15, %r15 /* nospec r15*/ - UNWIND_HINT_REGS + PUSH_AND_CLEAR_REGS rdx=(%rdx) ENCODE_FRAME_POINTER /* From de66c3a3b0a6403702d6d6ead20ccd38bae615d7 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 11 Feb 2018 11:49:47 +0100 Subject: [PATCH 2042/3326] x86/entry/64: Get rid of the ALLOC_PT_GPREGS_ON_STACK and SAVE_AND_CLEAR_REGS macros commit dde3036d62ba3375840b10ab9ec0d568fd773b07 upstream. Previously, error_entry() and paranoid_entry() saved the GP registers onto stack space previously allocated by its callers. Combine these two steps in the callers, and use the generic PUSH_AND_CLEAR_REGS macro for that. This adds a significant amount ot text size. However, Ingo Molnar points out that: "these numbers also _very_ significantly over-represent the extra footprint. The assumptions that resulted in us compressing the IRQ entry code have changed very significantly with the new x86 IRQ allocation code we introduced in the last year: - IRQ vectors are usually populated in tightly clustered groups. With our new vector allocator code the typical per CPU allocation percentage on x86 systems is ~3 device vectors and ~10 fixed vectors out of ~220 vectors - i.e. a very low ~6% utilization (!). [...] The days where we allocated a lot of vectors on every CPU and the compression of the IRQ entry code text mattered are over. - Another issue is that only a small minority of vectors is frequent enough to actually matter to cache utilization in practice: 3-4 key IPIs and 1-2 device IRQs at most - and those vectors tend to be tightly clustered as well into about two groups, and are probably already on 2-3 cache lines in practice. For the common case of 'cache cold' IRQs it's the depth of the call chain and the fragmentation of the resulting I$ that should be the main performance limit - not the overall size of it. - The CPU side cost of IRQ delivery is still very expensive even in the best, most cached case, as in 'over a thousand cycles'. So much stuff is done that maybe contemporary x86 IRQ entry microcode already prefetches the IDT entry and its expected call target address."[*] [*] http://lkml.kernel.org/r/20180208094710.qnjixhm6hybebdv7@gmail.com The "testb $3, CS(%rsp)" instruction in the idtentry macro does not need modification. Previously, %rsp was manually decreased by 15*8; with this patch, %rsp is decreased by 15 pushq instructions. [jpoimboe@redhat.com: unwind hint improvements] Suggested-by: Linus Torvalds Signed-off-by: Dominik Brodowski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dan.j.williams@intel.com Link: http://lkml.kernel.org/r/20180211104949.12992-7-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 42 +-------------------------------------- arch/x86/entry/entry_64.S | 20 +++++++++---------- 2 files changed, 10 insertions(+), 52 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index d6a97e2945ee7b..59675010c9a060 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -97,46 +97,6 @@ For 32-bit we have the following conventions - kernel is built with #define SIZEOF_PTREGS 21*8 - .macro ALLOC_PT_GPREGS_ON_STACK - addq $-(15*8), %rsp - .endm - - .macro SAVE_AND_CLEAR_REGS offset=0 - /* - * Save registers and sanitize registers of values that a - * speculation attack might otherwise want to exploit. The - * lower registers are likely clobbered well before they - * could be put to use in a speculative execution gadget. - * Interleave XOR with PUSH for better uop scheduling: - */ - movq %rdi, 14*8+\offset(%rsp) - movq %rsi, 13*8+\offset(%rsp) - movq %rdx, 12*8+\offset(%rsp) - movq %rcx, 11*8+\offset(%rsp) - movq %rax, 10*8+\offset(%rsp) - movq %r8, 9*8+\offset(%rsp) - xorq %r8, %r8 /* nospec r8 */ - movq %r9, 8*8+\offset(%rsp) - xorq %r9, %r9 /* nospec r9 */ - movq %r10, 7*8+\offset(%rsp) - xorq %r10, %r10 /* nospec r10 */ - movq %r11, 6*8+\offset(%rsp) - xorq %r11, %r11 /* nospec r11 */ - movq %rbx, 5*8+\offset(%rsp) - xorl %ebx, %ebx /* nospec rbx */ - movq %rbp, 4*8+\offset(%rsp) - xorl %ebp, %ebp /* nospec rbp */ - movq %r12, 3*8+\offset(%rsp) - xorq %r12, %r12 /* nospec r12 */ - movq %r13, 2*8+\offset(%rsp) - xorq %r13, %r13 /* nospec r13 */ - movq %r14, 1*8+\offset(%rsp) - xorq %r14, %r14 /* nospec r14 */ - movq %r15, 0*8+\offset(%rsp) - xorq %r15, %r15 /* nospec r15 */ - UNWIND_HINT_REGS offset=\offset - .endm - .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax /* * Push registers and sanitize registers of values that a @@ -211,7 +171,7 @@ For 32-bit we have the following conventions - kernel is built with * is just setting the LSB, which makes it an invalid stack address and is also * a signal to the unwinder that it's a pt_regs pointer in disguise. * - * NOTE: This macro must be used *after* SAVE_AND_CLEAR_REGS because it corrupts + * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts * the original rbp. */ .macro ENCODE_FRAME_POINTER ptregs_offset=0 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 02c9b54203b677..8a3280bf79a13c 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -867,7 +867,9 @@ ENTRY(\sym) pushq $-1 /* ORIG_RAX: no syscall to restart */ .endif - ALLOC_PT_GPREGS_ON_STACK + /* Save all registers in pt_regs */ + PUSH_AND_CLEAR_REGS + ENCODE_FRAME_POINTER .if \paranoid < 2 testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ @@ -1115,15 +1117,12 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 #endif /* - * Save all registers in pt_regs, and switch gs if needed. + * Switch gs if needed. * Use slow, but surefire "are we in kernel?" check. 
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise */ ENTRY(paranoid_entry) - UNWIND_HINT_FUNC cld - SAVE_AND_CLEAR_REGS 8 - ENCODE_FRAME_POINTER 8 movl $1, %ebx movl $MSR_GS_BASE, %ecx rdmsr @@ -1136,7 +1135,7 @@ ENTRY(paranoid_entry) SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 ret -END(paranoid_entry) +ENDPROC(paranoid_entry) /* * "Paranoid" exit path from exception stack. This is invoked @@ -1167,14 +1166,12 @@ ENTRY(paranoid_exit) END(paranoid_exit) /* - * Save all registers in pt_regs, and switch gs if needed. + * Switch gs if needed. * Return: EBX=0: came from user mode; EBX=1: otherwise */ ENTRY(error_entry) - UNWIND_HINT_FUNC + UNWIND_HINT_REGS offset=8 cld - SAVE_AND_CLEAR_REGS 8 - ENCODE_FRAME_POINTER 8 testb $3, CS+8(%rsp) jz .Lerror_kernelspace @@ -1565,7 +1562,8 @@ end_repeat_nmi: * frame to point back to repeat_nmi. */ pushq $-1 /* ORIG_RAX: no syscall to restart */ - ALLOC_PT_GPREGS_ON_STACK + PUSH_AND_CLEAR_REGS + ENCODE_FRAME_POINTER /* * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit From a816dd2fa2dcbde206bc67ebbdf8bbf75a54936c Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 11 Feb 2018 11:49:48 +0100 Subject: [PATCH 2043/3326] x86/entry/64: Indent PUSH_AND_CLEAR_REGS and POP_REGS properly commit 92816f571af81e9a71cc6f3dc8ce1e2fcdf7b6b8 upstream. ... same as the other macros in arch/x86/entry/calling.h Signed-off-by: Dominik Brodowski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dan.j.williams@intel.com Link: http://lkml.kernel.org/r/20180211104949.12992-8-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 59675010c9a060..6985440c68fa7b 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with #define SIZEOF_PTREGS 21*8 - .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax +.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax /* * Push registers and sanitize registers of values that a * speculation attack might otherwise want to exploit. The @@ -131,9 +131,9 @@ For 32-bit we have the following conventions - kernel is built with pushq %r15 /* pt_regs->r15 */ xorq %r15, %r15 /* nospec r15*/ UNWIND_HINT_REGS - .endm +.endm - .macro POP_REGS pop_rdi=1 skip_r11rcx=0 +.macro POP_REGS pop_rdi=1 skip_r11rcx=0 popq %r15 popq %r14 popq %r13 @@ -163,7 +163,7 @@ For 32-bit we have the following conventions - kernel is built with .macro icebp .byte 0xf1 - .endm +.endm /* * This is a sneaky trick to help the unwinder find pt_regs on the stack. The From bdcf05c64a64743f512f12b17d4f9d8b14d2ad31 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 12 Feb 2018 11:45:03 -0600 Subject: [PATCH 2044/3326] x86/entry/64: Fix paranoid_entry() frame pointer warning commit b3ccefaed922529e6a67de7b30af5aa38c76ace9 upstream. With the following commit: f09d160992d1 ("x86/entry/64: Get rid of the ALLOC_PT_GPREGS_ON_STACK and SAVE_AND_CLEAR_REGS macros") ... 
one of my suggested improvements triggered a frame pointer warning: arch/x86/entry/entry_64.o: warning: objtool: paranoid_entry()+0x11: call without frame pointer save/setup The warning is correct for the build-time code, but it's actually not relevant at runtime because of paravirt patching. The paravirt swapgs call gets replaced with either a SWAPGS instruction or NOPs at runtime. Go back to the previous behavior by removing the ELF function annotation for paranoid_entry() and adding an unwind hint, which effectively silences the warning. Reported-by: kbuild test robot Signed-off-by: Josh Poimboeuf Cc: Dominik Brodowski Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: kbuild-all@01.org Cc: tipbuild@zytor.com Fixes: f09d160992d1 ("x86/entry/64: Get rid of the ALLOC_PT_GPREGS_ON_STACK and SAVE_AND_CLEAR_REGS macros") Link: http://lkml.kernel.org/r/20180212174503.5acbymg5z6p32snu@treble Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 8a3280bf79a13c..c5ef704df06930 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1122,6 +1122,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise */ ENTRY(paranoid_entry) + UNWIND_HINT_FUNC cld movl $1, %ebx movl $MSR_GS_BASE, %ecx @@ -1135,7 +1136,7 @@ ENTRY(paranoid_entry) SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 ret -ENDPROC(paranoid_entry) +END(paranoid_entry) /* * "Paranoid" exit path from exception stack. This is invoked From 5cf7b883bcbaca7b85002770915e39eb6d510309 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 12 Feb 2018 21:13:18 +0100 Subject: [PATCH 2045/3326] x86/entry/64: Remove the unused 'icebp' macro commit b498c261107461d5c42140dfddd05df83d8ca078 upstream. That macro was touched around 2.5.8 times, judging by the full history linux repo, but it was unused even then. Get rid of it already. Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux@dominikbrodowski.net Link: http://lkml.kernel.org/r/20180212201318.GD14640@pd.tnic Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/calling.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 6985440c68fa7b..dce7092ab24a24 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -159,10 +159,6 @@ For 32-bit we have the following conventions - kernel is built with .if \pop_rdi popq %rdi .endif - .endm - - .macro icebp - .byte 0xf1 .endm /* From 301e6fe0915c8b2eab4da812de4f16c583e5b113 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 11 Feb 2018 12:10:10 +0100 Subject: [PATCH 2046/3326] selftests/x86: Fix vDSO selftest segfault for vsyscall=none commit 198ee8e17502da2634f7366395db1d77630e0219 upstream. The vDSO selftest tries to execute a vsyscall unconditionally, even if it is not present on the test system (e.g. if booted with vsyscall=none or with CONFIG_LEGACY_VSYSCALL_NONE=y set. 
Fix this by copying (and tweaking) the vsyscall check from test_vsyscall.c Signed-off-by: Dominik Brodowski Cc: Andrew Lutomirski Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kselftest@vger.kernel.org Cc: shuah@kernel.org Link: http://lkml.kernel.org/r/20180211111013.16888-3-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/test_vdso.c | 50 +++++++++++++++++++++---- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c index 29973cde06d3db..558c8207e7b9c0 100644 --- a/tools/testing/selftests/x86/test_vdso.c +++ b/tools/testing/selftests/x86/test_vdso.c @@ -28,18 +28,52 @@ int nerrs = 0; +typedef long (*getcpu_t)(unsigned *, unsigned *, void *); + +getcpu_t vgetcpu; +getcpu_t vdso_getcpu; + +static void *vsyscall_getcpu(void) +{ #ifdef __x86_64__ -# define VSYS(x) (x) + FILE *maps; + char line[128]; + bool found = false; + + maps = fopen("/proc/self/maps", "r"); + if (!maps) /* might still be present, but ignore it here, as we test vDSO not vsyscall */ + return NULL; + + while (fgets(line, sizeof(line), maps)) { + char r, x; + void *start, *end; + char name[128]; + if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s", + &start, &end, &r, &x, name) != 5) + continue; + + if (strcmp(name, "[vsyscall]")) + continue; + + /* assume entries are OK, as we test vDSO here not vsyscall */ + found = true; + break; + } + + fclose(maps); + + if (!found) { + printf("Warning: failed to find vsyscall getcpu\n"); + return NULL; + } + return (void *) (0xffffffffff600800); #else -# define VSYS(x) 0 + return NULL; #endif +} -typedef long (*getcpu_t)(unsigned *, unsigned *, void *); - -const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800); -getcpu_t vdso_getcpu; -void fill_function_pointers() +static void fill_function_pointers() { void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); @@ -54,6 +88,8 @@ void fill_function_pointers() vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu"); if (!vdso_getcpu) printf("Warning: failed to find getcpu in vDSO\n"); + + vgetcpu = (getcpu_t) vsyscall_getcpu(); } static long sys_getcpu(unsigned * cpu, unsigned * node, From 3eb95d5187a3df8c0aa1d5f531a6b9f567fcd9a7 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 11 Feb 2018 21:59:24 +0100 Subject: [PATCH 2047/3326] selftests/x86: Clean up and document sscanf() usage commit d8e92de8ef952bed88c56c7a44c02d8dcae0984e upstream. Replace a couple of magically connected buffer length literal constants with a common definition that makes their relationship obvious. Also document why our sscanf() usage is safe. No intended functional changes. 
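[ As a standalone reference, a minimal C sketch of the parsing pattern this comment documents. MAPS_LINE_LEN, the sscanf() format string, and the "[vsyscall]" match mirror the selftests; the function name and surrounding scaffolding are illustrative only. ]

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* max length of lines in /proc/self/maps - anything longer is skipped */
#define MAPS_LINE_LEN 128

static bool maps_has_vsyscall(void)
{
	FILE *maps = fopen("/proc/self/maps", "r");
	char line[MAPS_LINE_LEN];
	bool found = false;

	if (!maps)
		return false;

	while (fgets(line, MAPS_LINE_LEN, maps)) {
		char r, x;
		void *start, *end;
		char name[MAPS_LINE_LEN];	/* as large as 'line', so %s cannot overflow */

		if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
			   &start, &end, &r, &x, name) != 5)
			continue;

		if (!strcmp(name, "[vsyscall]")) {
			found = true;
			break;
		}
	}

	fclose(maps);
	return found;
}
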
Suggested-by: Ingo Molnar Signed-off-by: Dominik Brodowski Cc: Andrew Lutomirski Cc: Andy Lutomirski Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kselftest@vger.kernel.org Cc: shuah@kernel.org Link: http://lkml.kernel.org/r/20180211205924.GA23210@light.dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/test_vdso.c | 11 ++++++++--- tools/testing/selftests/x86/test_vsyscall.c | 11 ++++++++--- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c index 558c8207e7b9c0..2352590117042e 100644 --- a/tools/testing/selftests/x86/test_vdso.c +++ b/tools/testing/selftests/x86/test_vdso.c @@ -26,6 +26,9 @@ # endif #endif +/* max length of lines in /proc/self/maps - anything longer is skipped here */ +#define MAPS_LINE_LEN 128 + int nerrs = 0; typedef long (*getcpu_t)(unsigned *, unsigned *, void *); @@ -37,17 +40,19 @@ static void *vsyscall_getcpu(void) { #ifdef __x86_64__ FILE *maps; - char line[128]; + char line[MAPS_LINE_LEN]; bool found = false; maps = fopen("/proc/self/maps", "r"); if (!maps) /* might still be present, but ignore it here, as we test vDSO not vsyscall */ return NULL; - while (fgets(line, sizeof(line), maps)) { + while (fgets(line, MAPS_LINE_LEN, maps)) { char r, x; void *start, *end; - char name[128]; + char name[MAPS_LINE_LEN]; + + /* sscanf() is safe here as strlen(name) >= strlen(line) */ if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s", &start, &end, &r, &x, name) != 5) continue; diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c index 6e0bd52ad53d06..003b6c55b10e42 100644 --- a/tools/testing/selftests/x86/test_vsyscall.c +++ b/tools/testing/selftests/x86/test_vsyscall.c @@ -33,6 +33,9 @@ # endif #endif +/* max length of lines in /proc/self/maps - anything longer is skipped here */ +#define MAPS_LINE_LEN 128 + static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags) { @@ -98,7 +101,7 @@ static int init_vsys(void) #ifdef __x86_64__ int nerrs = 0; FILE *maps; - char line[128]; + char line[MAPS_LINE_LEN]; bool found = false; maps = fopen("/proc/self/maps", "r"); @@ -108,10 +111,12 @@ static int init_vsys(void) return 0; } - while (fgets(line, sizeof(line), maps)) { + while (fgets(line, MAPS_LINE_LEN, maps)) { char r, x; void *start, *end; - char name[128]; + char name[MAPS_LINE_LEN]; + + /* sscanf() is safe here as strlen(name) >= strlen(line) */ if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s", &start, &end, &r, &x, name) != 5) continue; From e6eced764e1a18665212c349f100188a337d8fa8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 13 Feb 2018 08:26:17 +0100 Subject: [PATCH 2048/3326] selftests/x86/pkeys: Remove unused functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit ce676638fe7b284132a7d7d5e7e7ad81bab9947e upstream. 
This also gets rid of two build warnings: protection_keys.c: In function ‘dumpit’: protection_keys.c:419:3: warning: ignoring return value of ‘write’, declared with attribute warn_unused_result [-Wunused-result] write(1, buf, nr_read); ^~~~~~~~~~~~~~~~~~~~~~ Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Dave Hansen Cc: Shuah Khan Cc: Andy Lutomirski Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/protection_keys.c | 28 ------------------- 1 file changed, 28 deletions(-) diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c index 7a1cc0e56d2d6a..6cbb83b47150c0 100644 --- a/tools/testing/selftests/x86/protection_keys.c +++ b/tools/testing/selftests/x86/protection_keys.c @@ -393,34 +393,6 @@ pid_t fork_lazy_child(void) return forkret; } -void davecmp(void *_a, void *_b, int len) -{ - int i; - unsigned long *a = _a; - unsigned long *b = _b; - - for (i = 0; i < len / sizeof(*a); i++) { - if (a[i] == b[i]) - continue; - - dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]); - } -} - -void dumpit(char *f) -{ - int fd = open(f, O_RDONLY); - char buf[100]; - int nr_read; - - dprintf2("maps fd: %d\n", fd); - do { - nr_read = read(fd, &buf[0], sizeof(buf)); - write(1, buf, nr_read); - } while (nr_read > 0); - close(fd); -} - #define PKEY_DISABLE_ACCESS 0x1 #define PKEY_DISABLE_WRITE 0x2 From cf4db6342da4c433b814ccdad0461e1ca7477695 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 11 Feb 2018 12:10:11 +0100 Subject: [PATCH 2049/3326] selftests/x86: Do not rely on "int $0x80" in test_mremap_vdso.c commit 2cbc0d66de0480449c75636f55697c7ff3af61fc upstream. On 64-bit builds, we should not rely on "int $0x80" working (it only does if CONFIG_IA32_EMULATION=y is enabled). Without this patch, the move test may succeed, but the "int $0x80" causes a segfault, resulting in a false negative output of this self-test. Signed-off-by: Dominik Brodowski Cc: Andy Lutomirski Cc: Dmitry Safonov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kselftest@vger.kernel.org Cc: shuah@kernel.org Link: http://lkml.kernel.org/r/20180211111013.16888-4-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/test_mremap_vdso.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c index bf0d687c7db75e..64f11c8d9b767d 100644 --- a/tools/testing/selftests/x86/test_mremap_vdso.c +++ b/tools/testing/selftests/x86/test_mremap_vdso.c @@ -90,8 +90,12 @@ int main(int argc, char **argv, char **envp) vdso_size += PAGE_SIZE; } +#ifdef __i386__ /* Glibc is likely to explode now - exit with raw syscall */ asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret)); +#else /* __x86_64__ */ + syscall(SYS_exit, ret); +#endif } else { int status; From 8520ea2a043842b3b52d95210ee774ed16748ef5 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Tue, 13 Feb 2018 09:13:21 +0100 Subject: [PATCH 2050/3326] selftests/x86: Do not rely on "int $0x80" in single_step_syscall.c commit 4105c69703cdeba76f384b901712c9397b04e9c2 upstream. On 64-bit builds, we should not rely on "int $0x80" working (it only does if CONFIG_IA32_EMULATION=y is enabled). 
To keep the "Set TF and check int80" test running on 64-bit installs with CONFIG_IA32_EMULATION=y enabled, build this test only if we can also build 32-bit binaries (which should be a good approximation for that). Signed-off-by: Dominik Brodowski Cc: Andy Lutomirski Cc: Dmitry Safonov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kselftest@vger.kernel.org Cc: shuah@kernel.org Link: http://lkml.kernel.org/r/20180211111013.16888-5-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/Makefile | 2 ++ tools/testing/selftests/x86/single_step_syscall.c | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 91fbfa8fdc150b..73b8ef665c987a 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -30,11 +30,13 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c) ifeq ($(CAN_BUILD_I386),1) all: all_32 TEST_PROGS += $(BINARIES_32) +EXTRA_CFLAGS += -DCAN_BUILD_32 endif ifeq ($(CAN_BUILD_X86_64),1) all: all_64 TEST_PROGS += $(BINARIES_64) +EXTRA_CFLAGS += -DCAN_BUILD_64 endif all_32: $(BINARIES_32) diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c index a48da95c18fdf1..ddfdd635de16c6 100644 --- a/tools/testing/selftests/x86/single_step_syscall.c +++ b/tools/testing/selftests/x86/single_step_syscall.c @@ -119,7 +119,9 @@ static void check_result(void) int main() { +#ifdef CAN_BUILD_32 int tmp; +#endif sethandler(SIGTRAP, sigtrap, 0); @@ -139,12 +141,13 @@ int main() : : "c" (post_nop) : "r11"); check_result(); #endif - +#ifdef CAN_BUILD_32 printf("[RUN]\tSet TF and check int80\n"); set_eflags(get_eflags() | X86_EFLAGS_TF); asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid) : INT80_CLOBBERS); check_result(); +#endif /* * This test is particularly interesting if fast syscalls use From 410d273ecc8ed1f30ef0d06acb2d22ffa29cd732 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Tue, 13 Feb 2018 09:15:19 +0100 Subject: [PATCH 2051/3326] selftests/x86: Disable tests requiring 32-bit support on pure 64-bit systems commit 9279ddf23ce78ff2676e8e8e19fec0f022c26d04 upstream. The ldt_gdt and ptrace_syscall selftests, even in their 64-bit variant, use hard-coded 32-bit syscall numbers and call "int $0x80". This will fail on 64-bit systems with CONFIG_IA32_EMULATION=y disabled. Therefore, do not build these tests if we cannot build 32-bit binaries (which should be a good approximation for CONFIG_IA32_EMULATION=y being enabled). 
Signed-off-by: Dominik Brodowski Cc: Andy Lutomirski Cc: Dmitry Safonov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kselftest@vger.kernel.org Cc: shuah@kernel.org Link: http://lkml.kernel.org/r/20180211111013.16888-6-linux@dominikbrodowski.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/Makefile | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 73b8ef665c987a..aa6e2d7f6a1fdc 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -5,16 +5,26 @@ include ../lib.mk .PHONY: all all_32 all_64 warn_32bit_failure clean -TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \ - check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \ +UNAME_M := $(shell uname -m) +CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) +CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c) + +TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ + check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \ protection_keys test_vdso test_vsyscall TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ test_FCMOV test_FCOMI test_FISTTP \ vdso_restorer TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip +# Some selftests require 32bit support enabled also on 64bit systems +TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall -TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) +TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) $(TARGETS_C_32BIT_NEEDED) TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY) +ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),11) +TARGETS_C_64BIT_ALL += $(TARGETS_C_32BIT_NEEDED) +endif + BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32) BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64) @@ -23,10 +33,6 @@ BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie -UNAME_M := $(shell uname -m) -CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) -CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c) - ifeq ($(CAN_BUILD_I386),1) all: all_32 TEST_PROGS += $(BINARIES_32) From 842a0d95ef16d8e3f85cd5316efc8c25934ff3ce Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 8 Feb 2018 17:09:25 -0600 Subject: [PATCH 2052/3326] objtool: Fix segfault in ignore_unreachable_insn() commit fe24e27128252c230a34a6c628da2bf1676781ea upstream. Peter Zijlstra's patch for converting WARN() to use UD2 triggered a bunch of false "unreachable instruction" warnings, which then triggered a seg fault in ignore_unreachable_insn(). The seg fault happened when it tried to dereference a NULL 'insn->func' pointer. Thanks to static_cpu_has(), some functions can jump to a non-function area in the .altinstr_aux section. That breaks ignore_unreachable_insn()'s assumption that it's always inside the original function. Make sure ignore_unreachable_insn() only follows jumps within the current function. Reported-by: Borislav Petkov Signed-off-by: Josh Poimboeuf Signed-off-by: Peter Zijlstra (Intel) Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: kbuild test robot Link: http://lkml.kernel.org/r/bace77a60d5af9b45eddb8f8fb9c776c8de657ef.1518130694.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/check.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 2e458eb45586c4..c7fb5c2392ee6f 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -1935,13 +1935,19 @@ static bool ignore_unreachable_insn(struct instruction *insn) if (is_kasan_insn(insn) || is_ubsan_insn(insn)) return true; - if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) { - insn = insn->jump_dest; - continue; + if (insn->type == INSN_JUMP_UNCONDITIONAL) { + if (insn->jump_dest && + insn->jump_dest->func == insn->func) { + insn = insn->jump_dest; + continue; + } + + break; } if (insn->offset + insn->len >= insn->func->offset + insn->func->len) break; + insn = list_next_entry(insn, list); } From 4e0067c22d6b38a7b882e560b3db4c103166575e Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 8 Feb 2018 17:09:26 -0600 Subject: [PATCH 2053/3326] x86/debug, objtool: Annotate WARN()-related UD2 as reachable commit 2b5db66862b95532cb6cca8165ae6eb73633cf85 upstream. By default, objtool assumes that a UD2 is a dead end. This is mainly because GCC 7+ sometimes inserts a UD2 when it detects a divide-by-zero condition. Now that WARN() is moving back to UD2, annotate the code after it as reachable so objtool can follow the code flow. Reported-by: Borislav Petkov Signed-off-by: Josh Poimboeuf Signed-off-by: Peter Zijlstra (Intel) Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: kbuild test robot Link: http://lkml.kernel.org/r/0e483379275a42626ba8898117f918e1bf661e40.1518130694.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/bug.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 34d99af4399445..71e6f4bf9161d8 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -77,7 +77,11 @@ do { \ unreachable(); \ } while (0) -#define __WARN_FLAGS(flags) _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags)) +#define __WARN_FLAGS(flags) \ +do { \ + _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags)); \ + annotate_reachable(); \ +} while (0) #include From 41d315482500e9612d1906b7a057fbe9c4b6c676 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 9 Feb 2018 13:16:59 +0100 Subject: [PATCH 2054/3326] x86/debug: Use UD2 for WARN() commit 3b3a371cc9bc980429baabe0a8e5f307f3d1f463 upstream. Since the Intel SDM added an ModR/M byte to UD0 and binutils followed that specification, we now cannot disassemble our kernel anymore. This now means Intel and AMD disagree on the encoding of UD0. And instead of playing games with additional bytes that are valid ModR/M and single byte instructions (0xd6 for instance), simply use UD2 for both WARN() and BUG(). Requested-by: Linus Torvalds Signed-off-by: Peter Zijlstra (Intel) Acked-by: Linus Torvalds Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180208194406.GD25181@hirez.programming.kicks-ass.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/bug.h | 15 ++++++--------- arch/x86/kernel/traps.c | 2 +- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 71e6f4bf9161d8..6804d66427673e 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -5,23 +5,20 @@ #include /* - * Since some emulators terminate on UD2, we cannot use it for WARN. - * Since various instruction decoders disagree on the length of UD1, - * we cannot use it either. So use UD0 for WARN. + * Despite that some emulators terminate on UD2, we use it for WARN(). * - * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas - * our kernel decoder thinks it takes a ModRM byte, which seems consistent - * with various things like the Intel SDM instruction encoding rules) + * Since various instruction decoders/specs disagree on the encoding of + * UD0/UD1. */ -#define ASM_UD0 ".byte 0x0f, 0xff" +#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */ #define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */ #define ASM_UD2 ".byte 0x0f, 0x0b" #define INSN_UD0 0xff0f #define INSN_UD2 0x0b0f -#define LEN_UD0 2 +#define LEN_UD2 2 #ifdef CONFIG_GENERIC_BUG @@ -79,7 +76,7 @@ do { \ #define __WARN_FLAGS(flags) \ do { \ - _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags)); \ + _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \ annotate_reachable(); \ } while (0) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index b33e860d32fe80..0ddaeb2184a6f0 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -181,7 +181,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr) break; case BUG_TRAP_TYPE_WARN: - regs->ip += LEN_UD0; + regs->ip += LEN_UD2; return 1; } From 9d4cb4dc7a110ef253e99808b66b45c726ba5596 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 6 Feb 2018 18:22:40 -0800 Subject: [PATCH 2055/3326] x86/speculation: Fix up array_index_nospec_mask() asm constraint commit be3233fbfcb8f5acb6e3bcd0895c3ef9e100d470 upstream. Allow the compiler to handle @size as an immediate value or memory directly rather than allocating a register. Reported-by: Linus Torvalds Signed-off-by: Dan Williams Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/151797010204.1289.1510000292250184993.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/barrier.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 1e7c955b630358..4db77731e13012 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, asm ("cmp %1,%2; sbb %0,%0;" :"=r" (mask) - :"r"(size),"r" (index) + :"g"(size),"r" (index) :"cc"); return mask; } From 8b4cdbbb29d4eb34f02d5e1419789b53c3cbb930 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 5 Feb 2018 14:16:06 +0000 Subject: [PATCH 2056/3326] nospec: Move array_index_nospec() parameter checking into separate macro commit 8fa80c503b484ddc1abbd10c7cb2ab81f3824a50 upstream. 
For architectures providing their own implementation of array_index_mask_nospec() in asm/barrier.h, attempting to use WARN_ONCE() to complain about out-of-range parameters using WARN_ON() results in a mess of mutually-dependent include files. Rather than unpick the dependencies, simply have the core code in nospec.h perform the checking for us. Signed-off-by: Will Deacon Acked-by: Thomas Gleixner Cc: Dan Williams Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1517840166-15399-1-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- include/linux/nospec.h | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/include/linux/nospec.h b/include/linux/nospec.h index b99bced39ac2fa..fbc98e2c8228d0 100644 --- a/include/linux/nospec.h +++ b/include/linux/nospec.h @@ -19,20 +19,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, unsigned long size) { - /* - * Warn developers about inappropriate array_index_nospec() usage. - * - * Even if the CPU speculates past the WARN_ONCE branch, the - * sign bit of @index is taken into account when generating the - * mask. - * - * This warning is compiled out when the compiler can infer that - * @index and @size are less than LONG_MAX. - */ - if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, - "array_index_nospec() limited to range of [0, LONG_MAX]\n")) - return 0; - /* * Always calculate and emit the mask even if the compiler * thinks the mask is not needed. The compiler does not take @@ -43,6 +29,26 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, } #endif +/* + * Warn developers about inappropriate array_index_nospec() usage. + * + * Even if the CPU speculates past the WARN_ONCE branch, the + * sign bit of @index is taken into account when generating the + * mask. + * + * This warning is compiled out when the compiler can infer that + * @index and @size are less than LONG_MAX. + */ +#define array_index_mask_nospec_check(index, size) \ +({ \ + if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \ + "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \ + _mask = 0; \ + else \ + _mask = array_index_mask_nospec(index, size); \ + _mask; \ +}) + /* * array_index_nospec - sanitize an array index after a bounds check * @@ -61,7 +67,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, ({ \ typeof(index) _i = (index); \ typeof(size) _s = (size); \ - unsigned long _mask = array_index_mask_nospec(_i, _s); \ + unsigned long _mask = array_index_mask_nospec_check(_i, _s); \ \ BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ From 1fed58f610b57c1190862819eba6f7bc5396b08d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 13 Feb 2018 14:28:19 +0100 Subject: [PATCH 2057/3326] x86/speculation: Add dependency commit ea00f301285ea2f07393678cd2b6057878320c9d upstream. Joe Konno reported a compile failure resulting from using an MSR without inclusion of , and while the current code builds fine (by accident) this needs fixing for future patches. 
Reported-by: Joe Konno Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: arjan@linux.intel.com Cc: bp@alien8.de Cc: dan.j.williams@intel.com Cc: dave.hansen@linux.intel.com Cc: dwmw2@infradead.org Cc: dwmw@amazon.co.uk Cc: gregkh@linuxfoundation.org Cc: hpa@zytor.com Cc: jpoimboe@redhat.com Cc: linux-tip-commits@vger.kernel.org Cc: luto@kernel.org Fixes: 20ffa1caecca ("x86/speculation: Add basic IBPB (Indirect Branch Prediction Barrier) support") Link: http://lkml.kernel.org/r/20180213132819.GJ25201@hirez.programming.kicks-ass.net Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 300cc159b4a0a4..76b058533e473b 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -6,6 +6,7 @@ #include #include #include +#include #ifdef __ASSEMBLY__ From 2abfcdf8e77d3719aa1d37b1f9de800fa596eda3 Mon Sep 17 00:00:00 2001 From: "Levin, Alexander (Sasha Levin)" Date: Wed, 15 Nov 2017 17:35:51 -0800 Subject: [PATCH 2058/3326] kmemcheck: remove annotations commit 4950276672fce5c241857540f8561c440663673d upstream. Patch series "kmemcheck: kill kmemcheck", v2. As discussed at LSF/MM, kill kmemcheck. KASan is a replacement that is able to work without the limitation of kmemcheck (single CPU, slow). KASan is already upstream. We are also not aware of any users of kmemcheck (or users who don't consider KASan as a suitable replacement). The only objection was that since KASAN wasn't supported by all GCC versions provided by distros at that time we should hold off for 2 years, and try again. Now that 2 years have passed, and all distros provide gcc that supports KASAN, kill kmemcheck again for the very same reasons. This patch (of 4): Remove kmemcheck annotations, and calls to kmemcheck from the kernel. [alexander.levin@verizon.com: correctly remove kmemcheck call from dma_map_sg_attrs] Link: http://lkml.kernel.org/r/20171012192151.26531-1-alexander.levin@verizon.com Link: http://lkml.kernel.org/r/20171007030159.22241-2-alexander.levin@verizon.com Signed-off-by: Sasha Levin Cc: Alexander Potapenko Cc: Eric W. 
Biederman Cc: Michal Hocko Cc: Pekka Enberg Cc: Steven Rostedt Cc: Tim Hansen Cc: Vegard Nossum Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/dma-iommu.h | 1 - arch/openrisc/include/asm/dma-mapping.h | 1 - arch/x86/Makefile | 5 ----- arch/x86/include/asm/dma-mapping.h | 1 - arch/x86/include/asm/xor.h | 5 +---- arch/x86/kernel/traps.c | 5 ----- arch/x86/mm/fault.c | 6 ------ drivers/char/random.c | 1 - drivers/misc/c2port/core.c | 2 -- fs/dcache.c | 2 -- include/linux/c2port.h | 4 ---- include/linux/dma-mapping.h | 8 +------- include/linux/filter.h | 2 -- include/linux/mm_types.h | 8 -------- include/linux/net.h | 3 --- include/linux/ring_buffer.h | 3 --- include/linux/skbuff.h | 3 --- include/net/inet_sock.h | 3 --- include/net/inet_timewait_sock.h | 4 ---- include/net/sock.h | 3 --- init/main.c | 1 - kernel/bpf/core.c | 6 ------ kernel/locking/lockdep.c | 3 --- kernel/trace/ring_buffer.c | 3 --- mm/kmemleak.c | 9 --------- mm/page_alloc.c | 14 -------------- mm/slab.c | 14 -------------- mm/slab.h | 2 -- mm/slub.c | 20 -------------------- net/core/skbuff.c | 5 ----- net/core/sock.c | 2 -- net/ipv4/inet_timewait_sock.c | 3 --- net/ipv4/tcp_input.c | 1 - net/socket.c | 1 - 34 files changed, 2 insertions(+), 152 deletions(-) diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index 0722ec6be69238..6821f1249300d9 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -7,7 +7,6 @@ #include #include #include -#include #include #define ARM_MAPPING_ERROR (~(dma_addr_t)0x0) diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index f41bd3cb76d905..e212a1f0b6d255 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -23,7 +23,6 @@ */ #include -#include #include extern const struct dma_map_ops or1k_dma_map_ops; diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 504b1a4535acaa..fad55160dcb94a 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -158,11 +158,6 @@ ifdef CONFIG_X86_X32 endif export CONFIG_X86_X32_ABI -# Don't unroll struct assignments with kmemcheck enabled -ifeq ($(CONFIG_KMEMCHECK),y) - KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) -endif - # # If the function graph tracer is used with mcount instead of fentry, # '-maccumulate-outgoing-args' is needed to prevent a GCC bug diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 836ca1178a6afe..69f16f0729d01e 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -7,7 +7,6 @@ * Documentation/DMA-API.txt for documentation. */ -#include #include #include #include diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h index 1f5c5161ead682..45c8605467f137 100644 --- a/arch/x86/include/asm/xor.h +++ b/arch/x86/include/asm/xor.h @@ -1,7 +1,4 @@ -#ifdef CONFIG_KMEMCHECK -/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */ -# include -#elif !defined(_ASM_X86_XOR_H) +#ifndef _ASM_X86_XOR_H #define _ASM_X86_XOR_H /* diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 0ddaeb2184a6f0..a66428dc92ae0f 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -42,7 +42,6 @@ #include #endif -#include #include #include #include @@ -764,10 +763,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) if (!dr6 && user_mode(regs)) user_icebp = 1; - /* Catch kmemcheck conditions! 
*/ - if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) - goto exit; - /* Store the virtualized DR6 value */ tsk->thread.debugreg6 = dr6; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b264b590eeec07..9150fe2c9b2697 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -20,7 +20,6 @@ #include /* boot_cpu_has, ... */ #include /* dotraplinkage, ... */ #include /* pgd_*(), ... */ -#include /* kmemcheck_*(), ... */ #include /* VSYSCALL_ADDR */ #include /* emulate_vsyscall */ #include /* struct vm86 */ @@ -1257,8 +1256,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * Detect and handle instructions that would cause a page fault for * both a tracked kernel page and a userspace page. */ - if (kmemcheck_active(regs)) - kmemcheck_hide(regs); prefetchw(&mm->mmap_sem); if (unlikely(kmmio_fault(regs, address))) @@ -1281,9 +1278,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) { if (vmalloc_fault(address) >= 0) return; - - if (kmemcheck_fault(regs, address, error_code)) - return; } /* Can handle a stale RO->RW TLB: */ diff --git a/drivers/char/random.c b/drivers/char/random.c index 8ad92707e45f23..ea0115cf5fc0d0 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -259,7 +259,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index 1922cb8f6b88f3..1c5b7aec13d46a 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -904,7 +903,6 @@ struct c2port_device *c2port_device_register(char *name, return ERR_PTR(-EINVAL); c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL); - kmemcheck_annotate_bitfield(c2dev, flags); if (unlikely(!c2dev)) return ERR_PTR(-ENOMEM); diff --git a/fs/dcache.c b/fs/dcache.c index 34c852af215c0f..b8d999a5768bfe 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2705,8 +2705,6 @@ static void swap_names(struct dentry *dentry, struct dentry *target) */ unsigned int i; BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long))); - kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN); - kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN); for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) { swap(((long *) &dentry->d_iname)[i], ((long *) &target->d_iname)[i]); diff --git a/include/linux/c2port.h b/include/linux/c2port.h index 4efabcb5134712..f2736348ca26e7 100644 --- a/include/linux/c2port.h +++ b/include/linux/c2port.h @@ -9,8 +9,6 @@ * the Free Software Foundation */ -#include - #define C2PORT_NAME_LEN 32 struct device; @@ -22,10 +20,8 @@ struct device; /* Main struct */ struct c2port_ops; struct c2port_device { - kmemcheck_bitfield_begin(flags); unsigned int access:1; unsigned int flash_access:1; - kmemcheck_bitfield_end(flags); int id; char name[C2PORT_NAME_LEN]; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 46930f82a9888a..7bf3b99e6fbb17 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -9,7 +9,6 @@ #include #include #include -#include #include #include @@ -230,7 +229,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; - kmemcheck_mark_initialized(ptr, size); BUG_ON(!valid_dma_direction(dir)); addr = ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr), size, @@ -263,11 +261,8 @@ static 
inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, unsigned long attrs) { const struct dma_map_ops *ops = get_dma_ops(dev); - int i, ents; - struct scatterlist *s; + int ents; - for_each_sg(sg, s, nents, i) - kmemcheck_mark_initialized(sg_virt(s), s->length); BUG_ON(!valid_dma_direction(dir)); ents = ops->map_sg(dev, sg, nents, dir, attrs); BUG_ON(ents < 0); @@ -297,7 +292,6 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; - kmemcheck_mark_initialized(page_address(page) + offset, size); BUG_ON(!valid_dma_direction(dir)); addr = ops->map_page(dev, page, offset, size, dir, attrs); debug_dma_map_page(dev, page, offset, size, dir, addr, false); diff --git a/include/linux/filter.h b/include/linux/filter.h index 48ec57e70f9f3d..42197b16dd7869 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -454,13 +454,11 @@ struct bpf_binary_header { struct bpf_prog { u16 pages; /* Number of allocated pages */ - kmemcheck_bitfield_begin(meta); u16 jited:1, /* Is our filter JIT'ed? */ locked:1, /* Program image locked? */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1; /* Do we need dst entry? */ - kmemcheck_bitfield_end(meta); enum bpf_prog_type type; /* Type of BPF program */ u32 len; /* Number of filter blocks */ u32 jited_len; /* Size of jited insns in bytes */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index c85f11dafd5606..9f0bb908e2b5f8 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -207,14 +207,6 @@ struct page { not kmapped, ie. highmem) */ #endif /* WANT_PAGE_VIRTUAL */ -#ifdef CONFIG_KMEMCHECK - /* - * kmemcheck wants to track the status of each byte in a page; this - * is a pointer to such a status block. NULL if not tracked. - */ - void *shadow; -#endif - #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS int _last_cpupid; #endif diff --git a/include/linux/net.h b/include/linux/net.h index d97d80d7fdf8a9..caeb159abda508 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -22,7 +22,6 @@ #include #include #include /* For O_CLOEXEC and O_NONBLOCK */ -#include #include #include #include @@ -111,9 +110,7 @@ struct socket_wq { struct socket { socket_state state; - kmemcheck_bitfield_begin(type); short type; - kmemcheck_bitfield_end(type); unsigned long flags; diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index fa6ace66fea5e3..289e4d54e3e05e 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -2,7 +2,6 @@ #ifndef _LINUX_RING_BUFFER_H #define _LINUX_RING_BUFFER_H -#include #include #include #include @@ -14,9 +13,7 @@ struct ring_buffer_iter; * Don't refer to this struct directly, use functions below. */ struct ring_buffer_event { - kmemcheck_bitfield_begin(bitfield); u32 type_len:5, time_delta:27; - kmemcheck_bitfield_end(bitfield); u32 array[]; }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 051e0939ec190a..be45224b01d788 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -15,7 +15,6 @@ #define _LINUX_SKBUFF_H #include -#include #include #include #include @@ -706,7 +705,6 @@ struct sk_buff { /* Following fields are _not_ copied in __copy_skb_header() * Note that queue_mapping is here mostly to fill a hole. 
*/ - kmemcheck_bitfield_begin(flags1); __u16 queue_mapping; /* if you move cloned around you also must adapt those constants */ @@ -725,7 +723,6 @@ struct sk_buff { head_frag:1, xmit_more:1, __unused:1; /* one bit hole */ - kmemcheck_bitfield_end(flags1); /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index db8162dd8c0bcb..8e51b4a69088c2 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -17,7 +17,6 @@ #define _INET_SOCK_H #include -#include #include #include #include @@ -84,7 +83,6 @@ struct inet_request_sock { #define ireq_state req.__req_common.skc_state #define ireq_family req.__req_common.skc_family - kmemcheck_bitfield_begin(flags); u16 snd_wscale : 4, rcv_wscale : 4, tstamp_ok : 1, @@ -93,7 +91,6 @@ struct inet_request_sock { ecn_ok : 1, acked : 1, no_srccheck: 1; - kmemcheck_bitfield_end(flags); u32 ir_mark; union { struct ip_options_rcu __rcu *ireq_opt; diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 6a75d67a30fd80..1356fa6a7566bf 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -15,8 +15,6 @@ #ifndef _INET_TIMEWAIT_SOCK_ #define _INET_TIMEWAIT_SOCK_ - -#include #include #include #include @@ -69,14 +67,12 @@ struct inet_timewait_sock { /* Socket demultiplex comparisons on incoming packets. */ /* these three are in inet_sock */ __be16 tw_sport; - kmemcheck_bitfield_begin(flags); /* And these are ours. */ unsigned int tw_kill : 1, tw_transparent : 1, tw_flowlabel : 20, tw_pad : 2, /* 2 bits hole */ tw_tos : 8; - kmemcheck_bitfield_end(flags); struct timer_list tw_timer; struct inet_bind_bucket *tw_tb; }; diff --git a/include/net/sock.h b/include/net/sock.h index 006580155a87e3..9bd5d68076d9f8 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -436,7 +436,6 @@ struct sock { #define SK_FL_TYPE_MASK 0xffff0000 #endif - kmemcheck_bitfield_begin(flags); unsigned int sk_padding : 1, sk_kern_sock : 1, sk_no_check_tx : 1, @@ -445,8 +444,6 @@ struct sock { sk_protocol : 8, sk_type : 16; #define SK_PROTOCOL_MAX U8_MAX - kmemcheck_bitfield_end(flags); - u16 sk_gso_max_segs; unsigned long sk_lingertime; struct proto *sk_prot_creator; diff --git a/init/main.c b/init/main.c index b32ec72cdf3dd8..2d355a61dfc51a 100644 --- a/init/main.c +++ b/init/main.c @@ -69,7 +69,6 @@ #include #include #include -#include #include #include #include diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 2246115365d99f..d203a5d6b726d6 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -85,8 +85,6 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) if (fp == NULL) return NULL; - kmemcheck_annotate_bitfield(fp, meta); - aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags); if (aux == NULL) { vfree(fp); @@ -127,8 +125,6 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, if (fp == NULL) { __bpf_prog_uncharge(fp_old->aux->user, delta); } else { - kmemcheck_annotate_bitfield(fp, meta); - memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); fp->pages = pages; fp->aux->prog = fp; @@ -662,8 +658,6 @@ static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL); if (fp != NULL) { - kmemcheck_annotate_bitfield(fp, meta); - /* aux->prog still points to the fp_other one, so * when promoting the clone to the real program, * this still needs to be adapted. 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index e36e652d996fe6..4d362d3e457193 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include @@ -3225,8 +3224,6 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name, { int i; - kmemcheck_mark_initialized(lock, sizeof(*lock)); - for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) lock->class_cache[i] = NULL; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 0476a937201476..39c22145418636 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -13,7 +13,6 @@ #include #include #include /* for self test */ -#include #include #include #include @@ -2059,7 +2058,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, } event = __rb_page_index(tail_page, tail); - kmemcheck_annotate_bitfield(event, bitfield); /* account for padding bytes */ local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); @@ -2690,7 +2688,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, /* We reserved something on the buffer */ event = __rb_page_index(tail_page, tail); - kmemcheck_annotate_bitfield(event, bitfield); rb_update_event(cpu_buffer, event, info); local_inc(&tail_page->entries); diff --git a/mm/kmemleak.c b/mm/kmemleak.c index a1ba553816eb39..bd1374f402cdac 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -110,7 +110,6 @@ #include #include -#include #include #include @@ -1238,9 +1237,6 @@ static bool update_checksum(struct kmemleak_object *object) { u32 old_csum = object->checksum; - if (!kmemcheck_is_obj_initialized(object->pointer, object->size)) - return false; - kasan_disable_current(); object->checksum = crc32(0, (void *)object->pointer, object->size); kasan_enable_current(); @@ -1314,11 +1310,6 @@ static void scan_block(void *_start, void *_end, if (scan_should_stop()) break; - /* don't scan uninitialized memory */ - if (!kmemcheck_is_obj_initialized((unsigned long)ptr, - BYTES_PER_POINTER)) - continue; - kasan_disable_current(); pointer = *ptr; kasan_enable_current(); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2de080003693ce..6627caeeaf8245 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -1022,7 +1021,6 @@ static __always_inline bool free_pages_prepare(struct page *page, VM_BUG_ON_PAGE(PageTail(page), page); trace_mm_page_free(page, order); - kmemcheck_free_shadow(page, order); /* * Check tail pages before head page information is cleared to @@ -2674,15 +2672,6 @@ void split_page(struct page *page, unsigned int order) VM_BUG_ON_PAGE(PageCompound(page), page); VM_BUG_ON_PAGE(!page_count(page), page); -#ifdef CONFIG_KMEMCHECK - /* - * Split shadow pages too, because free(page[0]) would - * otherwise free the whole shadow. 
- */ - if (kmemcheck_page_is_tracked(page)) - split_page(virt_to_page(page[0].shadow), order); -#endif - for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); split_page_owner(page, order); @@ -4228,9 +4217,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, page = NULL; } - if (kmemcheck_enabled && page) - kmemcheck_pagealloc_alloc(page, order, gfp_mask); - trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); return page; diff --git a/mm/slab.c b/mm/slab.c index b7095884fd93fa..50713827b3ed05 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -114,7 +114,6 @@ #include #include #include -#include #include #include #include @@ -1435,15 +1434,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, if (sk_memalloc_socks() && page_is_pfmemalloc(page)) SetPageSlabPfmemalloc(page); - if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { - kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); - - if (cachep->ctor) - kmemcheck_mark_uninitialized_pages(page, nr_pages); - else - kmemcheck_mark_unallocated_pages(page, nr_pages); - } - return page; } @@ -1455,8 +1445,6 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page) int order = cachep->gfporder; unsigned long nr_freed = (1 << order); - kmemcheck_free_shadow(page, order); - if (cachep->flags & SLAB_RECLAIM_ACCOUNT) mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed); else @@ -3516,8 +3504,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp, kmemleak_free_recursive(objp, cachep->flags); objp = cache_free_debugcheck(cachep, objp, caller); - kmemcheck_slab_free(cachep, objp, cachep->object_size); - /* * Skip calling cache_free_alien() when the platform is not numa. * This will avoid cache misses that happen while accessing slabp (which diff --git a/mm/slab.h b/mm/slab.h index 86d7c7d860f92c..438e4ace4d9dc8 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -40,7 +40,6 @@ struct kmem_cache { #include #include -#include #include #include #include @@ -439,7 +438,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, for (i = 0; i < size; i++) { void *object = p[i]; - kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); kasan_slab_alloc(s, object, flags); diff --git a/mm/slub.c b/mm/slub.c index 8e1c027a30f4d0..2de2fc3adbd238 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -1375,7 +1374,6 @@ static inline void *slab_free_hook(struct kmem_cache *s, void *x) unsigned long flags; local_irq_save(flags); - kmemcheck_slab_free(s, x, s->object_size); debug_check_no_locks_freed(x, s->object_size); local_irq_restore(flags); } @@ -1596,22 +1594,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) stat(s, ORDER_FALLBACK); } - if (kmemcheck_enabled && - !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { - int pages = 1 << oo_order(oo); - - kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node); - - /* - * Objects from caches that have a constructor don't get - * cleared when they're allocated, so we need to do it here. 
- */ - if (s->ctor) - kmemcheck_mark_uninitialized_pages(page, pages); - else - kmemcheck_mark_unallocated_pages(page, pages); - } - page->objects = oo_objects(oo); order = compound_order(page); @@ -1687,8 +1669,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page) check_object(s, page, p, SLUB_RED_INACTIVE); } - kmemcheck_free_shadow(page, compound_order(page)); - mod_lruvec_page_state(page, (s->flags & SLAB_RECLAIM_ACCOUNT) ? NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 15fa5baa8faef7..cc811add68c642 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include #include @@ -234,14 +233,12 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, shinfo = skb_shinfo(skb); memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); atomic_set(&shinfo->dataref, 1); - kmemcheck_annotate_variable(shinfo->destructor_arg); if (flags & SKB_ALLOC_FCLONE) { struct sk_buff_fclones *fclones; fclones = container_of(skb, struct sk_buff_fclones, skb1); - kmemcheck_annotate_bitfield(&fclones->skb2, flags1); skb->fclone = SKB_FCLONE_ORIG; refcount_set(&fclones->fclone_ref, 1); @@ -301,7 +298,6 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size) shinfo = skb_shinfo(skb); memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); atomic_set(&shinfo->dataref, 1); - kmemcheck_annotate_variable(shinfo->destructor_arg); return skb; } @@ -1284,7 +1280,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) if (!n) return NULL; - kmemcheck_annotate_bitfield(n, flags1); n->fclone = SKB_FCLONE_UNAVAILABLE; } diff --git a/net/core/sock.c b/net/core/sock.c index beb1e299fed385..ec6eb546b22842 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1469,8 +1469,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, sk = kmalloc(prot->obj_size, priority); if (sk != NULL) { - kmemcheck_annotate_bitfield(sk, flags); - if (security_sk_alloc(sk, family, priority)) goto out_free; diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 5b039159e67a60..d451b9f19b59da 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -9,7 +9,6 @@ */ #include -#include #include #include #include @@ -167,8 +166,6 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, if (tw) { const struct inet_sock *inet = inet_sk(sk); - kmemcheck_annotate_bitfield(tw, flags); - tw->tw_dr = dr; /* Give us an identity. 
*/ tw->tw_daddr = inet->inet_daddr; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ff48ac654e5ae1..d9d215e27b8ab0 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6204,7 +6204,6 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, if (req) { struct inet_request_sock *ireq = inet_rsk(req); - kmemcheck_annotate_bitfield(ireq, flags); ireq->ireq_opt = NULL; #if IS_ENABLED(CONFIG_IPV6) ireq->pktopts = NULL; diff --git a/net/socket.c b/net/socket.c index d894c7c5fa5498..43d2f17f5eeace 100644 --- a/net/socket.c +++ b/net/socket.c @@ -568,7 +568,6 @@ struct socket *sock_alloc(void) sock = SOCKET_I(inode); - kmemcheck_annotate_bitfield(sock, type); inode->i_ino = get_next_ino(); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); From ae63fd26b2d81eaaf3cf35693dc01c0148bd7a9b Mon Sep 17 00:00:00 2001 From: "Levin, Alexander (Sasha Levin)" Date: Wed, 15 Nov 2017 17:35:54 -0800 Subject: [PATCH 2059/3326] kmemcheck: stop using GFP_NOTRACK and SLAB_NOTRACK commit 75f296d93bcebcfe375884ddac79e30263a31766 upstream. Convert all allocations that used a NOTRACK flag to stop using it. Link: http://lkml.kernel.org/r/20171007030159.22241-3-alexander.levin@verizon.com Signed-off-by: Sasha Levin Cc: Alexander Potapenko Cc: Eric W. Biederman Cc: Michal Hocko Cc: Pekka Enberg Cc: Steven Rostedt Cc: Tim Hansen Cc: Vegard Nossum Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/pgalloc.h | 2 +- arch/arm64/include/asm/pgalloc.h | 2 +- arch/powerpc/include/asm/pgalloc.h | 2 +- arch/sh/kernel/dwarf.c | 4 ++-- arch/sh/kernel/process.c | 2 +- arch/sparc/mm/init_64.c | 4 ++-- arch/unicore32/include/asm/pgalloc.h | 2 +- arch/x86/kernel/espfix_64.c | 2 +- arch/x86/mm/init.c | 3 +-- arch/x86/mm/init_64.c | 2 +- arch/x86/mm/pageattr.c | 10 +++++----- arch/x86/mm/pgtable.c | 2 +- arch/x86/platform/efi/efi_64.c | 2 +- crypto/xor.c | 7 +------ include/linux/thread_info.h | 5 ++--- init/do_mounts.c | 3 +-- kernel/fork.c | 12 ++++++------ kernel/signal.c | 3 +-- mm/kmemcheck.c | 2 +- mm/slab.c | 2 +- mm/slab.h | 5 ++--- mm/slab_common.c | 2 +- mm/slub.c | 4 +--- 23 files changed, 36 insertions(+), 48 deletions(-) diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index b2902a5cd780fd..2d7344f0e2085b 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) extern pgd_t *pgd_alloc(struct mm_struct *mm); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) static inline void clean_pte_table(pte_t *pte) { diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index d25f4f137c2aa1..5ca6a573a70124 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -26,7 +26,7 @@ #define check_pgt_cache() do { } while (0) -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) #if CONFIG_PGTABLE_LEVELS > 2 diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index a14203c005f1a4..e11f03007b5753 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -18,7 +18,7 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t 
gfp) } #endif /* MODULE */ -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) #ifdef CONFIG_PPC_BOOK3S #include diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index e1d751ae2498af..1a2526676a8729 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -1172,11 +1172,11 @@ static int __init dwarf_unwinder_init(void) dwarf_frame_cachep = kmem_cache_create("dwarf_frames", sizeof(struct dwarf_frame), 0, - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); + SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL); dwarf_reg_cachep = kmem_cache_create("dwarf_regs", sizeof(struct dwarf_reg), 0, - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); + SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL); dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ, dwarf_frame_cachep); diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index b2d9963d5978ba..68b1a67533cea1 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c @@ -59,7 +59,7 @@ void arch_task_cache_init(void) task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, __alignof__(union thread_xstate), - SLAB_PANIC | SLAB_NOTRACK, NULL); + SLAB_PANIC, NULL); } #ifdef CONFIG_SH_FPU_EMU diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index a0cc1be767c8f5..984e9d65ea0d14 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2934,7 +2934,7 @@ void __flush_tlb_all(void) pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); pte_t *pte = NULL; if (page) @@ -2946,7 +2946,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { - struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) return NULL; if (!pgtable_page_ctor(page)) { diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h index 26775793c204e7..f0fdb268f8f2ef 100644 --- a/arch/unicore32/include/asm/pgalloc.h +++ b/arch/unicore32/include/asm/pgalloc.h @@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); #define pgd_alloc(mm) get_pgd_slow(mm) #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) /* * Allocate one PTE table. 
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 9c4e7ba6870c14..cbded50ee6010a 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -57,7 +57,7 @@ # error "Need more virtual address space for the ESPFIX hack" #endif -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) /* This contains the *bottom* address of the espfix stack */ DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 6b462a472a7bfb..fcf38a2a4e7e6a 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -93,8 +93,7 @@ __ref void *alloc_low_pages(unsigned int num) unsigned int order; order = get_order((unsigned long)num << PAGE_SHIFT); - return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK | - __GFP_ZERO, order); + return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); } if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) { diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index adcea90a2046e9..5fa3a58b5d7865 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -184,7 +184,7 @@ static __ref void *spp_getpage(void) void *ptr; if (after_bootmem) - ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); + ptr = (void *) get_zeroed_page(GFP_ATOMIC); else ptr = alloc_bootmem_pages(PAGE_SIZE); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index dfb7d657cf4322..3ed9a08885c560 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -753,7 +753,7 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte, if (!debug_pagealloc_enabled()) spin_unlock(&cpa_lock); - base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0); + base = alloc_pages(GFP_KERNEL, 0); if (!debug_pagealloc_enabled()) spin_lock(&cpa_lock); if (!base) @@ -904,7 +904,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end) static int alloc_pte_page(pmd_t *pmd) { - pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL); if (!pte) return -1; @@ -914,7 +914,7 @@ static int alloc_pte_page(pmd_t *pmd) static int alloc_pmd_page(pud_t *pud) { - pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); if (!pmd) return -1; @@ -1120,7 +1120,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr) pgd_entry = cpa->pgd + pgd_index(addr); if (pgd_none(*pgd_entry)) { - p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL); if (!p4d) return -1; @@ -1132,7 +1132,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr) */ p4d = p4d_offset(pgd_entry, addr); if (p4d_none(*p4d)) { - pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + pud = (pud_t *)get_zeroed_page(GFP_KERNEL); if (!pud) return -1; diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 9b7bcbd33cc246..004abf9ebf1222 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -7,7 +7,7 @@ #include #include -#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) #ifdef CONFIG_HIGHPTE #define PGALLOC_USER_GFP __GFP_HIGHMEM diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 61975b6bcb1a0a..ad5d9538f0f91e 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -211,7 +211,7 @@ int __init 
efi_alloc_page_tables(void) if (efi_enabled(EFI_OLD_MEMMAP)) return 0; - gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO; + gfp_mask = GFP_KERNEL | __GFP_ZERO; efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER); if (!efi_pgd) return -ENOMEM; diff --git a/crypto/xor.c b/crypto/xor.c index 263af9fb45ea28..bce9fe7af40ad4 100644 --- a/crypto/xor.c +++ b/crypto/xor.c @@ -122,12 +122,7 @@ calibrate_xor_blocks(void) goto out; } - /* - * Note: Since the memory is not actually used for _anything_ but to - * test the XOR speed, we don't really want kmemcheck to warn about - * reading uninitialized bytes here. - */ - b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2); + b1 = (void *) __get_free_pages(GFP_KERNEL, 2); if (!b1) { printk(KERN_WARNING "xor: Yikes! No memory available.\n"); return -ENOMEM; diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 4bcdf00c110fb1..34f053a150a969 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -44,10 +44,9 @@ enum { #endif #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ - __GFP_ZERO) +# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) #else -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK) +# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT) #endif /* diff --git a/init/do_mounts.c b/init/do_mounts.c index f6d4dd764a5248..7cf4f6dafd5f32 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -380,8 +380,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data) void __init mount_block_root(char *name, int flags) { - struct page *page = alloc_page(GFP_KERNEL | - __GFP_NOTRACK_FALSE_POSITIVE); + struct page *page = alloc_page(GFP_KERNEL); char *fs_names = page_address(page); char *p; #ifdef CONFIG_BLOCK diff --git a/kernel/fork.c b/kernel/fork.c index 500ce64517d93e..98c91bd341b4ad 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -469,7 +469,7 @@ void __init fork_init(void) /* create a slab on which task_structs can be allocated */ task_struct_cachep = kmem_cache_create("task_struct", arch_task_struct_size, align, - SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL); + SLAB_PANIC|SLAB_ACCOUNT, NULL); #endif /* do the arch specific task caches init */ @@ -2208,18 +2208,18 @@ void __init proc_caches_init(void) sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| - SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor); + SLAB_ACCOUNT, sighand_ctor); signal_cachep = kmem_cache_create("signal_cache", sizeof(struct signal_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); files_cachep = kmem_cache_create("files_cache", sizeof(struct files_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); fs_cachep = kmem_cache_create("fs_cache", sizeof(struct fs_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); /* * FIXME! 
The "sizeof(struct mm_struct)" currently includes the @@ -2230,7 +2230,7 @@ void __init proc_caches_init(void) */ mm_cachep = kmem_cache_create("mm_struct", sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); mmap_init(); diff --git a/kernel/signal.c b/kernel/signal.c index 1facff1dbbaecb..6895f6bb98a769 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1038,8 +1038,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, else override_rlimit = 0; - q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, - override_rlimit); + q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit); if (q) { list_add_tail(&q->list, &pending->list); switch ((unsigned long) info) { diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c index 800d64b854ea66..b3a4d61d341c6a 100644 --- a/mm/kmemcheck.c +++ b/mm/kmemcheck.c @@ -18,7 +18,7 @@ void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) * With kmemcheck enabled, we need to allocate a memory area for the * shadow bits as well. */ - shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order); + shadow = alloc_pages_node(node, flags, order); if (!shadow) { if (printk_ratelimit()) pr_err("kmemcheck: failed to allocate shadow bitmap\n"); diff --git a/mm/slab.c b/mm/slab.c index 50713827b3ed05..966839a1ac2cec 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1412,7 +1412,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, if (cachep->flags & SLAB_RECLAIM_ACCOUNT) flags |= __GFP_RECLAIMABLE; - page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); + page = __alloc_pages_node(nodeid, flags, cachep->gfporder); if (!page) { slab_out_of_memory(cachep, flags, nodeid); return NULL; diff --git a/mm/slab.h b/mm/slab.h index 438e4ace4d9dc8..485d9fbb8802f8 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -141,10 +141,10 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size, #if defined(CONFIG_SLAB) #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \ SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \ - SLAB_NOTRACK | SLAB_ACCOUNT) + SLAB_ACCOUNT) #elif defined(CONFIG_SLUB) #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \ - SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT) + SLAB_TEMPORARY | SLAB_ACCOUNT) #else #define SLAB_CACHE_FLAGS (0) #endif @@ -163,7 +163,6 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size, SLAB_NOLEAKTRACE | \ SLAB_RECLAIM_ACCOUNT | \ SLAB_TEMPORARY | \ - SLAB_NOTRACK | \ SLAB_ACCOUNT) int __kmem_cache_shutdown(struct kmem_cache *); diff --git a/mm/slab_common.c b/mm/slab_common.c index 0d7fe71ff5e428..65212caa1f2a34 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -44,7 +44,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work, SLAB_FAILSLAB | SLAB_KASAN) #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ - SLAB_NOTRACK | SLAB_ACCOUNT) + SLAB_ACCOUNT) /* * Merge control. If this is set then no merging of slab caches will occur. 
diff --git a/mm/slub.c b/mm/slub.c index 2de2fc3adbd238..6eeffac300254d 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1434,8 +1434,6 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s, struct page *page; int order = oo_order(oo); - flags |= __GFP_NOTRACK; - if (node == NUMA_NO_NODE) page = alloc_pages(flags, order); else @@ -3772,7 +3770,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) struct page *page; void *ptr = NULL; - flags |= __GFP_COMP | __GFP_NOTRACK; + flags |= __GFP_COMP; page = alloc_pages_node(node, flags, get_order(size)); if (page) ptr = page_address(page); From b9870f85817ebb331d7496d50a0b99088e240e20 Mon Sep 17 00:00:00 2001 From: "Levin, Alexander (Sasha Levin)" Date: Wed, 15 Nov 2017 17:35:58 -0800 Subject: [PATCH 2060/3326] kmemcheck: remove whats left of NOTRACK flags commit d8be75663cec0069b85f80191abd2682ce4a512f upstream. Now that kmemcheck is gone, we don't need the NOTRACK flags. Link: http://lkml.kernel.org/r/20171007030159.22241-5-alexander.levin@verizon.com Signed-off-by: Sasha Levin Cc: Alexander Potapenko Cc: Eric W. Biederman Cc: Michal Hocko Cc: Pekka Enberg Cc: Steven Rostedt Cc: Tim Hansen Cc: Vegard Nossum Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/pgtable.h | 5 ----- arch/x86/include/asm/pgtable_types.h | 13 ------------- include/linux/gfp.h | 9 --------- include/linux/slab.h | 6 ------ include/trace/events/mmflags.h | 1 - mm/slub.c | 2 -- tools/perf/builtin-kmem.c | 1 - 7 files changed, 37 deletions(-) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 211368922cad67..8b8f1f14a0bfc0 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -668,11 +668,6 @@ static inline bool pte_accessible(struct mm_struct *mm, pte_t a) return false; } -static inline int pte_hidden(pte_t pte) -{ - return pte_flags(pte) & _PAGE_HIDDEN; -} - static inline int pmd_present(pmd_t pmd) { /* diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 9e9b05fc4860e7..3696398a9475fe 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -32,7 +32,6 @@ #define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1 #define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1 -#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */ #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ #define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4 @@ -79,18 +78,6 @@ #define _PAGE_KNL_ERRATUM_MASK 0 #endif -#ifdef CONFIG_KMEMCHECK -#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) -#else -#define _PAGE_HIDDEN (_AT(pteval_t, 0)) -#endif - -/* - * The same hidden bit is used by kmemcheck, but since kmemcheck - * works on kernel pages while soft-dirty engine on user space, - * they do not conflict with each other. - */ - #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) #else diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 710143741eb57a..b041f94678de5a 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -37,7 +37,6 @@ struct vm_area_struct; #define ___GFP_THISNODE 0x40000u #define ___GFP_ATOMIC 0x80000u #define ___GFP_ACCOUNT 0x100000u -#define ___GFP_NOTRACK 0x200000u #define ___GFP_DIRECT_RECLAIM 0x400000u #define ___GFP_WRITE 0x800000u #define ___GFP_KSWAPD_RECLAIM 0x1000000u @@ -201,19 +200,11 @@ struct vm_area_struct; * __GFP_COMP address compound page metadata. 
* * __GFP_ZERO returns a zeroed page on success. - * - * __GFP_NOTRACK avoids tracking with kmemcheck. - * - * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of - * distinguishing in the source between false positives and allocations that - * cannot be supported (e.g. page tables). */ #define __GFP_COLD ((__force gfp_t)___GFP_COLD) #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) #define __GFP_COMP ((__force gfp_t)___GFP_COMP) #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) -#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) -#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) /* Disable lockdep for GFP context tracking */ #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) diff --git a/include/linux/slab.h b/include/linux/slab.h index af5aa65c7c18c1..ae5ed6492d5410 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -78,12 +78,6 @@ #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ -/* Don't track use of uninitialized memory */ -#ifdef CONFIG_KMEMCHECK -# define SLAB_NOTRACK 0x01000000UL -#else -# define SLAB_NOTRACK 0x00000000UL -#endif #ifdef CONFIG_FAILSLAB # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */ #else diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 648cbf60373683..72162f3a03fac2 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -46,7 +46,6 @@ {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \ {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \ {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \ - {(unsigned long)__GFP_NOTRACK, "__GFP_NOTRACK"}, \ {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \ {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ diff --git a/mm/slub.c b/mm/slub.c index 6eeffac300254d..38acb5c114edab 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5633,8 +5633,6 @@ static char *create_unique_id(struct kmem_cache *s) *p++ = 'a'; if (s->flags & SLAB_CONSISTENCY_CHECKS) *p++ = 'F'; - if (!(s->flags & SLAB_NOTRACK)) - *p++ = 't'; if (s->flags & SLAB_ACCOUNT) *p++ = 'A'; if (p != name + 1) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 35d4b9c9a9e8ff..9e693ce4b73b0a 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -655,7 +655,6 @@ static const struct { { "__GFP_RECLAIMABLE", "RC" }, { "__GFP_MOVABLE", "M" }, { "__GFP_ACCOUNT", "AC" }, - { "__GFP_NOTRACK", "NT" }, { "__GFP_WRITE", "WR" }, { "__GFP_RECLAIM", "R" }, { "__GFP_DIRECT_RECLAIM", "DR" }, From f369f1486116b0f3e9630a2481addde6854df541 Mon Sep 17 00:00:00 2001 From: "Levin, Alexander (Sasha Levin)" Date: Wed, 15 Nov 2017 17:36:02 -0800 Subject: [PATCH 2061/3326] kmemcheck: rip it out commit 4675ff05de2d76d167336b368bd07f3fef6ed5a6 upstream. Fix up makefiles, remove references, and git rm kmemcheck. Link: http://lkml.kernel.org/r/20171007030159.22241-4-alexander.levin@verizon.com Signed-off-by: Sasha Levin Cc: Steven Rostedt Cc: Vegard Nossum Cc: Pekka Enberg Cc: Michal Hocko Cc: Eric W. 
Biederman Cc: Alexander Potapenko Cc: Tim Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- .../admin-guide/kernel-parameters.txt | 7 - Documentation/dev-tools/index.rst | 1 - Documentation/dev-tools/kmemcheck.rst | 733 ------------------ MAINTAINERS | 10 - arch/x86/Kconfig | 3 +- arch/x86/include/asm/kmemcheck.h | 42 - arch/x86/include/asm/string_32.h | 9 - arch/x86/include/asm/string_64.h | 8 - arch/x86/kernel/cpu/intel.c | 15 - arch/x86/mm/Makefile | 2 - arch/x86/mm/init.c | 5 +- arch/x86/mm/kmemcheck/Makefile | 1 - arch/x86/mm/kmemcheck/error.c | 227 ------ arch/x86/mm/kmemcheck/error.h | 15 - arch/x86/mm/kmemcheck/kmemcheck.c | 658 ---------------- arch/x86/mm/kmemcheck/opcode.c | 106 --- arch/x86/mm/kmemcheck/opcode.h | 9 - arch/x86/mm/kmemcheck/pte.c | 22 - arch/x86/mm/kmemcheck/pte.h | 10 - arch/x86/mm/kmemcheck/selftest.c | 70 -- arch/x86/mm/kmemcheck/selftest.h | 6 - arch/x86/mm/kmemcheck/shadow.c | 173 ----- arch/x86/mm/kmemcheck/shadow.h | 18 - include/linux/interrupt.h | 15 - include/linux/kmemcheck.h | 171 ---- kernel/softirq.c | 10 - kernel/sysctl.c | 10 - lib/Kconfig.debug | 6 +- lib/Kconfig.kmemcheck | 94 --- mm/Kconfig.debug | 1 - mm/Makefile | 2 - mm/kmemcheck.c | 125 --- mm/slub.c | 5 +- scripts/kernel-doc | 2 - tools/include/linux/kmemcheck.h | 8 - 35 files changed, 7 insertions(+), 2592 deletions(-) delete mode 100644 Documentation/dev-tools/kmemcheck.rst delete mode 100644 arch/x86/mm/kmemcheck/Makefile delete mode 100644 arch/x86/mm/kmemcheck/kmemcheck.c delete mode 100644 arch/x86/mm/kmemcheck/shadow.c delete mode 100644 lib/Kconfig.kmemcheck diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index c76afdcafbef0d..fb385af482ff79 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1841,13 +1841,6 @@ Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y, the default is off. - kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode - Valid arguments: 0, 1, 2 - kmemcheck=0 (disabled) - kmemcheck=1 (enabled) - kmemcheck=2 (one-shot mode) - Default: 2 (one-shot mode) - kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs. Default is 0 (don't ignore, but inject #GP) diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst index a81787cd47d793..e313925fb0fa50 100644 --- a/Documentation/dev-tools/index.rst +++ b/Documentation/dev-tools/index.rst @@ -21,7 +21,6 @@ whole; patches welcome! kasan ubsan kmemleak - kmemcheck gdb-kernel-debugging kgdb kselftest diff --git a/Documentation/dev-tools/kmemcheck.rst b/Documentation/dev-tools/kmemcheck.rst deleted file mode 100644 index 7f3d1985de743f..00000000000000 --- a/Documentation/dev-tools/kmemcheck.rst +++ /dev/null @@ -1,733 +0,0 @@ -Getting started with kmemcheck -============================== - -Vegard Nossum - - -Introduction ------------- - -kmemcheck is a debugging feature for the Linux Kernel. More specifically, it -is a dynamic checker that detects and warns about some uses of uninitialized -memory. - -Userspace programmers might be familiar with Valgrind's memcheck. The main -difference between memcheck and kmemcheck is that memcheck works for userspace -programs only, and kmemcheck works for the kernel only. The implementations -are of course vastly different. 
Because of this, kmemcheck is not as accurate -as memcheck, but it turns out to be good enough in practice to discover real -programmer errors that the compiler is not able to find through static -analysis. - -Enabling kmemcheck on a kernel will probably slow it down to the extent that -the machine will not be usable for normal workloads such as e.g. an -interactive desktop. kmemcheck will also cause the kernel to use about twice -as much memory as normal. For this reason, kmemcheck is strictly a debugging -feature. - - -Downloading ------------ - -As of version 2.6.31-rc1, kmemcheck is included in the mainline kernel. - - -Configuring and compiling -------------------------- - -kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of -configuration variables must have specific settings in order for the kmemcheck -menu to even appear in "menuconfig". These are: - -- ``CONFIG_CC_OPTIMIZE_FOR_SIZE=n`` - This option is located under "General setup" / "Optimize for size". - - Without this, gcc will use certain optimizations that usually lead to - false positive warnings from kmemcheck. An example of this is a 16-bit - field in a struct, where gcc may load 32 bits, then discard the upper - 16 bits. kmemcheck sees only the 32-bit load, and may trigger a - warning for the upper 16 bits (if they're uninitialized). - -- ``CONFIG_SLAB=y`` or ``CONFIG_SLUB=y`` - This option is located under "General setup" / "Choose SLAB - allocator". - -- ``CONFIG_FUNCTION_TRACER=n`` - This option is located under "Kernel hacking" / "Tracers" / "Kernel - Function Tracer" - - When function tracing is compiled in, gcc emits a call to another - function at the beginning of every function. This means that when the - page fault handler is called, the ftrace framework will be called - before kmemcheck has had a chance to handle the fault. If ftrace then - modifies memory that was tracked by kmemcheck, the result is an - endless recursive page fault. - -- ``CONFIG_DEBUG_PAGEALLOC=n`` - This option is located under "Kernel hacking" / "Memory Debugging" - / "Debug page memory allocations". - -In addition, I highly recommend turning on ``CONFIG_DEBUG_INFO=y``. This is also -located under "Kernel hacking". With this, you will be able to get line number -information from the kmemcheck warnings, which is extremely valuable in -debugging a problem. This option is not mandatory, however, because it slows -down the compilation process and produces a much bigger kernel image. - -Now the kmemcheck menu should be visible (under "Kernel hacking" / "Memory -Debugging" / "kmemcheck: trap use of uninitialized memory"). Here follows -a description of the kmemcheck configuration variables: - -- ``CONFIG_KMEMCHECK`` - This must be enabled in order to use kmemcheck at all... - -- ``CONFIG_KMEMCHECK_``[``DISABLED`` | ``ENABLED`` | ``ONESHOT``]``_BY_DEFAULT`` - This option controls the status of kmemcheck at boot-time. "Enabled" - will enable kmemcheck right from the start, "disabled" will boot the - kernel as normal (but with the kmemcheck code compiled in, so it can - be enabled at run-time after the kernel has booted), and "one-shot" is - a special mode which will turn kmemcheck off automatically after - detecting the first use of uninitialized memory. - - If you are using kmemcheck to actively debug a problem, then you - probably want to choose "enabled" here. 
- - The one-shot mode is mostly useful in automated test setups because it - can prevent floods of warnings and increase the chances of the machine - surviving in case something is really wrong. In other cases, the one- - shot mode could actually be counter-productive because it would turn - itself off at the very first error -- in the case of a false positive - too -- and this would come in the way of debugging the specific - problem you were interested in. - - If you would like to use your kernel as normal, but with a chance to - enable kmemcheck in case of some problem, it might be a good idea to - choose "disabled" here. When kmemcheck is disabled, most of the run- - time overhead is not incurred, and the kernel will be almost as fast - as normal. - -- ``CONFIG_KMEMCHECK_QUEUE_SIZE`` - Select the maximum number of error reports to store in an internal - (fixed-size) buffer. Since errors can occur virtually anywhere and in - any context, we need a temporary storage area which is guaranteed not - to generate any other page faults when accessed. The queue will be - emptied as soon as a tasklet may be scheduled. If the queue is full, - new error reports will be lost. - - The default value of 64 is probably fine. If some code produces more - than 64 errors within an irqs-off section, then the code is likely to - produce many, many more, too, and these additional reports seldom give - any more information (the first report is usually the most valuable - anyway). - - This number might have to be adjusted if you are not using serial - console or similar to capture the kernel log. If you are using the - "dmesg" command to save the log, then getting a lot of kmemcheck - warnings might overflow the kernel log itself, and the earlier reports - will get lost in that way instead. Try setting this to 10 or so on - such a setup. - -- ``CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT`` - Select the number of shadow bytes to save along with each entry of the - error-report queue. These bytes indicate what parts of an allocation - are initialized, uninitialized, etc. and will be displayed when an - error is detected to help the debugging of a particular problem. - - The number entered here is actually the logarithm of the number of - bytes that will be saved. So if you pick for example 5 here, kmemcheck - will save 2^5 = 32 bytes. - - The default value should be fine for debugging most problems. It also - fits nicely within 80 columns. - -- ``CONFIG_KMEMCHECK_PARTIAL_OK`` - This option (when enabled) works around certain GCC optimizations that - produce 32-bit reads from 16-bit variables where the upper 16 bits are - thrown away afterwards. - - The default value (enabled) is recommended. This may of course hide - some real errors, but disabling it would probably produce a lot of - false positives. - -- ``CONFIG_KMEMCHECK_BITOPS_OK`` - This option silences warnings that would be generated for bit-field - accesses where not all the bits are initialized at the same time. This - may also hide some real bugs. - - This option is probably obsolete, or it should be replaced with - the kmemcheck-/bitfield-annotations for the code in question. The - default value is therefore fine. - -Now compile the kernel as usual. - - -How to use ----------- - -Booting -~~~~~~~ - -First some information about the command-line options. There is only one -option specific to kmemcheck, and this is called "kmemcheck". It can be used -to override the default mode as chosen by the ``CONFIG_KMEMCHECK_*_BY_DEFAULT`` -option. 
Its possible settings are: - -- ``kmemcheck=0`` (disabled) -- ``kmemcheck=1`` (enabled) -- ``kmemcheck=2`` (one-shot mode) - -If SLUB debugging has been enabled in the kernel, it may take precedence over -kmemcheck in such a way that the slab caches which are under SLUB debugging -will not be tracked by kmemcheck. In order to ensure that this doesn't happen -(even though it shouldn't by default), use SLUB's boot option ``slub_debug``, -like this: ``slub_debug=-`` - -In fact, this option may also be used for fine-grained control over SLUB vs. -kmemcheck. For example, if the command line includes -``kmemcheck=1 slub_debug=,dentry``, then SLUB debugging will be used only -for the "dentry" slab cache, and with kmemcheck tracking all the other -caches. This is advanced usage, however, and is not generally recommended. - - -Run-time enable/disable -~~~~~~~~~~~~~~~~~~~~~~~ - -When the kernel has booted, it is possible to enable or disable kmemcheck at -run-time. WARNING: This feature is still experimental and may cause false -positive warnings to appear. Therefore, try not to use this. If you find that -it doesn't work properly (e.g. you see an unreasonable amount of warnings), I -will be happy to take bug reports. - -Use the file ``/proc/sys/kernel/kmemcheck`` for this purpose, e.g.:: - - $ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck - -The numbers are the same as for the ``kmemcheck=`` command-line option. - - -Debugging -~~~~~~~~~ - -A typical report will look something like this:: - - WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024) - 80000000000000000000000000000000000000000088ffff0000000000000000 - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u - ^ - - Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A - RIP: 0010:[] [] __dequeue_signal+0xc8/0x190 - RSP: 0018:ffff88003cdf7d98 EFLAGS: 00210002 - RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009 - RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84 - RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000 - R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e - R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8 - FS: 0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000 - CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033 - CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0 - DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 - DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400 - [] dequeue_signal+0x8e/0x170 - [] get_signal_to_deliver+0x98/0x390 - [] do_notify_resume+0xad/0x7d0 - [] int_signal+0x12/0x17 - [] 0xffffffffffffffff - -The single most valuable information in this report is the RIP (or EIP on 32- -bit) value. This will help us pinpoint exactly which instruction that caused -the warning. - -If your kernel was compiled with ``CONFIG_DEBUG_INFO=y``, then all we have to do -is give this address to the addr2line program, like this:: - - $ addr2line -e vmlinux -i ffffffff8104ede8 - arch/x86/include/asm/string_64.h:12 - include/asm-generic/siginfo.h:287 - kernel/signal.c:380 - kernel/signal.c:410 - -The "``-e vmlinux``" tells addr2line which file to look in. **IMPORTANT:** -This must be the vmlinux of the kernel that produced the warning in the -first place! If not, the line number information will almost certainly be -wrong. - -The "``-i``" tells addr2line to also print the line numbers of inlined -functions. 
In this case, the flag was very important, because otherwise, -it would only have printed the first line, which is just a call to -``memcpy()``, which could be called from a thousand places in the kernel, and -is therefore not very useful. These inlined functions would not show up in -the stack trace above, simply because the kernel doesn't load the extra -debugging information. This technique can of course be used with ordinary -kernel oopses as well. - -In this case, it's the caller of ``memcpy()`` that is interesting, and it can be -found in ``include/asm-generic/siginfo.h``, line 287:: - - 281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) - 282 { - 283 if (from->si_code < 0) - 284 memcpy(to, from, sizeof(*to)); - 285 else - 286 /* _sigchld is currently the largest know union member */ - 287 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); - 288 } - -Since this was a read (kmemcheck usually warns about reads only, though it can -warn about writes to unallocated or freed memory as well), it was probably the -"from" argument which contained some uninitialized bytes. Following the chain -of calls, we move upwards to see where "from" was allocated or initialized, -``kernel/signal.c``, line 380:: - - 359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) - 360 { - ... - 367 list_for_each_entry(q, &list->list, list) { - 368 if (q->info.si_signo == sig) { - 369 if (first) - 370 goto still_pending; - 371 first = q; - ... - 377 if (first) { - 378 still_pending: - 379 list_del_init(&first->list); - 380 copy_siginfo(info, &first->info); - 381 __sigqueue_free(first); - ... - 392 } - 393 } - -Here, it is ``&first->info`` that is being passed on to ``copy_siginfo()``. The -variable ``first`` was found on a list -- passed in as the second argument to -``collect_signal()``. We continue our journey through the stack, to figure out -where the item on "list" was allocated or initialized. We move to line 410:: - - 395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, - 396 siginfo_t *info) - 397 { - ... - 410 collect_signal(sig, pending, info); - ... - 414 } - -Now we need to follow the ``pending`` pointer, since that is being passed on to -``collect_signal()`` as ``list``. At this point, we've run out of lines from the -"addr2line" output. Not to worry, we just paste the next addresses from the -kmemcheck stack dump, i.e.:: - - [] dequeue_signal+0x8e/0x170 - [] get_signal_to_deliver+0x98/0x390 - [] do_notify_resume+0xad/0x7d0 - [] int_signal+0x12/0x17 - - $ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \ - ffffffff8100b87d ffffffff8100c7b5 - kernel/signal.c:446 - kernel/signal.c:1806 - arch/x86/kernel/signal.c:805 - arch/x86/kernel/signal.c:871 - arch/x86/kernel/entry_64.S:694 - -Remember that since these addresses were found on the stack and not as the -RIP value, they actually point to the _next_ instruction (they are return -addresses). This becomes obvious when we look at the code for line 446:: - - 422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) - 423 { - ... - 431 signr = __dequeue_signal(&tsk->signal->shared_pending, - 432 mask, info); - 433 /* - 434 * itimer signal ? - 435 * - 436 * itimers are process shared and we restart periodic - 437 * itimers in the signal delivery path to prevent DoS - 438 * attacks in the high resolution timer case. 
This is - 439 * compliant with the old way of self restarting - 440 * itimers, as the SIGALRM is a legacy signal and only - 441 * queued once. Changing the restart behaviour to - 442 * restart the timer in the signal dequeue path is - 443 * reducing the timer noise on heavy loaded !highres - 444 * systems too. - 445 */ - 446 if (unlikely(signr == SIGALRM)) { - ... - 489 } - -So instead of looking at 446, we should be looking at 431, which is the line -that executes just before 446. Here we see that what we are looking for is -``&tsk->signal->shared_pending``. - -Our next task is now to figure out which function that puts items on this -``shared_pending`` list. A crude, but efficient tool, is ``git grep``:: - - $ git grep -n 'shared_pending' kernel/ - ... - kernel/signal.c:828: pending = group ? &t->signal->shared_pending : &t->pending; - kernel/signal.c:1339: pending = group ? &t->signal->shared_pending : &t->pending; - ... - -There were more results, but none of them were related to list operations, -and these were the only assignments. We inspect the line numbers more closely -and find that this is indeed where items are being added to the list:: - - 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t, - 817 int group) - 818 { - ... - 828 pending = group ? &t->signal->shared_pending : &t->pending; - ... - 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && - 852 (is_si_special(info) || - 853 info->si_code >= 0))); - 854 if (q) { - 855 list_add_tail(&q->list, &pending->list); - ... - 890 } - -and:: - - 1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) - 1310 { - .... - 1339 pending = group ? &t->signal->shared_pending : &t->pending; - 1340 list_add_tail(&q->list, &pending->list); - .... - 1347 } - -In the first case, the list element we are looking for, ``q``, is being -returned from the function ``__sigqueue_alloc()``, which looks like an -allocation function. Let's take a look at it:: - - 187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, - 188 int override_rlimit) - 189 { - 190 struct sigqueue *q = NULL; - 191 struct user_struct *user; - 192 - 193 /* - 194 * We won't get problems with the target's UID changing under us - 195 * because changing it requires RCU be used, and if t != current, the - 196 * caller must be holding the RCU readlock (by way of a spinlock) and - 197 * we use RCU protection here - 198 */ - 199 user = get_uid(__task_cred(t)->user); - 200 atomic_inc(&user->sigpending); - 201 if (override_rlimit || - 202 atomic_read(&user->sigpending) <= - 203 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) - 204 q = kmem_cache_alloc(sigqueue_cachep, flags); - 205 if (unlikely(q == NULL)) { - 206 atomic_dec(&user->sigpending); - 207 free_uid(user); - 208 } else { - 209 INIT_LIST_HEAD(&q->list); - 210 q->flags = 0; - 211 q->user = user; - 212 } - 213 - 214 return q; - 215 } - -We see that this function initializes ``q->list``, ``q->flags``, and -``q->user``. It seems that now is the time to look at the definition of -``struct sigqueue``, e.g.:: - - 14 struct sigqueue { - 15 struct list_head list; - 16 int flags; - 17 siginfo_t info; - 18 struct user_struct *user; - 19 }; - -And, you might remember, it was a ``memcpy()`` on ``&first->info`` that -caused the warning, so this makes perfect sense. It also seems reasonable -to assume that it is the caller of ``__sigqueue_alloc()`` that has the -responsibility of filling out (initializing) this member. 
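
A quick way to inspect this kind of structure layout (and any alignment padding the compiler adds) before reaching for gdb is a small userspace program built around ``offsetof()``. The structure and file name below are invented purely for a self-contained illustration: it is a simplified stand-in for ``siginfo_t``, not the kernel definition, and the ``long`` members inside the union were chosen only to force 8-byte alignment on 64-bit builds. The authoritative layout still has to come from the vmlinux image itself, which is what the gdb session further below extracts::

    /* layout.c -- illustrative stand-in, not the kernel's siginfo_t.
     * Build and run with: gcc -Wall -o layout layout.c && ./layout
     */
    #include <stdio.h>
    #include <stddef.h>

    struct fake_siginfo {
            int si_signo;
            int si_errno;
            int si_code;
            union {
                    struct {
                            long _pid;   /* long forces 8-byte alignment here */
                            long _uid;
                    } _kill;
            } _sifields;
    };

    int main(void)
    {
            /* On an LP64 (64-bit) build this prints 8 and 16: si_code occupies
             * bytes 8..11, and the 8-byte-aligned union starts at offset 16,
             * so the compiler inserts 4 bytes of padding in between. On a
             * 32-bit build the union is 4-byte aligned and no padding appears.
             */
            printf("si_code   at offset %zu\n",
                   offsetof(struct fake_siginfo, si_code));
            printf("_sifields at offset %zu\n",
                   offsetof(struct fake_siginfo, _sifields));
            return 0;
    }

Keep in mind that member offsets need not be contiguous; that observation is exactly what the rest of this walkthrough turns on.
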
- -But just which fields of the struct were uninitialized? Let's look at -kmemcheck's report again:: - - WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024) - 80000000000000000000000000000000000000000088ffff0000000000000000 - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u - ^ - -These first two lines are the memory dump of the memory object itself, and -the shadow bytemap, respectively. The memory object itself is in this case -``&first->info``. Just beware that the start of this dump is NOT the start -of the object itself! The position of the caret (^) corresponds with the -address of the read (ffff88003e4a2024). - -The shadow bytemap dump legend is as follows: - -- i: initialized -- u: uninitialized -- a: unallocated (memory has been allocated by the slab layer, but has not - yet been handed off to anybody) -- f: freed (memory has been allocated by the slab layer, but has been freed - by the previous owner) - -In order to figure out where (relative to the start of the object) the -uninitialized memory was located, we have to look at the disassembly. For -that, we'll need the RIP address again:: - - RIP: 0010:[] [] __dequeue_signal+0xc8/0x190 - - $ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8: - ffffffff8104edc8: mov %r8,0x8(%r8) - ffffffff8104edcc: test %r10d,%r10d - ffffffff8104edcf: js ffffffff8104ee88 <__dequeue_signal+0x168> - ffffffff8104edd5: mov %rax,%rdx - ffffffff8104edd8: mov $0xc,%ecx - ffffffff8104eddd: mov %r13,%rdi - ffffffff8104ede0: mov $0x30,%eax - ffffffff8104ede5: mov %rdx,%rsi - ffffffff8104ede8: rep movsl %ds:(%rsi),%es:(%rdi) - ffffffff8104edea: test $0x2,%al - ffffffff8104edec: je ffffffff8104edf0 <__dequeue_signal+0xd0> - ffffffff8104edee: movsw %ds:(%rsi),%es:(%rdi) - ffffffff8104edf0: test $0x1,%al - ffffffff8104edf2: je ffffffff8104edf5 <__dequeue_signal+0xd5> - ffffffff8104edf4: movsb %ds:(%rsi),%es:(%rdi) - ffffffff8104edf5: mov %r8,%rdi - ffffffff8104edf8: callq ffffffff8104de60 <__sigqueue_free> - -As expected, it's the "``rep movsl``" instruction from the ``memcpy()`` -that causes the warning. We know about ``REP MOVSL`` that it uses the register -``RCX`` to count the number of remaining iterations. By taking a look at the -register dump again (from the kmemcheck report), we can figure out how many -bytes were left to copy:: - - RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009 - -By looking at the disassembly, we also see that ``%ecx`` is being loaded -with the value ``$0xc`` just before (ffffffff8104edd8), so we are very -lucky. Keep in mind that this is the number of iterations, not bytes. And -since this is a "long" operation, we need to multiply by 4 to get the -number of bytes. So this means that the uninitialized value was encountered -at 4 * (0xc - 0x9) = 12 bytes from the start of the object. - -We can now try to figure out which field of the "``struct siginfo``" that -was not initialized. This is the beginning of the struct:: - - 40 typedef struct siginfo { - 41 int si_signo; - 42 int si_errno; - 43 int si_code; - 44 - 45 union { - .. - 92 } _sifields; - 93 } siginfo_t; - -On 64-bit, the int is 4 bytes long, so it must the union member that has -not been initialized. We can verify this using gdb:: - - $ gdb vmlinux - ... 
- (gdb) p &((struct siginfo *) 0)->_sifields - $1 = (union {...} *) 0x10 - -Actually, it seems that the union member is located at offset 0x10 -- which -means that gcc has inserted 4 bytes of padding between the members ``si_code`` -and ``_sifields``. We can now get a fuller picture of the memory dump:: - - _----------------------------=> si_code - / _--------------------=> (padding) - | / _------------=> _sifields(._kill._pid) - | | / _----=> _sifields(._kill._uid) - | | | / - -------|-------|-------|-------| - 80000000000000000000000000000000000000000088ffff0000000000000000 - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u - -This allows us to realize another important fact: ``si_code`` contains the -value 0x80. Remember that x86 is little endian, so the first 4 bytes -"80000000" are really the number 0x00000080. With a bit of research, we -find that this is actually the constant ``SI_KERNEL`` defined in -``include/asm-generic/siginfo.h``:: - - 144 #define SI_KERNEL 0x80 /* sent by the kernel from somewhere */ - -This macro is used in exactly one place in the x86 kernel: In ``send_signal()`` -in ``kernel/signal.c``:: - - 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t, - 817 int group) - 818 { - ... - 828 pending = group ? &t->signal->shared_pending : &t->pending; - ... - 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && - 852 (is_si_special(info) || - 853 info->si_code >= 0))); - 854 if (q) { - 855 list_add_tail(&q->list, &pending->list); - 856 switch ((unsigned long) info) { - ... - 865 case (unsigned long) SEND_SIG_PRIV: - 866 q->info.si_signo = sig; - 867 q->info.si_errno = 0; - 868 q->info.si_code = SI_KERNEL; - 869 q->info.si_pid = 0; - 870 q->info.si_uid = 0; - 871 break; - ... - 890 } - -Not only does this match with the ``.si_code`` member, it also matches the place -we found earlier when looking for where siginfo_t objects are enqueued on the -``shared_pending`` list. - -So to sum up: It seems that it is the padding introduced by the compiler -between two struct fields that is uninitialized, and this gets reported when -we do a ``memcpy()`` on the struct. This means that we have identified a false -positive warning. - -Normally, kmemcheck will not report uninitialized accesses in ``memcpy()`` calls -when both the source and destination addresses are tracked. (Instead, we copy -the shadow bytemap as well). In this case, the destination address clearly -was not tracked. We can dig a little deeper into the stack trace from above:: - - arch/x86/kernel/signal.c:805 - arch/x86/kernel/signal.c:871 - arch/x86/kernel/entry_64.S:694 - -And we clearly see that the destination siginfo object is located on the -stack:: - - 782 static void do_signal(struct pt_regs *regs) - 783 { - 784 struct k_sigaction ka; - 785 siginfo_t info; - ... - 804 signr = get_signal_to_deliver(&info, &ka, regs, NULL); - ... - 854 } - -And this ``&info`` is what eventually gets passed to ``copy_siginfo()`` as the -destination argument. - -Now, even though we didn't find an actual error here, the example is still a -good one, because it shows how one would go about to find out what the report -was all about. - - -Annotating false positives -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are a few different ways to make annotations in the source code that -will keep kmemcheck from checking and reporting certain allocations. 
Here -they are: - -- ``__GFP_NOTRACK_FALSE_POSITIVE`` - This flag can be passed to ``kmalloc()`` or ``kmem_cache_alloc()`` - (therefore also to other functions that end up calling one of - these) to indicate that the allocation should not be tracked - because it would lead to a false positive report. This is a "big - hammer" way of silencing kmemcheck; after all, even if the false - positive pertains to particular field in a struct, for example, we - will now lose the ability to find (real) errors in other parts of - the same struct. - - Example:: - - /* No warnings will ever trigger on accessing any part of x */ - x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE); - -- ``kmemcheck_bitfield_begin(name)``/``kmemcheck_bitfield_end(name)`` and - ``kmemcheck_annotate_bitfield(ptr, name)`` - The first two of these three macros can be used inside struct - definitions to signal, respectively, the beginning and end of a - bitfield. Additionally, this will assign the bitfield a name, which - is given as an argument to the macros. - - Having used these markers, one can later use - kmemcheck_annotate_bitfield() at the point of allocation, to indicate - which parts of the allocation is part of a bitfield. - - Example:: - - struct foo { - int x; - - kmemcheck_bitfield_begin(flags); - int flag_a:1; - int flag_b:1; - kmemcheck_bitfield_end(flags); - - int y; - }; - - struct foo *x = kmalloc(sizeof *x); - - /* No warnings will trigger on accessing the bitfield of x */ - kmemcheck_annotate_bitfield(x, flags); - - Note that ``kmemcheck_annotate_bitfield()`` can be used even before the - return value of ``kmalloc()`` is checked -- in other words, passing NULL - as the first argument is legal (and will do nothing). - - -Reporting errors ----------------- - -As we have seen, kmemcheck will produce false positive reports. Therefore, it -is not very wise to blindly post kmemcheck warnings to mailing lists and -maintainers. Instead, I encourage maintainers and developers to find errors -in their own code. If you get a warning, you can try to work around it, try -to figure out if it's a real error or not, or simply ignore it. Most -developers know their own code and will quickly and efficiently determine the -root cause of a kmemcheck report. This is therefore also the most efficient -way to work with kmemcheck. - -That said, we (the kmemcheck maintainers) will always be on the lookout for -false positives that we can annotate and silence. So whatever you find, -please drop us a note privately! Kernel configs and steps to reproduce (if -available) are of course a great help too. - -Happy hacking! - - -Technical description ---------------------- - -kmemcheck works by marking memory pages non-present. This means that whenever -somebody attempts to access the page, a page fault is generated. The page -fault handler notices that the page was in fact only hidden, and so it calls -on the kmemcheck code to make further investigations. - -When the investigations are completed, kmemcheck "shows" the page by marking -it present (as it would be under normal circumstances). This way, the -interrupted code can continue as usual. - -But after the instruction has been executed, we should hide the page again, so -that we can catch the next access too! Now kmemcheck makes use of a debugging -feature of the processor, namely single-stepping. When the processor has -finished the one instruction that generated the memory access, a debug -exception is raised. 
From here, we simply hide the page again and continue -execution, this time with the single-stepping feature turned off. - -kmemcheck requires some assistance from the memory allocator in order to work. -The memory allocator needs to - - 1. Tell kmemcheck about newly allocated pages and pages that are about to - be freed. This allows kmemcheck to set up and tear down the shadow memory - for the pages in question. The shadow memory stores the status of each - byte in the allocation proper, e.g. whether it is initialized or - uninitialized. - - 2. Tell kmemcheck which parts of memory should be marked uninitialized. - There are actually a few more states, such as "not yet allocated" and - "recently freed". - -If a slab cache is set up using the SLAB_NOTRACK flag, it will never return -memory that can take page faults because of kmemcheck. - -If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still -request memory with the __GFP_NOTRACK or __GFP_NOTRACK_FALSE_POSITIVE flags. -This does not prevent the page faults from occurring, however, but marks the -object in question as being initialized so that no warnings will ever be -produced for this object. - -Currently, the SLAB and SLUB allocators are supported by kmemcheck. diff --git a/MAINTAINERS b/MAINTAINERS index 2811a211632c38..76ea063d8083a9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7670,16 +7670,6 @@ F: include/linux/kdb.h F: include/linux/kgdb.h F: kernel/debug/ -KMEMCHECK -M: Vegard Nossum -M: Pekka Enberg -S: Maintained -F: Documentation/dev-tools/kmemcheck.rst -F: arch/x86/include/asm/kmemcheck.h -F: arch/x86/mm/kmemcheck/ -F: include/linux/kmemcheck.h -F: mm/kmemcheck.c - KMEMLEAK M: Catalin Marinas S: Maintained diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 17de6acc0eab62..559b37bf5a2e3e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -111,7 +111,6 @@ config X86 select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_KASAN if X86_64 select HAVE_ARCH_KGDB - select HAVE_ARCH_KMEMCHECK select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT @@ -1443,7 +1442,7 @@ config ARCH_DMA_ADDR_T_64BIT config X86_DIRECT_GBPAGES def_bool y - depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK + depends on X86_64 && !DEBUG_PAGEALLOC ---help--- Certain kernel features effectively disable kernel linear 1 GB mappings (even if the CPU otherwise diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h index 945a0337fbcf54..ea32a7d3cf1bc8 100644 --- a/arch/x86/include/asm/kmemcheck.h +++ b/arch/x86/include/asm/kmemcheck.h @@ -1,43 +1 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_X86_KMEMCHECK_H -#define ASM_X86_KMEMCHECK_H - -#include -#include - -#ifdef CONFIG_KMEMCHECK -bool kmemcheck_active(struct pt_regs *regs); - -void kmemcheck_show(struct pt_regs *regs); -void kmemcheck_hide(struct pt_regs *regs); - -bool kmemcheck_fault(struct pt_regs *regs, - unsigned long address, unsigned long error_code); -bool kmemcheck_trap(struct pt_regs *regs); -#else -static inline bool kmemcheck_active(struct pt_regs *regs) -{ - return false; -} - -static inline void kmemcheck_show(struct pt_regs *regs) -{ -} - -static inline void kmemcheck_hide(struct pt_regs *regs) -{ -} - -static inline bool kmemcheck_fault(struct pt_regs *regs, - unsigned long address, unsigned long error_code) -{ - return false; -} - -static inline bool kmemcheck_trap(struct pt_regs *regs) -{ - return false; -} -#endif /* CONFIG_KMEMCHECK */ - 
-#endif diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index 076502241eae19..55d392c6bd29b3 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -179,8 +179,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) * No 3D Now! */ -#ifndef CONFIG_KMEMCHECK - #if (__GNUC__ >= 4) #define memcpy(t, f, n) __builtin_memcpy(t, f, n) #else @@ -189,13 +187,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) ? __constant_memcpy((t), (f), (n)) \ : __memcpy((t), (f), (n))) #endif -#else -/* - * kmemcheck becomes very happy if we use the REP instructions unconditionally, - * because it means that we know both memory operands in advance. - */ -#define memcpy(t, f, n) __memcpy((t), (f), (n)) -#endif #endif #endif /* !CONFIG_FORTIFY_SOURCE */ diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index 0b1b4445f4c5af..533f74c300c254 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -33,7 +33,6 @@ extern void *memcpy(void *to, const void *from, size_t len); extern void *__memcpy(void *to, const void *from, size_t len); #ifndef CONFIG_FORTIFY_SOURCE -#ifndef CONFIG_KMEMCHECK #if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4 #define memcpy(dst, src, len) \ ({ \ @@ -46,13 +45,6 @@ extern void *__memcpy(void *to, const void *from, size_t len); __ret; \ }) #endif -#else -/* - * kmemcheck becomes very happy if we use the REP instructions unconditionally, - * because it means that we know both memory operands in advance. - */ -#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len)) -#endif #endif /* !CONFIG_FORTIFY_SOURCE */ #define __HAVE_ARCH_MEMSET diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index f5347173157c38..ef796f14f7ae50 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -250,21 +250,6 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86 == 6 && c->x86_model < 15) clear_cpu_cap(c, X86_FEATURE_PAT); -#ifdef CONFIG_KMEMCHECK - /* - * P4s have a "fast strings" feature which causes single- - * stepping REP instructions to only generate a #DB on - * cache-line boundaries. - * - * Ingo Molnar reported a Pentium D (model 6) and a Xeon - * (model 2) with the same problem. - */ - if (c->x86 == 15) - if (msr_clear_bit(MSR_IA32_MISC_ENABLE, - MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0) - pr_info("kmemcheck: Disabling fast string operations\n"); -#endif - /* * If fast string is not enabled in IA32_MISC_ENABLE for any reason, * clear the fast string and enhanced fast string CPU capabilities. diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 52906808e27757..27e9e90a8d3572 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -29,8 +29,6 @@ obj-$(CONFIG_X86_PTDUMP) += debug_pagetables.o obj-$(CONFIG_HIGHMEM) += highmem_32.o -obj-$(CONFIG_KMEMCHECK) += kmemcheck/ - KASAN_SANITIZE_kasan_init_$(BITS).o := n obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index fcf38a2a4e7e6a..82f5252c723a4a 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -170,12 +170,11 @@ static void enable_global_pages(void) static void __init probe_page_size_mask(void) { /* - * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will - * use small pages. + * For pagealloc debugging, identity mapping will use small pages. 
* This will simplify cpa(), which otherwise needs to support splitting * large pages into small in interrupt context, etc. */ - if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK)) + if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) page_size_mask |= 1 << PG_LEVEL_2M; else direct_gbpages = 0; diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile deleted file mode 100644 index 520b3bce4095ff..00000000000000 --- a/arch/x86/mm/kmemcheck/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c index 872ec4159a68fb..cec5940325151e 100644 --- a/arch/x86/mm/kmemcheck/error.c +++ b/arch/x86/mm/kmemcheck/error.c @@ -1,228 +1 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include - -#include "error.h" -#include "shadow.h" - -enum kmemcheck_error_type { - KMEMCHECK_ERROR_INVALID_ACCESS, - KMEMCHECK_ERROR_BUG, -}; - -#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT) - -struct kmemcheck_error { - enum kmemcheck_error_type type; - - union { - /* KMEMCHECK_ERROR_INVALID_ACCESS */ - struct { - /* Kind of access that caused the error */ - enum kmemcheck_shadow state; - /* Address and size of the erroneous read */ - unsigned long address; - unsigned int size; - }; - }; - - struct pt_regs regs; - struct stack_trace trace; - unsigned long trace_entries[32]; - - /* We compress it to a char. */ - unsigned char shadow_copy[SHADOW_COPY_SIZE]; - unsigned char memory_copy[SHADOW_COPY_SIZE]; -}; - -/* - * Create a ring queue of errors to output. We can't call printk() directly - * from the kmemcheck traps, since this may call the console drivers and - * result in a recursive fault. - */ -static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE]; -static unsigned int error_count; -static unsigned int error_rd; -static unsigned int error_wr; -static unsigned int error_missed_count; - -static struct kmemcheck_error *error_next_wr(void) -{ - struct kmemcheck_error *e; - - if (error_count == ARRAY_SIZE(error_fifo)) { - ++error_missed_count; - return NULL; - } - - e = &error_fifo[error_wr]; - if (++error_wr == ARRAY_SIZE(error_fifo)) - error_wr = 0; - ++error_count; - return e; -} - -static struct kmemcheck_error *error_next_rd(void) -{ - struct kmemcheck_error *e; - - if (error_count == 0) - return NULL; - - e = &error_fifo[error_rd]; - if (++error_rd == ARRAY_SIZE(error_fifo)) - error_rd = 0; - --error_count; - return e; -} - -void kmemcheck_error_recall(void) -{ - static const char *desc[] = { - [KMEMCHECK_SHADOW_UNALLOCATED] = "unallocated", - [KMEMCHECK_SHADOW_UNINITIALIZED] = "uninitialized", - [KMEMCHECK_SHADOW_INITIALIZED] = "initialized", - [KMEMCHECK_SHADOW_FREED] = "freed", - }; - - static const char short_desc[] = { - [KMEMCHECK_SHADOW_UNALLOCATED] = 'a', - [KMEMCHECK_SHADOW_UNINITIALIZED] = 'u', - [KMEMCHECK_SHADOW_INITIALIZED] = 'i', - [KMEMCHECK_SHADOW_FREED] = 'f', - }; - - struct kmemcheck_error *e; - unsigned int i; - - e = error_next_rd(); - if (!e) - return; - - switch (e->type) { - case KMEMCHECK_ERROR_INVALID_ACCESS: - printk(KERN_WARNING "WARNING: kmemcheck: Caught %d-bit read from %s memory (%p)\n", - 8 * e->size, e->state < ARRAY_SIZE(desc) ? 
- desc[e->state] : "(invalid shadow state)", - (void *) e->address); - - printk(KERN_WARNING); - for (i = 0; i < SHADOW_COPY_SIZE; ++i) - printk(KERN_CONT "%02x", e->memory_copy[i]); - printk(KERN_CONT "\n"); - - printk(KERN_WARNING); - for (i = 0; i < SHADOW_COPY_SIZE; ++i) { - if (e->shadow_copy[i] < ARRAY_SIZE(short_desc)) - printk(KERN_CONT " %c", short_desc[e->shadow_copy[i]]); - else - printk(KERN_CONT " ?"); - } - printk(KERN_CONT "\n"); - printk(KERN_WARNING "%*c\n", 2 + 2 - * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^'); - break; - case KMEMCHECK_ERROR_BUG: - printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n"); - break; - } - - __show_regs(&e->regs, 1); - print_stack_trace(&e->trace, 0); -} - -static void do_wakeup(unsigned long data) -{ - while (error_count > 0) - kmemcheck_error_recall(); - - if (error_missed_count > 0) { - printk(KERN_WARNING "kmemcheck: Lost %d error reports because " - "the queue was too small\n", error_missed_count); - error_missed_count = 0; - } -} - -static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0); - -/* - * Save the context of an error report. - */ -void kmemcheck_error_save(enum kmemcheck_shadow state, - unsigned long address, unsigned int size, struct pt_regs *regs) -{ - static unsigned long prev_ip; - - struct kmemcheck_error *e; - void *shadow_copy; - void *memory_copy; - - /* Don't report several adjacent errors from the same EIP. */ - if (regs->ip == prev_ip) - return; - prev_ip = regs->ip; - - e = error_next_wr(); - if (!e) - return; - - e->type = KMEMCHECK_ERROR_INVALID_ACCESS; - - e->state = state; - e->address = address; - e->size = size; - - /* Save regs */ - memcpy(&e->regs, regs, sizeof(*regs)); - - /* Save stack trace */ - e->trace.nr_entries = 0; - e->trace.entries = e->trace_entries; - e->trace.max_entries = ARRAY_SIZE(e->trace_entries); - e->trace.skip = 0; - save_stack_trace_regs(regs, &e->trace); - - /* Round address down to nearest 16 bytes */ - shadow_copy = kmemcheck_shadow_lookup(address - & ~(SHADOW_COPY_SIZE - 1)); - BUG_ON(!shadow_copy); - - memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE); - - kmemcheck_show_addr(address); - memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1)); - memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE); - kmemcheck_hide_addr(address); - - tasklet_hi_schedule_first(&kmemcheck_tasklet); -} - -/* - * Save the context of a kmemcheck bug. 
- */ -void kmemcheck_error_save_bug(struct pt_regs *regs) -{ - struct kmemcheck_error *e; - - e = error_next_wr(); - if (!e) - return; - - e->type = KMEMCHECK_ERROR_BUG; - - memcpy(&e->regs, regs, sizeof(*regs)); - - e->trace.nr_entries = 0; - e->trace.entries = e->trace_entries; - e->trace.max_entries = ARRAY_SIZE(e->trace_entries); - e->trace.skip = 1; - save_stack_trace(&e->trace); - - tasklet_hi_schedule_first(&kmemcheck_tasklet); -} diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h index 39f80d7a874de9..ea32a7d3cf1bc8 100644 --- a/arch/x86/mm/kmemcheck/error.h +++ b/arch/x86/mm/kmemcheck/error.h @@ -1,16 +1 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H -#define ARCH__X86__MM__KMEMCHECK__ERROR_H - -#include - -#include "shadow.h" - -void kmemcheck_error_save(enum kmemcheck_shadow state, - unsigned long address, unsigned int size, struct pt_regs *regs); - -void kmemcheck_error_save_bug(struct pt_regs *regs); - -void kmemcheck_error_recall(void); - -#endif diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c deleted file mode 100644 index 4515bae36bbe5f..00000000000000 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ /dev/null @@ -1,658 +0,0 @@ -/** - * kmemcheck - a heavyweight memory checker for the linux kernel - * Copyright (C) 2007, 2008 Vegard Nossum - * (With a lot of help from Ingo Molnar and Pekka Enberg.) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License (version 2) as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "error.h" -#include "opcode.h" -#include "pte.h" -#include "selftest.h" -#include "shadow.h" - - -#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT -# define KMEMCHECK_ENABLED 0 -#endif - -#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT -# define KMEMCHECK_ENABLED 1 -#endif - -#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT -# define KMEMCHECK_ENABLED 2 -#endif - -int kmemcheck_enabled = KMEMCHECK_ENABLED; - -int __init kmemcheck_init(void) -{ -#ifdef CONFIG_SMP - /* - * Limit SMP to use a single CPU. We rely on the fact that this code - * runs before SMP is set up. - */ - if (setup_max_cpus > 1) { - printk(KERN_INFO - "kmemcheck: Limiting number of CPUs to 1.\n"); - setup_max_cpus = 1; - } -#endif - - if (!kmemcheck_selftest()) { - printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n"); - kmemcheck_enabled = 0; - return -EINVAL; - } - - printk(KERN_INFO "kmemcheck: Initialized\n"); - return 0; -} - -early_initcall(kmemcheck_init); - -/* - * We need to parse the kmemcheck= option before any memory is allocated. 
- */ -static int __init param_kmemcheck(char *str) -{ - int val; - int ret; - - if (!str) - return -EINVAL; - - ret = kstrtoint(str, 0, &val); - if (ret) - return ret; - kmemcheck_enabled = val; - return 0; -} - -early_param("kmemcheck", param_kmemcheck); - -int kmemcheck_show_addr(unsigned long address) -{ - pte_t *pte; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return 0; - - set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); - __flush_tlb_one(address); - return 1; -} - -int kmemcheck_hide_addr(unsigned long address) -{ - pte_t *pte; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return 0; - - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); - __flush_tlb_one(address); - return 1; -} - -struct kmemcheck_context { - bool busy; - int balance; - - /* - * There can be at most two memory operands to an instruction, but - * each address can cross a page boundary -- so we may need up to - * four addresses that must be hidden/revealed for each fault. - */ - unsigned long addr[4]; - unsigned long n_addrs; - unsigned long flags; - - /* Data size of the instruction that caused a fault. */ - unsigned int size; -}; - -static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); - -bool kmemcheck_active(struct pt_regs *regs) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - return data->balance > 0; -} - -/* Save an address that needs to be shown/hidden */ -static void kmemcheck_save_addr(unsigned long addr) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); - data->addr[data->n_addrs++] = addr; -} - -static unsigned int kmemcheck_show_all(void) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - unsigned int i; - unsigned int n; - - n = 0; - for (i = 0; i < data->n_addrs; ++i) - n += kmemcheck_show_addr(data->addr[i]); - - return n; -} - -static unsigned int kmemcheck_hide_all(void) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - unsigned int i; - unsigned int n; - - n = 0; - for (i = 0; i < data->n_addrs; ++i) - n += kmemcheck_hide_addr(data->addr[i]); - - return n; -} - -/* - * Called from the #PF handler. - */ -void kmemcheck_show(struct pt_regs *regs) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - BUG_ON(!irqs_disabled()); - - if (unlikely(data->balance != 0)) { - kmemcheck_show_all(); - kmemcheck_error_save_bug(regs); - data->balance = 0; - return; - } - - /* - * None of the addresses actually belonged to kmemcheck. Note that - * this is not an error. - */ - if (kmemcheck_show_all() == 0) - return; - - ++data->balance; - - /* - * The IF needs to be cleared as well, so that the faulting - * instruction can run "uninterrupted". Otherwise, we might take - * an interrupt and start executing that before we've had a chance - * to hide the page again. - * - * NOTE: In the rare case of multiple faults, we must not override - * the original flags: - */ - if (!(regs->flags & X86_EFLAGS_TF)) - data->flags = regs->flags; - - regs->flags |= X86_EFLAGS_TF; - regs->flags &= ~X86_EFLAGS_IF; -} - -/* - * Called from the #DB handler. 
- */ -void kmemcheck_hide(struct pt_regs *regs) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - int n; - - BUG_ON(!irqs_disabled()); - - if (unlikely(data->balance != 1)) { - kmemcheck_show_all(); - kmemcheck_error_save_bug(regs); - data->n_addrs = 0; - data->balance = 0; - - if (!(data->flags & X86_EFLAGS_TF)) - regs->flags &= ~X86_EFLAGS_TF; - if (data->flags & X86_EFLAGS_IF) - regs->flags |= X86_EFLAGS_IF; - return; - } - - if (kmemcheck_enabled) - n = kmemcheck_hide_all(); - else - n = kmemcheck_show_all(); - - if (n == 0) - return; - - --data->balance; - - data->n_addrs = 0; - - if (!(data->flags & X86_EFLAGS_TF)) - regs->flags &= ~X86_EFLAGS_TF; - if (data->flags & X86_EFLAGS_IF) - regs->flags |= X86_EFLAGS_IF; -} - -void kmemcheck_show_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) { - unsigned long address; - pte_t *pte; - unsigned int level; - - address = (unsigned long) page_address(&p[i]); - pte = lookup_address(address, &level); - BUG_ON(!pte); - BUG_ON(level != PG_LEVEL_4K); - - set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN)); - __flush_tlb_one(address); - } -} - -bool kmemcheck_page_is_tracked(struct page *p) -{ - /* This will also check the "hidden" flag of the PTE. */ - return kmemcheck_pte_lookup((unsigned long) page_address(p)); -} - -void kmemcheck_hide_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) { - unsigned long address; - pte_t *pte; - unsigned int level; - - address = (unsigned long) page_address(&p[i]); - pte = lookup_address(address, &level); - BUG_ON(!pte); - BUG_ON(level != PG_LEVEL_4K); - - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); - set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN)); - __flush_tlb_one(address); - } -} - -/* Access may NOT cross page boundary */ -static void kmemcheck_read_strict(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - void *shadow; - enum kmemcheck_shadow status; - - shadow = kmemcheck_shadow_lookup(addr); - if (!shadow) - return; - - kmemcheck_save_addr(addr); - status = kmemcheck_shadow_test(shadow, size); - if (status == KMEMCHECK_SHADOW_INITIALIZED) - return; - - if (kmemcheck_enabled) - kmemcheck_error_save(status, addr, size, regs); - - if (kmemcheck_enabled == 2) - kmemcheck_enabled = 0; - - /* Don't warn about it again. */ - kmemcheck_shadow_set(shadow, size); -} - -bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) -{ - enum kmemcheck_shadow status; - void *shadow; - - shadow = kmemcheck_shadow_lookup(addr); - if (!shadow) - return true; - - status = kmemcheck_shadow_test_all(shadow, size); - - return status == KMEMCHECK_SHADOW_INITIALIZED; -} - -/* Access may cross page boundary */ -static void kmemcheck_read(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - unsigned long page = addr & PAGE_MASK; - unsigned long next_addr = addr + size - 1; - unsigned long next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - kmemcheck_read_strict(regs, addr, size); - return; - } - - /* - * What we do is basically to split the access across the - * two pages and handle each part separately. Yes, this means - * that we may now see reads that are 3 + 5 bytes, for - * example (and if both are uninitialized, there will be two - * reports), but it makes the code a lot simpler. 
- */ - kmemcheck_read_strict(regs, addr, next_page - addr); - kmemcheck_read_strict(regs, next_page, next_addr - next_page); -} - -static void kmemcheck_write_strict(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - void *shadow; - - shadow = kmemcheck_shadow_lookup(addr); - if (!shadow) - return; - - kmemcheck_save_addr(addr); - kmemcheck_shadow_set(shadow, size); -} - -static void kmemcheck_write(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - unsigned long page = addr & PAGE_MASK; - unsigned long next_addr = addr + size - 1; - unsigned long next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - kmemcheck_write_strict(regs, addr, size); - return; - } - - /* See comment in kmemcheck_read(). */ - kmemcheck_write_strict(regs, addr, next_page - addr); - kmemcheck_write_strict(regs, next_page, next_addr - next_page); -} - -/* - * Copying is hard. We have two addresses, each of which may be split across - * a page (and each page will have different shadow addresses). - */ -static void kmemcheck_copy(struct pt_regs *regs, - unsigned long src_addr, unsigned long dst_addr, unsigned int size) -{ - uint8_t shadow[8]; - enum kmemcheck_shadow status; - - unsigned long page; - unsigned long next_addr; - unsigned long next_page; - - uint8_t *x; - unsigned int i; - unsigned int n; - - BUG_ON(size > sizeof(shadow)); - - page = src_addr & PAGE_MASK; - next_addr = src_addr + size - 1; - next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - /* Same page */ - x = kmemcheck_shadow_lookup(src_addr); - if (x) { - kmemcheck_save_addr(src_addr); - for (i = 0; i < size; ++i) - shadow[i] = x[i]; - } else { - for (i = 0; i < size; ++i) - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } else { - n = next_page - src_addr; - BUG_ON(n > sizeof(shadow)); - - /* First page */ - x = kmemcheck_shadow_lookup(src_addr); - if (x) { - kmemcheck_save_addr(src_addr); - for (i = 0; i < n; ++i) - shadow[i] = x[i]; - } else { - /* Not tracked */ - for (i = 0; i < n; ++i) - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - - /* Second page */ - x = kmemcheck_shadow_lookup(next_page); - if (x) { - kmemcheck_save_addr(next_page); - for (i = n; i < size; ++i) - shadow[i] = x[i - n]; - } else { - /* Not tracked */ - for (i = n; i < size; ++i) - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - - page = dst_addr & PAGE_MASK; - next_addr = dst_addr + size - 1; - next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - /* Same page */ - x = kmemcheck_shadow_lookup(dst_addr); - if (x) { - kmemcheck_save_addr(dst_addr); - for (i = 0; i < size; ++i) { - x[i] = shadow[i]; - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - } else { - n = next_page - dst_addr; - BUG_ON(n > sizeof(shadow)); - - /* First page */ - x = kmemcheck_shadow_lookup(dst_addr); - if (x) { - kmemcheck_save_addr(dst_addr); - for (i = 0; i < n; ++i) { - x[i] = shadow[i]; - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - - /* Second page */ - x = kmemcheck_shadow_lookup(next_page); - if (x) { - kmemcheck_save_addr(next_page); - for (i = n; i < size; ++i) { - x[i - n] = shadow[i]; - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - } - - status = kmemcheck_shadow_test(shadow, size); - if (status == KMEMCHECK_SHADOW_INITIALIZED) - return; - - if (kmemcheck_enabled) - kmemcheck_error_save(status, src_addr, size, regs); - - if (kmemcheck_enabled == 2) - kmemcheck_enabled = 0; -} - -enum kmemcheck_method { - KMEMCHECK_READ, - KMEMCHECK_WRITE, -}; - -static void 
kmemcheck_access(struct pt_regs *regs, - unsigned long fallback_address, enum kmemcheck_method fallback_method) -{ - const uint8_t *insn; - const uint8_t *insn_primary; - unsigned int size; - - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - /* Recursive fault -- ouch. */ - if (data->busy) { - kmemcheck_show_addr(fallback_address); - kmemcheck_error_save_bug(regs); - return; - } - - data->busy = true; - - insn = (const uint8_t *) regs->ip; - insn_primary = kmemcheck_opcode_get_primary(insn); - - kmemcheck_opcode_decode(insn, &size); - - switch (insn_primary[0]) { -#ifdef CONFIG_KMEMCHECK_BITOPS_OK - /* AND, OR, XOR */ - /* - * Unfortunately, these instructions have to be excluded from - * our regular checking since they access only some (and not - * all) bits. This clears out "bogus" bitfield-access warnings. - */ - case 0x80: - case 0x81: - case 0x82: - case 0x83: - switch ((insn_primary[1] >> 3) & 7) { - /* OR */ - case 1: - /* AND */ - case 4: - /* XOR */ - case 6: - kmemcheck_write(regs, fallback_address, size); - goto out; - - /* ADD */ - case 0: - /* ADC */ - case 2: - /* SBB */ - case 3: - /* SUB */ - case 5: - /* CMP */ - case 7: - break; - } - break; -#endif - - /* MOVS, MOVSB, MOVSW, MOVSD */ - case 0xa4: - case 0xa5: - /* - * These instructions are special because they take two - * addresses, but we only get one page fault. - */ - kmemcheck_copy(regs, regs->si, regs->di, size); - goto out; - - /* CMPS, CMPSB, CMPSW, CMPSD */ - case 0xa6: - case 0xa7: - kmemcheck_read(regs, regs->si, size); - kmemcheck_read(regs, regs->di, size); - goto out; - } - - /* - * If the opcode isn't special in any way, we use the data from the - * page fault handler to determine the address and type of memory - * access. - */ - switch (fallback_method) { - case KMEMCHECK_READ: - kmemcheck_read(regs, fallback_address, size); - goto out; - case KMEMCHECK_WRITE: - kmemcheck_write(regs, fallback_address, size); - goto out; - } - -out: - data->busy = false; -} - -bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code) -{ - pte_t *pte; - - /* - * XXX: Is it safe to assume that memory accesses from virtual 86 - * mode or non-kernel code segments will _never_ access kernel - * memory (e.g. tracked pages)? For now, we need this to avoid - * invoking kmemcheck for PnP BIOS calls. - */ - if (regs->flags & X86_VM_MASK) - return false; - if (regs->cs != __KERNEL_CS) - return false; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return false; - - WARN_ON_ONCE(in_nmi()); - - if (error_code & 2) - kmemcheck_access(regs, address, KMEMCHECK_WRITE); - else - kmemcheck_access(regs, address, KMEMCHECK_READ); - - kmemcheck_show(regs); - return true; -} - -bool kmemcheck_trap(struct pt_regs *regs) -{ - if (!kmemcheck_active(regs)) - return false; - - /* We're done. 
*/ - kmemcheck_hide(regs); - return true; -} diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c index df8109ddf7fe30..cec5940325151e 100644 --- a/arch/x86/mm/kmemcheck/opcode.c +++ b/arch/x86/mm/kmemcheck/opcode.c @@ -1,107 +1 @@ // SPDX-License-Identifier: GPL-2.0 -#include - -#include "opcode.h" - -static bool opcode_is_prefix(uint8_t b) -{ - return - /* Group 1 */ - b == 0xf0 || b == 0xf2 || b == 0xf3 - /* Group 2 */ - || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26 - || b == 0x64 || b == 0x65 - /* Group 3 */ - || b == 0x66 - /* Group 4 */ - || b == 0x67; -} - -#ifdef CONFIG_X86_64 -static bool opcode_is_rex_prefix(uint8_t b) -{ - return (b & 0xf0) == 0x40; -} -#else -static bool opcode_is_rex_prefix(uint8_t b) -{ - return false; -} -#endif - -#define REX_W (1 << 3) - -/* - * This is a VERY crude opcode decoder. We only need to find the size of the - * load/store that caused our #PF and this should work for all the opcodes - * that we care about. Moreover, the ones who invented this instruction set - * should be shot. - */ -void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size) -{ - /* Default operand size */ - int operand_size_override = 4; - - /* prefixes */ - for (; opcode_is_prefix(*op); ++op) { - if (*op == 0x66) - operand_size_override = 2; - } - - /* REX prefix */ - if (opcode_is_rex_prefix(*op)) { - uint8_t rex = *op; - - ++op; - if (rex & REX_W) { - switch (*op) { - case 0x63: - *size = 4; - return; - case 0x0f: - ++op; - - switch (*op) { - case 0xb6: - case 0xbe: - *size = 1; - return; - case 0xb7: - case 0xbf: - *size = 2; - return; - } - - break; - } - - *size = 8; - return; - } - } - - /* escape opcode */ - if (*op == 0x0f) { - ++op; - - /* - * This is move with zero-extend and sign-extend, respectively; - * we don't have to think about 0xb6/0xbe, because this is - * already handled in the conditional below. - */ - if (*op == 0xb7 || *op == 0xbf) - operand_size_override = 2; - } - - *size = (*op & 1) ? 
operand_size_override : 1; -} - -const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op) -{ - /* skip prefixes */ - while (opcode_is_prefix(*op)) - ++op; - if (opcode_is_rex_prefix(*op)) - ++op; - return op; -} diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h index 51a1ce94c24a46..ea32a7d3cf1bc8 100644 --- a/arch/x86/mm/kmemcheck/opcode.h +++ b/arch/x86/mm/kmemcheck/opcode.h @@ -1,10 +1 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H -#define ARCH__X86__MM__KMEMCHECK__OPCODE_H - -#include - -void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size); -const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op); - -#endif diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c index 8a03be90272a93..cec5940325151e 100644 --- a/arch/x86/mm/kmemcheck/pte.c +++ b/arch/x86/mm/kmemcheck/pte.c @@ -1,23 +1 @@ // SPDX-License-Identifier: GPL-2.0 -#include - -#include - -#include "pte.h" - -pte_t *kmemcheck_pte_lookup(unsigned long address) -{ - pte_t *pte; - unsigned int level; - - pte = lookup_address(address, &level); - if (!pte) - return NULL; - if (level != PG_LEVEL_4K) - return NULL; - if (!pte_hidden(*pte)) - return NULL; - - return pte; -} - diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h index b595612382c27b..ea32a7d3cf1bc8 100644 --- a/arch/x86/mm/kmemcheck/pte.h +++ b/arch/x86/mm/kmemcheck/pte.h @@ -1,11 +1 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H -#define ARCH__X86__MM__KMEMCHECK__PTE_H - -#include - -#include - -pte_t *kmemcheck_pte_lookup(unsigned long address); - -#endif diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c index 7ce0be1f99eb66..cec5940325151e 100644 --- a/arch/x86/mm/kmemcheck/selftest.c +++ b/arch/x86/mm/kmemcheck/selftest.c @@ -1,71 +1 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include - -#include "opcode.h" -#include "selftest.h" - -struct selftest_opcode { - unsigned int expected_size; - const uint8_t *insn; - const char *desc; -}; - -static const struct selftest_opcode selftest_opcodes[] = { - /* REP MOVS */ - {1, "\xf3\xa4", "rep movsb , "}, - {4, "\xf3\xa5", "rep movsl , "}, - - /* MOVZX / MOVZXD */ - {1, "\x66\x0f\xb6\x51\xf8", "movzwq , "}, - {1, "\x0f\xb6\x51\xf8", "movzwq , "}, - - /* MOVSX / MOVSXD */ - {1, "\x66\x0f\xbe\x51\xf8", "movswq , "}, - {1, "\x0f\xbe\x51\xf8", "movswq , "}, - -#ifdef CONFIG_X86_64 - /* MOVZX / MOVZXD */ - {1, "\x49\x0f\xb6\x51\xf8", "movzbq , "}, - {2, "\x49\x0f\xb7\x51\xf8", "movzbq , "}, - - /* MOVSX / MOVSXD */ - {1, "\x49\x0f\xbe\x51\xf8", "movsbq , "}, - {2, "\x49\x0f\xbf\x51\xf8", "movsbq , "}, - {4, "\x49\x63\x51\xf8", "movslq , "}, -#endif -}; - -static bool selftest_opcode_one(const struct selftest_opcode *op) -{ - unsigned size; - - kmemcheck_opcode_decode(op->insn, &size); - - if (size == op->expected_size) - return true; - - printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n", - op->desc, op->expected_size, size); - return false; -} - -static bool selftest_opcodes_all(void) -{ - bool pass = true; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i) - pass = pass && selftest_opcode_one(&selftest_opcodes[i]); - - return pass; -} - -bool kmemcheck_selftest(void) -{ - bool pass = true; - - pass = pass && selftest_opcodes_all(); - - return pass; -} diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h index 8d759aae453db6..ea32a7d3cf1bc8 100644 --- 
a/arch/x86/mm/kmemcheck/selftest.h +++ b/arch/x86/mm/kmemcheck/selftest.h @@ -1,7 +1 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H -#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H - -bool kmemcheck_selftest(void); - -#endif diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c deleted file mode 100644 index c2638a7d2c1019..00000000000000 --- a/arch/x86/mm/kmemcheck/shadow.c +++ /dev/null @@ -1,173 +0,0 @@ -#include -#include -#include - -#include -#include - -#include "pte.h" -#include "shadow.h" - -/* - * Return the shadow address for the given address. Returns NULL if the - * address is not tracked. - * - * We need to be extremely careful not to follow any invalid pointers, - * because this function can be called for *any* possible address. - */ -void *kmemcheck_shadow_lookup(unsigned long address) -{ - pte_t *pte; - struct page *page; - - if (!virt_addr_valid(address)) - return NULL; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return NULL; - - page = virt_to_page(address); - if (!page->shadow) - return NULL; - return page->shadow + (address & (PAGE_SIZE - 1)); -} - -static void mark_shadow(void *address, unsigned int n, - enum kmemcheck_shadow status) -{ - unsigned long addr = (unsigned long) address; - unsigned long last_addr = addr + n - 1; - unsigned long page = addr & PAGE_MASK; - unsigned long last_page = last_addr & PAGE_MASK; - unsigned int first_n; - void *shadow; - - /* If the memory range crosses a page boundary, stop there. */ - if (page == last_page) - first_n = n; - else - first_n = page + PAGE_SIZE - addr; - - shadow = kmemcheck_shadow_lookup(addr); - if (shadow) - memset(shadow, status, first_n); - - addr += first_n; - n -= first_n; - - /* Do full-page memset()s. */ - while (n >= PAGE_SIZE) { - shadow = kmemcheck_shadow_lookup(addr); - if (shadow) - memset(shadow, status, PAGE_SIZE); - - addr += PAGE_SIZE; - n -= PAGE_SIZE; - } - - /* Do the remaining page, if any. */ - if (n > 0) { - shadow = kmemcheck_shadow_lookup(addr); - if (shadow) - memset(shadow, status, n); - } -} - -void kmemcheck_mark_unallocated(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED); -} - -void kmemcheck_mark_uninitialized(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED); -} - -/* - * Fill the shadow memory of the given address such that the memory at that - * address is marked as being initialized. 
- */ -void kmemcheck_mark_initialized(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED); -} -EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized); - -void kmemcheck_mark_freed(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_FREED); -} - -void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) - kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE); -} - -void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) - kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE); -} - -void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) - kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE); -} - -enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) -{ -#ifdef CONFIG_KMEMCHECK_PARTIAL_OK - uint8_t *x; - unsigned int i; - - x = shadow; - - /* - * Make sure _some_ bytes are initialized. Gcc frequently generates - * code to access neighboring bytes. - */ - for (i = 0; i < size; ++i) { - if (x[i] == KMEMCHECK_SHADOW_INITIALIZED) - return x[i]; - } - - return x[0]; -#else - return kmemcheck_shadow_test_all(shadow, size); -#endif -} - -enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow, unsigned int size) -{ - uint8_t *x; - unsigned int i; - - x = shadow; - - /* All bytes must be initialized. */ - for (i = 0; i < size; ++i) { - if (x[i] != KMEMCHECK_SHADOW_INITIALIZED) - return x[i]; - } - - return x[0]; -} - -void kmemcheck_shadow_set(void *shadow, unsigned int size) -{ - uint8_t *x; - unsigned int i; - - x = shadow; - for (i = 0; i < size; ++i) - x[i] = KMEMCHECK_SHADOW_INITIALIZED; -} diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h index 49768dc18664be..ea32a7d3cf1bc8 100644 --- a/arch/x86/mm/kmemcheck/shadow.h +++ b/arch/x86/mm/kmemcheck/shadow.h @@ -1,19 +1 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H -#define ARCH__X86__MM__KMEMCHECK__SHADOW_H - -enum kmemcheck_shadow { - KMEMCHECK_SHADOW_UNALLOCATED, - KMEMCHECK_SHADOW_UNINITIALIZED, - KMEMCHECK_SHADOW_INITIALIZED, - KMEMCHECK_SHADOW_FREED, -}; - -void *kmemcheck_shadow_lookup(unsigned long address); - -enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size); -enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow, - unsigned int size); -void kmemcheck_shadow_set(void *shadow, unsigned int size); - -#endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index baeb872283d91b..69c238210325e7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -594,21 +594,6 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t) __tasklet_hi_schedule(t); } -extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); - -/* - * This version avoids touching any other tasklets. Needed for kmemcheck - * in order not to take any page faults while enqueueing this tasklet; - * consider VERY carefully whether you really need this or - * tasklet_hi_schedule()... 
- */ -static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) -{ - if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) - __tasklet_hi_schedule_first(t); -} - - static inline void tasklet_disable_nosync(struct tasklet_struct *t) { atomic_inc(&t->count); diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index 7b1d7bead7d93c..ea32a7d3cf1bc8 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h @@ -1,172 +1 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef LINUX_KMEMCHECK_H -#define LINUX_KMEMCHECK_H - -#include -#include - -#ifdef CONFIG_KMEMCHECK -extern int kmemcheck_enabled; - -/* The slab-related functions. */ -void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node); -void kmemcheck_free_shadow(struct page *page, int order); -void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, - size_t size); -void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size); - -void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order, - gfp_t gfpflags); - -void kmemcheck_show_pages(struct page *p, unsigned int n); -void kmemcheck_hide_pages(struct page *p, unsigned int n); - -bool kmemcheck_page_is_tracked(struct page *p); - -void kmemcheck_mark_unallocated(void *address, unsigned int n); -void kmemcheck_mark_uninitialized(void *address, unsigned int n); -void kmemcheck_mark_initialized(void *address, unsigned int n); -void kmemcheck_mark_freed(void *address, unsigned int n); - -void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n); -void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n); -void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n); - -int kmemcheck_show_addr(unsigned long address); -int kmemcheck_hide_addr(unsigned long address); - -bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size); - -/* - * Bitfield annotations - * - * How to use: If you have a struct using bitfields, for example - * - * struct a { - * int x:8, y:8; - * }; - * - * then this should be rewritten as - * - * struct a { - * kmemcheck_bitfield_begin(flags); - * int x:8, y:8; - * kmemcheck_bitfield_end(flags); - * }; - * - * Now the "flags_begin" and "flags_end" members may be used to refer to the - * beginning and end, respectively, of the bitfield (and things like - * &x.flags_begin is allowed). 
As soon as the struct is allocated, the bit- - * fields should be annotated: - * - * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL); - * kmemcheck_annotate_bitfield(a, flags); - */ -#define kmemcheck_bitfield_begin(name) \ - int name##_begin[0]; - -#define kmemcheck_bitfield_end(name) \ - int name##_end[0]; - -#define kmemcheck_annotate_bitfield(ptr, name) \ - do { \ - int _n; \ - \ - if (!ptr) \ - break; \ - \ - _n = (long) &((ptr)->name##_end) \ - - (long) &((ptr)->name##_begin); \ - BUILD_BUG_ON(_n < 0); \ - \ - kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \ - } while (0) - -#define kmemcheck_annotate_variable(var) \ - do { \ - kmemcheck_mark_initialized(&(var), sizeof(var)); \ - } while (0) \ - -#else -#define kmemcheck_enabled 0 - -static inline void -kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) -{ -} - -static inline void -kmemcheck_free_shadow(struct page *page, int order) -{ -} - -static inline void -kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, - size_t size) -{ -} - -static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object, - size_t size) -{ -} - -static inline void kmemcheck_pagealloc_alloc(struct page *p, - unsigned int order, gfp_t gfpflags) -{ -} - -static inline bool kmemcheck_page_is_tracked(struct page *p) -{ - return false; -} - -static inline void kmemcheck_mark_unallocated(void *address, unsigned int n) -{ -} - -static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n) -{ -} - -static inline void kmemcheck_mark_initialized(void *address, unsigned int n) -{ -} - -static inline void kmemcheck_mark_freed(void *address, unsigned int n) -{ -} - -static inline void kmemcheck_mark_unallocated_pages(struct page *p, - unsigned int n) -{ -} - -static inline void kmemcheck_mark_uninitialized_pages(struct page *p, - unsigned int n) -{ -} - -static inline void kmemcheck_mark_initialized_pages(struct page *p, - unsigned int n) -{ -} - -static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) -{ - return true; -} - -#define kmemcheck_bitfield_begin(name) -#define kmemcheck_bitfield_end(name) -#define kmemcheck_annotate_bitfield(ptr, name) \ - do { \ - } while (0) - -#define kmemcheck_annotate_variable(var) \ - do { \ - } while (0) - -#endif /* CONFIG_KMEMCHECK */ - -#endif /* LINUX_KMEMCHECK_H */ diff --git a/kernel/softirq.c b/kernel/softirq.c index 4e09821f9d9e8b..e89c3b0cff6dff 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -486,16 +486,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) } EXPORT_SYMBOL(__tasklet_hi_schedule); -void __tasklet_hi_schedule_first(struct tasklet_struct *t) -{ - BUG_ON(!irqs_disabled()); - - t->next = __this_cpu_read(tasklet_hi_vec.head); - __this_cpu_write(tasklet_hi_vec.head, t); - __raise_softirq_irqoff(HI_SOFTIRQ); -} -EXPORT_SYMBOL(__tasklet_hi_schedule_first); - static __latent_entropy void tasklet_action(struct softirq_action *a) { struct tasklet_struct *list; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 56aca862c4f584..069550540a3925 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -1173,15 +1172,6 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &one_thousand, }, -#endif -#ifdef CONFIG_KMEMCHECK - { - .procname = "kmemcheck", - .data = &kmemcheck_enabled, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, #endif { .procname = "panic_on_warn", diff --git 
a/lib/Kconfig.debug b/lib/Kconfig.debug index 00cb02daeddddd..62d0e25c054c6a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -504,7 +504,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT config DEBUG_SLAB bool "Debug slab memory allocations" - depends on DEBUG_KERNEL && SLAB && !KMEMCHECK + depends on DEBUG_KERNEL && SLAB help Say Y here to have the kernel do limited verification on memory allocation as well as poisoning memory on free to catch use of freed @@ -516,7 +516,7 @@ config DEBUG_SLAB_LEAK config SLUB_DEBUG_ON bool "SLUB debugging on by default" - depends on SLUB && SLUB_DEBUG && !KMEMCHECK + depends on SLUB && SLUB_DEBUG default n help Boot with debugging on by default. SLUB boots by default with @@ -730,8 +730,6 @@ config DEBUG_STACKOVERFLOW If in doubt, say "N". -source "lib/Kconfig.kmemcheck" - source "lib/Kconfig.kasan" endmenu # "Memory Debugging" diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck deleted file mode 100644 index 846e039a86b4b4..00000000000000 --- a/lib/Kconfig.kmemcheck +++ /dev/null @@ -1,94 +0,0 @@ -config HAVE_ARCH_KMEMCHECK - bool - -if HAVE_ARCH_KMEMCHECK - -menuconfig KMEMCHECK - bool "kmemcheck: trap use of uninitialized memory" - depends on DEBUG_KERNEL - depends on !X86_USE_3DNOW - depends on SLUB || SLAB - depends on !CC_OPTIMIZE_FOR_SIZE - depends on !FUNCTION_TRACER - select FRAME_POINTER - select STACKTRACE - default n - help - This option enables tracing of dynamically allocated kernel memory - to see if memory is used before it has been given an initial value. - Be aware that this requires half of your memory for bookkeeping and - will insert extra code at *every* read and write to tracked memory - thus slow down the kernel code (but user code is unaffected). - - The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable - or enable kmemcheck at boot-time. If the kernel is started with - kmemcheck=0, the large memory and CPU overhead is not incurred. - -choice - prompt "kmemcheck: default mode at boot" - depends on KMEMCHECK - default KMEMCHECK_ONESHOT_BY_DEFAULT - help - This option controls the default behaviour of kmemcheck when the - kernel boots and no kmemcheck= parameter is given. - -config KMEMCHECK_DISABLED_BY_DEFAULT - bool "disabled" - depends on KMEMCHECK - -config KMEMCHECK_ENABLED_BY_DEFAULT - bool "enabled" - depends on KMEMCHECK - -config KMEMCHECK_ONESHOT_BY_DEFAULT - bool "one-shot" - depends on KMEMCHECK - help - In one-shot mode, only the first error detected is reported before - kmemcheck is disabled. - -endchoice - -config KMEMCHECK_QUEUE_SIZE - int "kmemcheck: error queue size" - depends on KMEMCHECK - default 64 - help - Select the maximum number of errors to store in the queue. Since - errors can occur virtually anywhere and in any context, we need a - temporary storage area which is guarantueed not to generate any - other faults. The queue will be emptied as soon as a tasklet may - be scheduled. If the queue is full, new error reports will be - lost. - -config KMEMCHECK_SHADOW_COPY_SHIFT - int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)" - depends on KMEMCHECK - range 2 8 - default 5 - help - Select the number of shadow bytes to save along with each entry of - the queue. These bytes indicate what parts of an allocation are - initialized, uninitialized, etc. and will be displayed when an - error is detected to help the debugging of a particular problem. 
- -config KMEMCHECK_PARTIAL_OK - bool "kmemcheck: allow partially uninitialized memory" - depends on KMEMCHECK - default y - help - This option works around certain GCC optimizations that produce - 32-bit reads from 16-bit variables where the upper 16 bits are - thrown away afterwards. This may of course also hide some real - bugs. - -config KMEMCHECK_BITOPS_OK - bool "kmemcheck: allow bit-field manipulation" - depends on KMEMCHECK - default n - help - This option silences warnings that would be generated for bit-field - accesses where not all the bits are initialized at the same time. - This may also hide some real bugs. - -endif diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index 5b0adf1435de7d..e5e606ee5f715e 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -11,7 +11,6 @@ config DEBUG_PAGEALLOC bool "Debug page memory allocations" depends on DEBUG_KERNEL depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC - depends on !KMEMCHECK select PAGE_EXTENSION select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC ---help--- diff --git a/mm/Makefile b/mm/Makefile index 4659b93cba43bb..e7ebd176fb935e 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -17,7 +17,6 @@ KCOV_INSTRUMENT_slub.o := n KCOV_INSTRUMENT_page_alloc.o := n KCOV_INSTRUMENT_debug-pagealloc.o := n KCOV_INSTRUMENT_kmemleak.o := n -KCOV_INSTRUMENT_kmemcheck.o := n KCOV_INSTRUMENT_memcontrol.o := n KCOV_INSTRUMENT_mmzone.o := n KCOV_INSTRUMENT_vmstat.o := n @@ -70,7 +69,6 @@ obj-$(CONFIG_KSM) += ksm.o obj-$(CONFIG_PAGE_POISONING) += page_poison.o obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLUB) += slub.o -obj-$(CONFIG_KMEMCHECK) += kmemcheck.o obj-$(CONFIG_KASAN) += kasan/ obj-$(CONFIG_FAILSLAB) += failslab.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c index b3a4d61d341c6a..cec5940325151e 100644 --- a/mm/kmemcheck.c +++ b/mm/kmemcheck.c @@ -1,126 +1 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include "slab.h" -#include - -void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) -{ - struct page *shadow; - int pages; - int i; - - pages = 1 << order; - - /* - * With kmemcheck enabled, we need to allocate a memory area for the - * shadow bits as well. - */ - shadow = alloc_pages_node(node, flags, order); - if (!shadow) { - if (printk_ratelimit()) - pr_err("kmemcheck: failed to allocate shadow bitmap\n"); - return; - } - - for(i = 0; i < pages; ++i) - page[i].shadow = page_address(&shadow[i]); - - /* - * Mark it as non-present for the MMU so that our accesses to - * this memory will trigger a page fault and let us analyze - * the memory accesses. - */ - kmemcheck_hide_pages(page, pages); -} - -void kmemcheck_free_shadow(struct page *page, int order) -{ - struct page *shadow; - int pages; - int i; - - if (!kmemcheck_page_is_tracked(page)) - return; - - pages = 1 << order; - - kmemcheck_show_pages(page, pages); - - shadow = virt_to_page(page[0].shadow); - - for(i = 0; i < pages; ++i) - page[i].shadow = NULL; - - __free_pages(shadow, order); -} - -void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, - size_t size) -{ - if (unlikely(!object)) /* Skip object if allocation failed */ - return; - - /* - * Has already been memset(), which initializes the shadow for us - * as well. - */ - if (gfpflags & __GFP_ZERO) - return; - - /* No need to initialize the shadow of a non-tracked slab. 
*/ - if (s->flags & SLAB_NOTRACK) - return; - - if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) { - /* - * Allow notracked objects to be allocated from - * tracked caches. Note however that these objects - * will still get page faults on access, they just - * won't ever be flagged as uninitialized. If page - * faults are not acceptable, the slab cache itself - * should be marked NOTRACK. - */ - kmemcheck_mark_initialized(object, size); - } else if (!s->ctor) { - /* - * New objects should be marked uninitialized before - * they're returned to the called. - */ - kmemcheck_mark_uninitialized(object, size); - } -} - -void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size) -{ - /* TODO: RCU freeing is unsupported for now; hide false positives. */ - if (!s->ctor && !(s->flags & SLAB_TYPESAFE_BY_RCU)) - kmemcheck_mark_freed(object, size); -} - -void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order, - gfp_t gfpflags) -{ - int pages; - - if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK)) - return; - - pages = 1 << order; - - /* - * NOTE: We choose to track GFP_ZERO pages too; in fact, they - * can become uninitialized by copying uninitialized memory - * into them. - */ - - /* XXX: Can use zone->node for node? */ - kmemcheck_alloc_shadow(page, order, gfpflags, -1); - - if (gfpflags & __GFP_ZERO) - kmemcheck_mark_initialized_pages(page, pages); - else - kmemcheck_mark_uninitialized_pages(page, pages); -} diff --git a/mm/slub.c b/mm/slub.c index 38acb5c114edab..41c01690d116ff 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1369,7 +1369,7 @@ static inline void *slab_free_hook(struct kmem_cache *s, void *x) * So in order to make the debug calls that expect irqs to be * disabled we need to disable interrupts temporarily. */ -#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP) +#ifdef CONFIG_LOCKDEP { unsigned long flags; @@ -1397,8 +1397,7 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s, * Compiler cannot detect this function can be removed if slab_free_hook() * evaluates to nothing. Thus, catch all relevant config debug options here. */ -#if defined(CONFIG_KMEMCHECK) || \ - defined(CONFIG_LOCKDEP) || \ +#if defined(CONFIG_LOCKDEP) || \ defined(CONFIG_DEBUG_KMEMLEAK) || \ defined(CONFIG_DEBUG_OBJECTS_FREE) || \ defined(CONFIG_KASAN) diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 9d3eafea58f06a..8323ff9dec714e 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -2182,8 +2182,6 @@ sub dump_struct($$) { # strip comments: $members =~ s/\/\*.*?\*\///gos; $nested =~ s/\/\*.*?\*\///gos; - # strip kmemcheck_bitfield_{begin,end}.*; - $members =~ s/kmemcheck_bitfield_.*?;//gos; # strip attributes $members =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i; $members =~ s/__aligned\s*\([^;]*\)//gos; diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h index 2bccd2c7b8974e..ea32a7d3cf1bc8 100644 --- a/tools/include/linux/kmemcheck.h +++ b/tools/include/linux/kmemcheck.h @@ -1,9 +1 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_KMEMCHECK_H_ -#define _LIBLOCKDEP_LINUX_KMEMCHECK_H_ - -static inline void kmemcheck_mark_initialized(void *address, unsigned int n) -{ -} - -#endif From d6d0c0a618a15ba4dadd7ad6131a814359a53973 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 6 Dec 2017 11:27:57 +0100 Subject: [PATCH 2062/3326] kmemcheck: rip it out for real commit f335195adf043168ee69d78ea72ac3e30f0c57ce upstream. 
Commit 4675ff05de2d ("kmemcheck: rip it out") has removed the code but for some reason SPDX header stayed in place. This looks like a rebase mistake in the mmotm tree or the merge mistake. Let's drop those leftovers as well. Signed-off-by: Michal Hocko Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/kmemcheck.h | 1 - arch/x86/mm/kmemcheck/error.c | 1 - arch/x86/mm/kmemcheck/error.h | 1 - arch/x86/mm/kmemcheck/opcode.c | 1 - arch/x86/mm/kmemcheck/opcode.h | 1 - arch/x86/mm/kmemcheck/pte.c | 1 - arch/x86/mm/kmemcheck/pte.h | 1 - arch/x86/mm/kmemcheck/selftest.c | 1 - arch/x86/mm/kmemcheck/selftest.h | 1 - arch/x86/mm/kmemcheck/shadow.h | 1 - include/linux/kmemcheck.h | 1 - mm/kmemcheck.c | 1 - tools/include/linux/kmemcheck.h | 1 - 13 files changed, 13 deletions(-) delete mode 100644 arch/x86/include/asm/kmemcheck.h delete mode 100644 arch/x86/mm/kmemcheck/error.c delete mode 100644 arch/x86/mm/kmemcheck/error.h delete mode 100644 arch/x86/mm/kmemcheck/opcode.c delete mode 100644 arch/x86/mm/kmemcheck/opcode.h delete mode 100644 arch/x86/mm/kmemcheck/pte.c delete mode 100644 arch/x86/mm/kmemcheck/pte.h delete mode 100644 arch/x86/mm/kmemcheck/selftest.c delete mode 100644 arch/x86/mm/kmemcheck/selftest.h delete mode 100644 arch/x86/mm/kmemcheck/shadow.h delete mode 100644 include/linux/kmemcheck.h delete mode 100644 mm/kmemcheck.c delete mode 100644 tools/include/linux/kmemcheck.h diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h deleted file mode 100644 index ea32a7d3cf1bc8..00000000000000 --- a/arch/x86/include/asm/kmemcheck.h +++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c deleted file mode 100644 index cec5940325151e..00000000000000 --- a/arch/x86/mm/kmemcheck/error.c +++ /dev/null @@ -1 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h deleted file mode 100644 index ea32a7d3cf1bc8..00000000000000 --- a/arch/x86/mm/kmemcheck/error.h +++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c deleted file mode 100644 index cec5940325151e..00000000000000 --- a/arch/x86/mm/kmemcheck/opcode.c +++ /dev/null @@ -1 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h deleted file mode 100644 index ea32a7d3cf1bc8..00000000000000 --- a/arch/x86/mm/kmemcheck/opcode.h +++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c deleted file mode 100644 index cec5940325151e..00000000000000 --- a/arch/x86/mm/kmemcheck/pte.c +++ /dev/null @@ -1 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h deleted file mode 100644 index ea32a7d3cf1bc8..00000000000000 --- a/arch/x86/mm/kmemcheck/pte.h +++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c deleted file mode 100644 index cec5940325151e..00000000000000 --- a/arch/x86/mm/kmemcheck/selftest.c +++ /dev/null @@ -1 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h deleted file mode 100644 index ea32a7d3cf1bc8..00000000000000 --- a/arch/x86/mm/kmemcheck/selftest.h 
+++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h deleted file mode 100644 index ea32a7d3cf1bc8..00000000000000 --- a/arch/x86/mm/kmemcheck/shadow.h +++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h deleted file mode 100644 index ea32a7d3cf1bc8..00000000000000 --- a/include/linux/kmemcheck.h +++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c deleted file mode 100644 index cec5940325151e..00000000000000 --- a/mm/kmemcheck.c +++ /dev/null @@ -1 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h deleted file mode 100644 index ea32a7d3cf1bc8..00000000000000 --- a/tools/include/linux/kmemcheck.h +++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ From 208beef6d8f223a0cc57e2fcecc9ca28fc778f50 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 31 Jan 2018 08:03:10 -0800 Subject: [PATCH 2063/3326] x86/mm: Rename flush_tlb_single() and flush_tlb_one() to __flush_tlb_one_[user|kernel]() commit 1299ef1d8870d2d9f09a5aadf2f8b2c887c2d033 upstream. flush_tlb_single() and flush_tlb_one() sound almost identical, but they really mean "flush one user translation" and "flush one kernel translation". Rename them to flush_tlb_one_user() and flush_tlb_one_kernel() to make the semantics more obvious. [ I was looking at some PTI-related code, and the flush-one-address code is unnecessarily hard to understand because the names of the helpers are uninformative. This came up during PTI review, but no one got around to doing it. ] Signed-off-by: Andy Lutomirski Acked-by: Peter Zijlstra (Intel) Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Eduardo Valentin Cc: Hugh Dickins Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Linux-MM Cc: Rik van Riel Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/3303b02e3c3d049dc5235d5651e0ae6d29a34354.1517414378.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/paravirt.h | 4 ++-- arch/x86/include/asm/paravirt_types.h | 2 +- arch/x86/include/asm/pgtable_32.h | 2 +- arch/x86/include/asm/tlbflush.h | 27 ++++++++++++++++++++------- arch/x86/kernel/acpi/apei.c | 2 +- arch/x86/kernel/paravirt.c | 6 +++--- arch/x86/mm/init_64.c | 2 +- arch/x86/mm/ioremap.c | 2 +- arch/x86/mm/kmmio.c | 2 +- arch/x86/mm/pgtable_32.c | 2 +- arch/x86/mm/tlb.c | 6 +++--- arch/x86/platform/uv/tlb_uv.c | 2 +- arch/x86/xen/mmu_pv.c | 6 +++--- include/trace/events/xen.h | 2 +- 14 files changed, 40 insertions(+), 27 deletions(-) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 892df375b6155a..554841fab717ae 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void) { PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel); } -static inline void __flush_tlb_single(unsigned long addr) +static inline void __flush_tlb_one_user(unsigned long addr) { - PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); + PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr); } static inline void flush_tlb_others(const struct cpumask *cpumask, diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 6ec54d01972dcf..f624f1f10316c2 100644 --- 
a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -217,7 +217,7 @@ struct pv_mmu_ops { /* TLB operations */ void (*flush_tlb_user)(void); void (*flush_tlb_kernel)(void); - void (*flush_tlb_single)(unsigned long addr); + void (*flush_tlb_one_user)(unsigned long addr); void (*flush_tlb_others)(const struct cpumask *cpus, const struct flush_tlb_info *info); diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index e67c0620aec2a2..e55466760ff8e0 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -61,7 +61,7 @@ void paging_init(void); #define kpte_clear_flush(ptep, vaddr) \ do { \ pte_clear(&init_mm, (vaddr), (ptep)); \ - __flush_tlb_one((vaddr)); \ + __flush_tlb_one_kernel((vaddr)); \ } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 4405c4b308e899..704f31315ddeb3 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) #else #define __flush_tlb() __native_flush_tlb() #define __flush_tlb_global() __native_flush_tlb_global() -#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) +#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr) #endif static inline bool tlb_defer_switch_to_init_mm(void) @@ -397,7 +397,7 @@ static inline void __native_flush_tlb_global(void) /* * flush one page in the user mapping */ -static inline void __native_flush_tlb_single(unsigned long addr) +static inline void __native_flush_tlb_one_user(unsigned long addr) { u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); @@ -434,18 +434,31 @@ static inline void __flush_tlb_all(void) /* * flush one page in the kernel mapping */ -static inline void __flush_tlb_one(unsigned long addr) +static inline void __flush_tlb_one_kernel(unsigned long addr) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); - __flush_tlb_single(addr); + + /* + * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its + * paravirt equivalent. Even with PCID, this is sufficient: we only + * use PCID if we also use global PTEs for the kernel mapping, and + * INVLPG flushes global translations across all address spaces. + * + * If PTI is on, then the kernel is mapped with non-global PTEs, and + * __flush_tlb_one_user() will flush the given address for the current + * kernel address space and for its usermode counterpart, but it does + * not flush it for other address spaces. + */ + __flush_tlb_one_user(addr); if (!static_cpu_has(X86_FEATURE_PTI)) return; /* - * __flush_tlb_single() will have cleared the TLB entry for this ASID, - * but since kernel space is replicated across all, we must also - * invalidate all others. + * See above. We need to propagate the flush to all other address + * spaces. In principle, we only need to propagate it to kernelmode + * address spaces, but the extra bookkeeping we would need is not + * worth it. 
*/ invalidate_other_asid(); } diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c index ea3046e0b0cf53..28d70ac93fafbc 100644 --- a/arch/x86/kernel/acpi/apei.c +++ b/arch/x86/kernel/acpi/apei.c @@ -55,5 +55,5 @@ void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) void arch_apei_flush_tlb_one(unsigned long addr) { - __flush_tlb_one(addr); + __flush_tlb_one_kernel(addr); } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 19a3e8f961c772..e1df9ef5d78c95 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -190,9 +190,9 @@ static void native_flush_tlb_global(void) __native_flush_tlb_global(); } -static void native_flush_tlb_single(unsigned long addr) +static void native_flush_tlb_one_user(unsigned long addr) { - __native_flush_tlb_single(addr); + __native_flush_tlb_one_user(addr); } struct static_key paravirt_steal_enabled; @@ -391,7 +391,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { .flush_tlb_user = native_flush_tlb, .flush_tlb_kernel = native_flush_tlb_global, - .flush_tlb_single = native_flush_tlb_single, + .flush_tlb_one_user = native_flush_tlb_one_user, .flush_tlb_others = native_flush_tlb_others, .pgd_alloc = __paravirt_pgd_alloc, diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 5fa3a58b5d7865..fe85d1204db8f4 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte) * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ - __flush_tlb_one(vaddr); + __flush_tlb_one_kernel(vaddr); } void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 34f0e1847dd64b..bb120e59c59799 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -749,5 +749,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx, set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); else pte_clear(&init_mm, addr, pte); - __flush_tlb_one(addr); + __flush_tlb_one_kernel(addr); } diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index c21c2ed046120c..aa44c3aa4cd5cb 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) return -1; } - __flush_tlb_one(f->addr); + __flush_tlb_one_kernel(f->addr); return 0; } diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index c3c5274410a908..9bb7f0ab9fe625 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ - __flush_tlb_one(vaddr); + __flush_tlb_one_kernel(vaddr); } unsigned long __FIXADDR_TOP = 0xfffff000; diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 012d0262484894..0c936435ea9398 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -492,7 +492,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, * flush that changes context.tlb_gen from 2 to 3. If they get * processed on this CPU in reverse order, we'll see * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL. - * If we were to use __flush_tlb_single() and set local_tlb_gen to + * If we were to use __flush_tlb_one_user() and set local_tlb_gen to * 3, we'd be break the invariant: we'd update local_tlb_gen above * 1 without the full flush that's needed for tlb_gen 2. 
* @@ -513,7 +513,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, addr = f->start; while (addr < f->end) { - __flush_tlb_single(addr); + __flush_tlb_one_user(addr); addr += PAGE_SIZE; } if (local) @@ -660,7 +660,7 @@ static void do_kernel_range_flush(void *info) /* flush range by one by one 'invlpg' */ for (addr = f->start; addr < f->end; addr += PAGE_SIZE) - __flush_tlb_one(addr); + __flush_tlb_one_kernel(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 8538a6723171a5..7d5d53f36a7abd 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, local_flush_tlb(); stat->d_alltlb++; } else { - __flush_tlb_single(msg->address); + __flush_tlb_one_user(msg->address); stat->d_onetlb++; } stat->d_requestee++; diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index a0e2b8c6e5c73c..d0b943a6b1177b 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void) preempt_enable(); } -static void xen_flush_tlb_single(unsigned long addr) +static void xen_flush_tlb_one_user(unsigned long addr) { struct mmuext_op *op; struct multicall_space mcs; - trace_xen_mmu_flush_tlb_single(addr); + trace_xen_mmu_flush_tlb_one_user(addr); preempt_disable(); @@ -2360,7 +2360,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .flush_tlb_user = xen_flush_tlb, .flush_tlb_kernel = xen_flush_tlb, - .flush_tlb_single = xen_flush_tlb_single, + .flush_tlb_one_user = xen_flush_tlb_one_user, .flush_tlb_others = xen_flush_tlb_others, .pgd_alloc = xen_pgd_alloc, diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h index a7c8b452aab9c7..d791863b62fc36 100644 --- a/include/trace/events/xen.h +++ b/include/trace/events/xen.h @@ -365,7 +365,7 @@ TRACE_EVENT(xen_mmu_flush_tlb, TP_printk("%s", "") ); -TRACE_EVENT(xen_mmu_flush_tlb_single, +TRACE_EVENT(xen_mmu_flush_tlb_one_user, TP_PROTO(unsigned long addr), TP_ARGS(addr), TP_STRUCT__entry( From 73f231c7ee696cdc53ee0922e6d3bf0480a5ae90 Mon Sep 17 00:00:00 2001 From: Rui Wang Date: Mon, 18 Dec 2017 16:34:10 +0800 Subject: [PATCH 2064/3326] selftests/x86/mpx: Fix incorrect bounds with old _sigfault commit 961888b1d76d84efc66a8f5604b06ac12ac2f978 upstream. For distributions with old userspace header files, the _sigfault structure is different. mpx-mini-test fails with the following error: [root@Purley]# mpx-mini-test_64 tabletest XSAVE is supported by HW & OS XSAVE processor supported state mask: 0x2ff XSAVE OS supported state mask: 0x2ff BNDREGS: size: 64 user: 1 supervisor: 0 aligned: 0 BNDCSR: size: 64 user: 1 supervisor: 0 aligned: 0 starting mpx bounds table test ERROR: siginfo bounds do not match shadow bounds for register 0 Fix it by using the correct offset of _lower/_upper in _sigfault. RHEL needs this patch to work. 
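A note on the pointer arithmetic in the __si_bounds_hack() change below: advancing two ints past the end of the old single-member _sigfault covers _trapno, _addr_lsb and the alignment padding that precedes the pointer union, so the same "+ 2" lands on _lower on both 32-bit and 64-bit x86. The stand-alone sketch that follows only illustrates that layout assumption; the struct definitions are hypothetical mirrors of the layouts quoted in the patch, not the real uapi headers.

#include <assert.h>
#include <stddef.h>

/* Old layout: _sigfault carries only the faulting address. */
struct old_sigfault {
	void *si_addr;
};

/* New layout, mirroring the comment added by this patch. */
struct new_sigfault {
	void *_addr;
	int _trapno;
	short _addr_lsb;
	union {
		struct {
			void *_lower;
			void *_upper;
		} _addr_bnd;
		unsigned int _pkey;
	};
};

int main(void)
{
	/* _lower starts exactly two ints past the end of the old layout. */
	assert(offsetof(struct new_sigfault, _addr_bnd._lower) ==
	       sizeof(struct old_sigfault) + 2 * sizeof(int));
	return 0;
}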
Signed-off-by: Rui Wang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dave.hansen@linux.intel.com Fixes: e754aedc26ef ("x86/mpx, selftests: Add MPX self test") Link: http://lkml.kernel.org/r/1513586050-1641-1-git-send-email-rui.y.wang@intel.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/x86/mpx-mini-test.c | 32 +++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c index ec0f6b45ce8b4d..9c0325e1ea6844 100644 --- a/tools/testing/selftests/x86/mpx-mini-test.c +++ b/tools/testing/selftests/x86/mpx-mini-test.c @@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(siginfo_t *si) return si->si_upper; } #else + +/* + * This deals with old version of _sigfault in some distros: + * + +old _sigfault: + struct { + void *si_addr; + } _sigfault; + +new _sigfault: + struct { + void __user *_addr; + int _trapno; + short _addr_lsb; + union { + struct { + void __user *_lower; + void __user *_upper; + } _addr_bnd; + __u32 _pkey; + }; + } _sigfault; + * + */ + static inline void **__si_bounds_hack(siginfo_t *si) { void *sigfault = &si->_sifields._sigfault; void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault); - void **__si_lower = end_sigfault; + int *trapno = (int*)end_sigfault; + /* skip _trapno and _addr_lsb */ + void **__si_lower = (void**)(trapno + 2); return __si_lower; } @@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(siginfo_t *si) static inline void *__si_bounds_upper(siginfo_t *si) { - return (*__si_bounds_hack(si)) + sizeof(void *); + return *(__si_bounds_hack(si) + 1); } #endif From 325cbb04dc61692ba01445c34fd551ad91df9e6d Mon Sep 17 00:00:00 2001 From: Jia Zhang Date: Mon, 1 Jan 2018 09:52:10 +0800 Subject: [PATCH 2065/3326] x86/cpu: Rename cpu_data.x86_mask to cpu_data.x86_stepping commit b399151cb48db30ad1e0e93dd40d68c6d007b637 upstream. x86_mask is a confusing name which is hard to associate with the processor's stepping. Additionally, correct an indent issue in lib/cpu.c. Signed-off-by: Jia Zhang [ Updated it to more recent kernels. 
] Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: bp@alien8.de Cc: tony.luck@intel.com Link: http://lkml.kernel.org/r/1514771530-70829-1-git-send-email-qianyue.zj@alibaba-inc.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/core.c | 2 +- arch/x86/events/intel/lbr.c | 2 +- arch/x86/events/intel/p6.c | 2 +- arch/x86/include/asm/acpi.h | 2 +- arch/x86/include/asm/processor.h | 2 +- arch/x86/kernel/amd_nb.c | 2 +- arch/x86/kernel/apic/apic.c | 6 +++--- arch/x86/kernel/asm-offsets_32.c | 2 +- arch/x86/kernel/cpu/amd.c | 28 +++++++++++++-------------- arch/x86/kernel/cpu/centaur.c | 4 ++-- arch/x86/kernel/cpu/common.c | 8 ++++---- arch/x86/kernel/cpu/cyrix.c | 2 +- arch/x86/kernel/cpu/intel.c | 18 ++++++++--------- arch/x86/kernel/cpu/intel_rdt.c | 2 +- arch/x86/kernel/cpu/microcode/intel.c | 4 ++-- arch/x86/kernel/cpu/mtrr/generic.c | 2 +- arch/x86/kernel/cpu/mtrr/main.c | 4 ++-- arch/x86/kernel/cpu/proc.c | 4 ++-- arch/x86/kernel/head_32.S | 4 ++-- arch/x86/kernel/mpparse.c | 2 +- arch/x86/lib/cpu.c | 2 +- drivers/char/hw_random/via-rng.c | 2 +- drivers/cpufreq/acpi-cpufreq.c | 2 +- drivers/cpufreq/longhaul.c | 6 +++--- drivers/cpufreq/p4-clockmod.c | 2 +- drivers/cpufreq/powernow-k7.c | 2 +- drivers/cpufreq/speedstep-centrino.c | 4 ++-- drivers/cpufreq/speedstep-lib.c | 6 +++--- drivers/crypto/padlock-aes.c | 2 +- drivers/edac/amd64_edac.c | 2 +- drivers/hwmon/coretemp.c | 6 +++--- drivers/hwmon/hwmon-vid.c | 2 +- drivers/hwmon/k10temp.c | 2 +- drivers/hwmon/k8temp.c | 2 +- drivers/video/fbdev/geode/video_gx.c | 2 +- 35 files changed, 73 insertions(+), 73 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 09c26a4f139c12..1c2558430cf07d 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu) break; case INTEL_FAM6_SANDYBRIDGE_X: - switch (cpu_data(cpu).x86_mask) { + switch (cpu_data(cpu).x86_stepping) { case 6: rev = 0x618; break; case 7: rev = 0x70c; break; } diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index ae64d0b69729db..cf372b90557ed4 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void) * on PMU interrupt */ if (boot_cpu_data.x86_model == 28 - && boot_cpu_data.x86_mask < 10) { + && boot_cpu_data.x86_stepping < 10) { pr_cont("LBR disabled due to erratum"); return; } diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c index a5604c3529308b..408879b0c0d4e4 100644 --- a/arch/x86/events/intel/p6.c +++ b/arch/x86/events/intel/p6.c @@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = { static __init void p6_pmu_rdpmc_quirk(void) { - if (boot_cpu_data.x86_mask < 9) { + if (boot_cpu_data.x86_stepping < 9) { /* * PPro erratum 26; fixed in stepping 9 and above. 
*/ diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 8d0ec9df1cbeb5..f077401869ee27 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) if (boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86_model <= 0x05 && - boot_cpu_data.x86_mask < 0x0A) + boot_cpu_data.x86_stepping < 0x0A) return 1; else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E)) return 1; diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index db3e68bcfd9b1d..18852c2535ce8e 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -91,7 +91,7 @@ struct cpuinfo_x86 { __u8 x86; /* CPU family */ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; - __u8 x86_mask; + __u8 x86_stepping; #ifdef CONFIG_X86_64 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ int x86_tlbsize; diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 6db28f17ff2884..c88e0b127810f2 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -235,7 +235,7 @@ int amd_cache_northbridges(void) if (boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model >= 0x8 && (boot_cpu_data.x86_model > 0x9 || - boot_cpu_data.x86_mask >= 0x1)) + boot_cpu_data.x86_stepping >= 0x1)) amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; if (boot_cpu_data.x86 == 0x15) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 89c7c8569e5e75..5942aa5f569bf3 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -553,7 +553,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); static u32 hsx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x02: return 0x3a; /* EP */ case 0x04: return 0x0f; /* EX */ } @@ -563,7 +563,7 @@ static u32 hsx_deadline_rev(void) static u32 bdx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x02: return 0x00000011; case 0x03: return 0x0700000e; case 0x04: return 0x0f00000c; @@ -575,7 +575,7 @@ static u32 bdx_deadline_rev(void) static u32 skx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x03: return 0x01000136; case 0x04: return 0x02000014; } diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index fa1261eefa16e7..f91ba53e06c8b9 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -18,7 +18,7 @@ void foo(void) OFFSET(CPUINFO_x86, cpuinfo_x86, x86); OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor); OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model); - OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask); + OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping); OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level); OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability); OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ea831c85819583..e7d5a7883632ca 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) return; } - if (c->x86_model == 6 && c->x86_mask == 1) { + if (c->x86_model == 6 && c->x86_stepping == 1) { const int K6_BUG_LOOP = 1000000; int n; void (*f_vide)(void); @@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) /* K6 with old 
style WHCR */ if (c->x86_model < 8 || - (c->x86_model == 8 && c->x86_mask < 8)) { + (c->x86_model == 8 && c->x86_stepping < 8)) { /* We can only write allocate on the low 508Mb */ if (mbytes > 508) mbytes = 508; @@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) return; } - if ((c->x86_model == 8 && c->x86_mask > 7) || + if ((c->x86_model == 8 && c->x86_stepping > 7) || c->x86_model == 9 || c->x86_model == 13) { /* The more serious chips .. */ @@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx * As per AMD technical note 27212 0.2 */ - if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { + if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) { rdmsr(MSR_K7_CLK_CTL, l, h); if ((l & 0xfff00000) != 0x20000000) { pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", @@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * but they are not certified as MP capable. */ /* Athlon 660/661 is valid. */ - if ((c->x86_model == 6) && ((c->x86_mask == 0) || - (c->x86_mask == 1))) + if ((c->x86_model == 6) && ((c->x86_stepping == 0) || + (c->x86_stepping == 1))) return; /* Duron 670 is valid */ - if ((c->x86_model == 7) && (c->x86_mask == 0)) + if ((c->x86_model == 7) && (c->x86_stepping == 0)) return; /* @@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for * more. */ - if (((c->x86_model == 6) && (c->x86_mask >= 2)) || - ((c->x86_model == 7) && (c->x86_mask >= 1)) || + if (((c->x86_model == 6) && (c->x86_stepping >= 2)) || + ((c->x86_model == 7) && (c->x86_stepping >= 1)) || (c->x86_model > 7)) if (cpu_has(c, X86_FEATURE_MP)) return; @@ -583,7 +583,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) /* Set MTRR capability flag if appropriate */ if (c->x86 == 5) if (c->x86_model == 13 || c->x86_model == 9 || - (c->x86_model == 8 && c->x86_mask >= 8)) + (c->x86_model == 8 && c->x86_stepping >= 8)) set_cpu_cap(c, X86_FEATURE_K6_MTRR); #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) @@ -769,7 +769,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c) * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects * all up to and including B1. 
*/ - if (c->x86_model <= 1 && c->x86_mask <= 1) + if (c->x86_model <= 1 && c->x86_stepping <= 1) set_cpu_cap(c, X86_FEATURE_CPB); } @@ -880,11 +880,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) /* AMD errata T13 (order #21922) */ if ((c->x86 == 6)) { /* Duron Rev A0 */ - if (c->x86_model == 3 && c->x86_mask == 0) + if (c->x86_model == 3 && c->x86_stepping == 0) size = 64; /* Tbird rev A1/A2 */ if (c->x86_model == 4 && - (c->x86_mask == 0 || c->x86_mask == 1)) + (c->x86_stepping == 0 || c->x86_stepping == 1)) size = 256; } return size; @@ -1021,7 +1021,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) } /* OSVW unavailable or ID unknown, match family-model-stepping range */ - ms = (cpu->x86_model << 4) | cpu->x86_mask; + ms = (cpu->x86_model << 4) | cpu->x86_stepping; while ((range = *erratum++)) if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && (ms >= AMD_MODEL_RANGE_START(range)) && diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 68bc6d9b313264..595be776727d85 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -136,7 +136,7 @@ static void init_centaur(struct cpuinfo_x86 *c) clear_cpu_cap(c, X86_FEATURE_TSC); break; case 8: - switch (c->x86_mask) { + switch (c->x86_stepping) { default: name = "2"; break; @@ -211,7 +211,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) * - Note, it seems this may only be in engineering samples. */ if ((c->x86 == 6) && (c->x86_model == 9) && - (c->x86_mask == 1) && (size == 65)) + (c->x86_stepping == 1) && (size == 65)) size -= 1; return size; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 92b66e21bae5b2..74dcb2127fd8ff 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -707,7 +707,7 @@ void cpu_detect(struct cpuinfo_x86 *c) cpuid(0x00000001, &tfms, &misc, &junk, &cap0); c->x86 = x86_family(tfms); c->x86_model = x86_model(tfms); - c->x86_mask = x86_stepping(tfms); + c->x86_stepping = x86_stepping(tfms); if (cap0 & (1<<19)) { c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; @@ -1162,7 +1162,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) c->loops_per_jiffy = loops_per_jiffy; c->x86_cache_size = -1; c->x86_vendor = X86_VENDOR_UNKNOWN; - c->x86_model = c->x86_mask = 0; /* So far unknown... */ + c->x86_model = c->x86_stepping = 0; /* So far unknown... */ c->x86_vendor_id[0] = '\0'; /* Unset */ c->x86_model_id[0] = '\0'; /* Unset */ c->x86_max_cores = 1; @@ -1353,8 +1353,8 @@ void print_cpu_info(struct cpuinfo_x86 *c) pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); - if (c->x86_mask || c->cpuid_level >= 0) - pr_cont(", stepping: 0x%x)\n", c->x86_mask); + if (c->x86_stepping || c->cpuid_level >= 0) + pr_cont(", stepping: 0x%x)\n", c->x86_stepping); else pr_cont(")\n"); } diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 6b4bb335641f3f..8949b7ae6d9253 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) /* common case step number/rev -- exceptions handled below */ c->x86_model = (dir1 >> 4) + 1; - c->x86_mask = dir1 & 0xf; + c->x86_stepping = dir1 & 0xf; /* Now cook; the original recipe is by Channing Corn, from Cyrix. 
* We do the same thing for each generation: we work out diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index ef796f14f7ae50..d19e903214b403 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -146,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { if (c->x86_model == spectre_bad_microcodes[i].model && - c->x86_mask == spectre_bad_microcodes[i].stepping) + c->x86_stepping == spectre_bad_microcodes[i].stepping) return (c->microcode <= spectre_bad_microcodes[i].microcode); } return false; @@ -193,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) * need the microcode to have already been loaded... so if it is * not, recommend a BIOS update and disable large pages. */ - if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 && c->microcode < 0x20e) { pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n"); clear_cpu_cap(c, X86_FEATURE_PSE); @@ -209,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) /* CPUID workaround for 0F33/0F34 CPU */ if (c->x86 == 0xF && c->x86_model == 0x3 - && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) + && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4)) c->x86_phys_bits = 36; /* @@ -307,7 +307,7 @@ int ppro_with_ram_bug(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && - boot_cpu_data.x86_mask < 8) { + boot_cpu_data.x86_stepping < 8) { pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n"); return 1; } @@ -324,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c) * Mask B, Pentium, but not Pentium MMX */ if (c->x86 == 5 && - c->x86_mask >= 1 && c->x86_mask <= 4 && + c->x86_stepping >= 1 && c->x86_stepping <= 4 && c->x86_model <= 3) { /* * Remember we have B step Pentia with bugs @@ -367,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until * model 3 mask 3 */ - if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) + if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633) clear_cpu_cap(c, X86_FEATURE_SEP); /* @@ -385,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * P4 Xeon erratum 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. */ - if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { + if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) { if (msr_set_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) { pr_info("CPU: C0 stepping P4 Xeon detected.\n"); @@ -400,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * Specification Update"). 
*/ if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 && - (c->x86_mask < 0x6 || c->x86_mask == 0xb)) + (c->x86_stepping < 0x6 || c->x86_stepping == 0xb)) set_cpu_bug(c, X86_BUG_11AP); @@ -647,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c) case 6: if (l2 == 128) p = "Celeron (Mendocino)"; - else if (c->x86_mask == 0 || c->x86_mask == 5) + else if (c->x86_stepping == 0 || c->x86_stepping == 5) p = "Celeron-A"; break; diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index 99442370de40d7..18dd8f22e353ab 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -771,7 +771,7 @@ static __init void rdt_quirks(void) cache_alloc_hsw_probe(); break; case INTEL_FAM6_SKYLAKE_X: - if (boot_cpu_data.x86_mask <= 4) + if (boot_cpu_data.x86_stepping <= 4) set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); } } diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index f7c55b0e753ad0..b94279bb5c0462 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu) */ if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X && - c->x86_mask == 0x01 && + c->x86_stepping == 0x01 && llc_size_per_core > 2621440 && c->microcode < 0x0b000021) { pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); @@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device, return UCODE_NFOUND; sprintf(name, "intel-ucode/%02x-%02x-%02x", - c->x86, c->x86_model, c->x86_mask); + c->x86, c->x86_model, c->x86_stepping); if (request_firmware_direct(&firmware, name, device)) { pr_debug("data file %s load failed\n", name); diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index fdc55215d44d08..e12ee86906c625 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size, */ if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && - boot_cpu_data.x86_mask <= 7) { + boot_cpu_data.x86_stepping <= 7) { if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); return -EINVAL; diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 40d5a8a752125e..7468de4290873a 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -711,8 +711,8 @@ void __init mtrr_bp_init(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 0x3 && - (boot_cpu_data.x86_mask == 0x3 || - boot_cpu_data.x86_mask == 0x4)) + (boot_cpu_data.x86_stepping == 0x3 || + boot_cpu_data.x86_stepping == 0x4)) phys_addr = 36; size_or_mask = SIZE_OR_MASK_BITS(phys_addr); diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index e7ecedafa1c8f7..ee4cc388e8d30c 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) c->x86_model, c->x86_model_id[0] ? 
c->x86_model_id : "unknown"); - if (c->x86_mask || c->cpuid_level >= 0) - seq_printf(m, "stepping\t: %d\n", c->x86_mask); + if (c->x86_stepping || c->cpuid_level >= 0) + seq_printf(m, "stepping\t: %d\n", c->x86_stepping); else seq_puts(m, "stepping\t: unknown\n"); if (c->microcode) diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index c29020907886a3..b59e4fb40fd998 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -37,7 +37,7 @@ #define X86 new_cpu_data+CPUINFO_x86 #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor #define X86_MODEL new_cpu_data+CPUINFO_x86_model -#define X86_MASK new_cpu_data+CPUINFO_x86_mask +#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability @@ -332,7 +332,7 @@ ENTRY(startup_32_smp) shrb $4,%al movb %al,X86_MODEL andb $0x0f,%cl # mask mask revision - movb %cl,X86_MASK + movb %cl,X86_STEPPING movl %edx,X86_CAPABILITY .Lis486: diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 3a4b12809ab5f8..bc6bc6689e68a7 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -407,7 +407,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01; processor.cpuflag = CPU_ENABLED; processor.cpufeature = (boot_cpu_data.x86 << 8) | - (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping; processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX]; processor.reserved[0] = 0; processor.reserved[1] = 0; diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c index d6f848d1211d4b..2dd1fe13a37b36 100644 --- a/arch/x86/lib/cpu.c +++ b/arch/x86/lib/cpu.c @@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig) { unsigned int fam, model; - fam = x86_family(sig); + fam = x86_family(sig); model = (sig >> 4) & 0xf; diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index d1f5bb534e0e36..6e9df558325bea 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng) /* Enable secondary noise source on CPUs where it is present. 
*/ /* Nehemiah stepping 8 and higher */ - if ((c->x86_model == 9) && (c->x86_mask > 7)) + if ((c->x86_model == 9) && (c->x86_stepping > 7)) lo |= VIA_NOISESRC2; /* Esther */ diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 3a2ca0f79daf28..d0c34df0529c8a 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) if (c->x86_vendor == X86_VENDOR_INTEL) { if ((c->x86 == 15) && (c->x86_model == 6) && - (c->x86_mask == 8)) { + (c->x86_stepping == 8)) { pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n"); return -ENODEV; } diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index c46a12df40dd83..d5e27bc7585a94 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) break; case 7: - switch (c->x86_mask) { + switch (c->x86_stepping) { case 0: longhaul_version = TYPE_LONGHAUL_V1; cpu_model = CPU_SAMUEL2; @@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) break; case 1 ... 15: longhaul_version = TYPE_LONGHAUL_V2; - if (c->x86_mask < 8) { + if (c->x86_stepping < 8) { cpu_model = CPU_SAMUEL2; cpuname = "C3 'Samuel 2' [C5B]"; } else { @@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) numscales = 32; memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); - switch (c->x86_mask) { + switch (c->x86_stepping) { case 0 ... 1: cpu_model = CPU_NEHEMIAH; cpuname = "C3 'Nehemiah A' [C5XLOE]"; diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index fd77812313f3ec..a25741b1281b46 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) #endif /* Errata workaround */ - cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; + cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping; switch (cpuid) { case 0x0f07: case 0x0f0a: diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 80ac313e6c59c1..302e9ce793a017 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -131,7 +131,7 @@ static int check_powernow(void) return 0; } - if ((c->x86_model == 6) && (c->x86_mask == 0)) { + if ((c->x86_model == 6) && (c->x86_stepping == 0)) { pr_info("K7 660[A0] core detected, enabling errata workarounds\n"); have_a0 = 1; } diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 41bc5397f4bbb3..4fa5adf16c7014 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -37,7 +37,7 @@ struct cpu_id { __u8 x86; /* CPU family */ __u8 x86_model; /* model */ - __u8 x86_mask; /* stepping */ + __u8 x86_stepping; /* stepping */ }; enum { @@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, { if ((c->x86 == x->x86) && (c->x86_model == x->x86_model) && - (c->x86_mask == x->x86_mask)) + (c->x86_stepping == x->x86_stepping)) return 1; return 0; } diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index ccab452a4ef5b7..dd7bb00991f4b8 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void) ebx = cpuid_ebx(0x00000001); ebx 
&= 0x000000FF; - pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); + pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping); - switch (c->x86_mask) { + switch (c->x86_stepping) { case 4: /* * B-stepping [M-P4-M] @@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void) msr_lo, msr_hi); if ((msr_hi & (1<<18)) && (relaxed_check ? 1 : (msr_hi & (3<<24)))) { - if (c->x86_mask == 0x01) { + if (c->x86_stepping == 0x01) { pr_debug("early PIII version\n"); return SPEEDSTEP_CPU_PIII_C_EARLY; } else diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index b3869748cc6b1f..c939f18f70cc0b 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -512,7 +512,7 @@ static int __init padlock_init(void) printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); - if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { + if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) { ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index ac2f30295efe45..59ce32e405ac60 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) struct amd64_family_type *fam_type = NULL; pvt->ext_model = boot_cpu_data.x86_model >> 4; - pvt->stepping = boot_cpu_data.x86_mask; + pvt->stepping = boot_cpu_data.x86_stepping; pvt->model = boot_cpu_data.x86_model; pvt->fam = boot_cpu_data.x86; diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index c13a4fd86b3cba..a42744c7665b53 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -268,13 +268,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) { const struct tjmax_model *tm = &tjmax_model_table[i]; if (c->x86_model == tm->model && - (tm->mask == ANY || c->x86_mask == tm->mask)) + (tm->mask == ANY || c->x86_stepping == tm->mask)) return tm->tjmax; } /* Early chips have no MSR for TjMax */ - if (c->x86_model == 0xf && c->x86_mask < 4) + if (c->x86_model == 0xf && c->x86_stepping < 4) usemsr_ee = 0; if (c->x86_model > 0xe && usemsr_ee) { @@ -425,7 +425,7 @@ static int chk_ucode_version(unsigned int cpu) * Readings might stop update when processor visited too deep sleep, * fixed for stepping D0 (6EC). 
*/ - if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) { + if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) { pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n"); return -ENODEV; } diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index ef91b8a6754924..84e91286fc4fde 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c @@ -293,7 +293,7 @@ u8 vid_which_vrm(void) if (c->x86 < 6) /* Any CPU with family lower than 6 */ return 0; /* doesn't have VID */ - vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor); + vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor); if (vrm_ret == 134) vrm_ret = get_via_model_d_vrm(); if (vrm_ret == 0) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index ce3b91f22e30af..5c740996aa6253 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -179,7 +179,7 @@ static bool has_erratum_319(struct pci_dev *pdev) * and AM3 formats, but that's the best we can do. */ return boot_cpu_data.x86_model < 4 || - (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2); + (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); } static int k10temp_probe(struct pci_dev *pdev, diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 5a632bcf869bbf..e59f9113fb93b0 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c @@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev, return -ENOMEM; model = boot_cpu_data.x86_model; - stepping = boot_cpu_data.x86_mask; + stepping = boot_cpu_data.x86_stepping; /* feature available since SH-C0, exclude older revisions */ if ((model == 4 && stepping == 0) || diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c index 6082f653c68a44..67773e8bbb954b 100644 --- a/drivers/video/fbdev/geode/video_gx.c +++ b/drivers/video/fbdev/geode/video_gx.c @@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info) int timeout = 1000; /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ - if (cpu_data(0).x86_mask == 1) { + if (cpu_data(0).x86_stepping == 1) { pll_table = gx_pll_table_14MHz; pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); } else { From 7d7ebee6ce113fff252f1b923fbbbad83b1221b8 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 Feb 2018 10:14:17 +0300 Subject: [PATCH 2066/3326] x86/spectre: Fix an error message commit 9de29eac8d2189424d81c0d840cd0469aa3d41c8 upstream. If i == ARRAY_SIZE(mitigation_options) then we accidentally print garbage from one space beyond the end of the mitigation_options[] array. Signed-off-by: Dan Carpenter Cc: Andy Lutomirski Cc: Borislav Petkov Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: KarimAllah Ahmed Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: kernel-janitors@vger.kernel.org Fixes: 9005c6834c0f ("x86/spectre: Simplify spectre_v2 command line parsing") Link: http://lkml.kernel.org/r/20180214071416.GA26677@mwanda Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4acf16a76d1e45..d71c8b54b696d4 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -174,7 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) } if (i >= ARRAY_SIZE(mitigation_options)) { - pr_err("unknown option (%s). 
Switching to AUTO select\n", mitigation_options[i].option); + pr_err("unknown option (%s). Switching to AUTO select\n", arg); return SPECTRE_V2_CMD_AUTO; } } From 231d0c70be748bba8982b6bda3daa613bf5c8a29 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 13 Feb 2018 13:22:08 -0600 Subject: [PATCH 2067/3326] x86/cpu: Change type of x86_cache_size variable to unsigned int commit 24dbc6000f4b9b0ef5a9daecb161f1907733765a upstream. Currently, x86_cache_size is of type int, which makes no sense as we will never have a valid cache size equal or less than 0. So instead of initializing this variable to -1, it can perfectly be initialized to 0 and use it as an unsigned variable instead. Suggested-by: Thomas Gleixner Signed-off-by: Gustavo A. R. Silva Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Addresses-Coverity-ID: 1464429 Link: http://lkml.kernel.org/r/20180213192208.GA26414@embeddedor.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/processor.h | 2 +- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/cpu/microcode/intel.c | 2 +- arch/x86/kernel/cpu/proc.c | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 18852c2535ce8e..15fc074bd62813 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -109,7 +109,7 @@ struct cpuinfo_x86 { char x86_vendor_id[16]; char x86_model_id[64]; /* in KB - valid for CPUS which support this call: */ - int x86_cache_size; + unsigned int x86_cache_size; int x86_cache_alignment; /* In bytes */ /* Cache QoS architectural values: */ int x86_cache_max_rmid; /* max index */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 74dcb2127fd8ff..651b7afed4dada 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1160,7 +1160,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) int i; c->loops_per_jiffy = loops_per_jiffy; - c->x86_cache_size = -1; + c->x86_cache_size = 0; c->x86_vendor = X86_VENDOR_UNKNOWN; c->x86_model = c->x86_stepping = 0; /* So far unknown... */ c->x86_vendor_id[0] = '\0'; /* Unset */ diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index b94279bb5c0462..a15db2b4e0d66a 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -982,7 +982,7 @@ static struct microcode_ops microcode_intel_ops = { static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) { - u64 llc_size = c->x86_cache_size * 1024; + u64 llc_size = c->x86_cache_size * 1024ULL; do_div(llc_size, c->x86_max_cores); diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index ee4cc388e8d30c..2c8522a39ed5db 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -91,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) } /* Cache size */ - if (c->x86_cache_size >= 0) - seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); + if (c->x86_cache_size) + seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); show_cpuinfo_core(m, c, cpu); show_cpuinfo_misc(m, c); From 6c0398cfebf5d3f6d5ebc6924a3faf26a12935f4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 14 Feb 2018 08:39:11 +0100 Subject: [PATCH 2068/3326] x86/entry/64: Fix CR3 restore in paranoid_exit() commit e48657573481a5dff7cfdc3d57005c80aa816500 upstream. 
Josh Poimboeuf noticed the following bug: "The paranoid exit code only restores the saved CR3 when it switches back to the user GS. However, even in the kernel GS case, it's possible that it needs to restore a user CR3, if for example, the paranoid exception occurred in the syscall exit path between SWITCH_TO_USER_CR3_STACK and SWAPGS." Josh also confirmed via targeted testing that it's possible to hit this bug. Fix the bug by also restoring CR3 in the paranoid_exit_no_swapgs branch. The reason we haven't seen this bug reported by users yet is probably because "paranoid" entry points are limited to the following cases: idtentry double_fault do_double_fault has_error_code=1 paranoid=2 idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry machine_check do_mce has_error_code=0 paranoid=1 Amongst those entry points only machine_check is one that will interrupt an IRQS-off critical section asynchronously - and machine check events are rare. The other main asynchronous entries are NMI entries, which can be very high-freq with perf profiling, but they are special: they don't use the 'idtentry' macro but are open coded and restore user CR3 unconditionally so don't have this bug. Reported-and-tested-by: Josh Poimboeuf Reviewed-by: Andy Lutomirski Acked-by: Thomas Gleixner Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Greg Kroah-Hartman Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180214073910.boevmg65upbk3vqb@gmail.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/entry_64.S | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index c5ef704df06930..68a2d76e4f8fc8 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1162,6 +1162,7 @@ ENTRY(paranoid_exit) jmp .Lparanoid_exit_restore .Lparanoid_exit_no_swapgs: TRACE_IRQS_IRETQ_DEBUG + RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 .Lparanoid_exit_restore: jmp restore_regs_and_return_to_kernel END(paranoid_exit) From f02c3f7f6d30fb7d0f245594c7d4a033763baa67 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 17 Jan 2018 23:52:03 -0500 Subject: [PATCH 2069/3326] drm/ttm: Don't add swapped BOs to swap-LRU list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit fd5002d6a3c602664b07668a24df4ef7a43bf078 upstream. A BO that's already swapped would be added back to the swap-LRU list for example if its validation failed under high memory pressure. This could later lead to swapping it out again and leaking previous swap storage. This commit adds a condition to prevent that from happening. 
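In isolation, the change amounts to widening the exclusion mask used when a BO is put on the swap LRU, so that BOs whose pages are already swapped out are skipped as well (simplified sketch only; the real check is in ttm_bo_add_to_lru(), shown in the hunk below, and add_to_swap_lru() is just a placeholder for its list_add_tail()/kref_get() pair):

        /* before: only SG-backed BOs were kept off the swap LRU */
        if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
                add_to_swap_lru(bo);

        /* after: BOs that are already swapped out are kept off it too */
        if (bo->ttm && !(bo->ttm->page_flags &
                         (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
                add_to_swap_lru(bo);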
v2: Check page_flags instead of swap_storage Signed-off-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/ttm/ttm_bo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c088703777e262..68eed684dff520 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -175,7 +175,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) list_add_tail(&bo->lru, &man->lru[bo->priority]); kref_get(&bo->list_kref); - if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { + if (bo->ttm && !(bo->ttm->page_flags & + (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) { list_add_tail(&bo->swap, &bo->glob->swap_lru[bo->priority]); kref_get(&bo->list_kref); From 15cdc016f109e2b8b7ede8c68c413c9b97c09fa8 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Fri, 26 Jan 2018 09:32:29 -0500 Subject: [PATCH 2070/3326] drm/ttm: Fix 'buf' pointer update in ttm_bo_vm_access_kmap() (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 95244db2d3f743f37e69446a2807dd1a42750542 upstream. The buf pointer was not being incremented inside the loop meaning the same block of data would be read or written repeatedly. (v2) Change 'buf' pointer to uint8_t* type Cc: stable@vger.kernel.org Fixes: 09ac4fcb3f25 ("drm/ttm: Implement vm_operations_struct.access v2") Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/ttm/ttm_bo_vm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index c8ebb757e36b52..b17d0d38f290a6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -299,7 +299,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma) static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, unsigned long offset, - void *buf, int len, int write) + uint8_t *buf, int len, int write) { unsigned long page = offset >> PAGE_SHIFT; unsigned long bytes_left = len; @@ -328,6 +328,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, ttm_bo_kunmap(&map); page++; + buf += bytes; bytes_left -= bytes; offset = 0; } while (bytes_left); From b51521c9a191083033d3d8d7174e41d33b71a9bf Mon Sep 17 00:00:00 2001 From: Ray Strode Date: Mon, 27 Nov 2017 16:50:09 -0500 Subject: [PATCH 2071/3326] drm/qxl: unref cursor bo when finished with it commit 16c6db3688734b27487a42d0c2a1062d0b2bad03 upstream. qxl_cursor_atomic_update allocs a bo for the cursor that it never frees up at the end of the function. This commit fixes that. 
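The shape of the fix is the usual acquire/release pairing: start from a NULL pointer so a release is always safe, and drop the reference once the cursor command has been pushed (illustrative sketch only; allocate_cursor_bo() is a placeholder, the real code is in the hunk below):

        struct qxl_bo *cursor_bo = NULL;        /* NULL so the final unref is always valid */
        int ret;

        ret = allocate_cursor_bo(qdev, size, &cursor_bo);  /* placeholder for the real allocation */
        if (ret)
                return;
        /* ... map the BO, fill in the cursor data, push the cursor command ... */
        qxl_bo_unref(&cursor_bo);               /* drop the reference taken at allocation time */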
Signed-off-by: Ray Strode Cc: stable@vger.kernel.org Signed-off-by: Dave Airlie Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/qxl/qxl_display.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index afbf50d0c08fa1..ced6564b5814a6 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -564,7 +564,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, struct qxl_cursor_cmd *cmd; struct qxl_cursor *cursor; struct drm_gem_object *obj; - struct qxl_bo *cursor_bo, *user_bo = NULL; + struct qxl_bo *cursor_bo = NULL, *user_bo = NULL; int ret; void *user_ptr; int size = 64*64*4; @@ -634,6 +634,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_bo_unref(&cursor_bo); + return; out_backoff: From dc2b0e41fc5e6d11fe4539317e57cf84c67cd18d Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Wed, 17 Jan 2018 17:24:13 -0500 Subject: [PATCH 2072/3326] drm/amd/powerplay: Fix smu_table_entry.handle type commit adab595d16abe48e9c097f000bf8921d35b28fb7 upstream. The handle describes kernel logical address, should be unsigned long and not uint32_t. Fixes KASAN error and GFP on driver unload. Reviewed-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Andrey Grodzovsky Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h index 262c8ded87c01a..dafc9c4b1e6f20 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h @@ -40,7 +40,7 @@ struct smu_table_entry { uint32_t table_addr_high; uint32_t table_addr_low; uint8_t *table; - uint32_t handle; + unsigned long handle; }; struct smu_table_array { From e4a6d687e5d9e603f7188ca2ee88040c5fdd03dd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 18 Jan 2018 16:40:16 +0100 Subject: [PATCH 2073/3326] drm/ast: Load lut in crtc_commit commit 24b8ef699e8221d2b7f813adaab13eec053e1507 upstream. In the past the ast driver relied upon the fbdev emulation helpers to call ->load_lut at boot-up. But since commit b8e2b0199cc377617dc238f5106352c06dcd3fa2 Author: Peter Rosin Date: Tue Jul 4 12:36:57 2017 +0200 drm/fb-helper: factor out pseudo-palette that's cleaned up and drivers are expected to boot into a consistent lut state. This patch fixes that. 
Fixes: b8e2b0199cc3 ("drm/fb-helper: factor out pseudo-palette") Cc: Peter Rosin Cc: Daniel Vetter Cc: # v4.14+ Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=198123 Cc: Bill Fraser Reported-and-Tested-by: Bill Fraser Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/ast/ast_mode.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 6f3849ec0c1d0e..e9f1e6fe7b943b 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -644,6 +644,7 @@ static void ast_crtc_commit(struct drm_crtc *crtc) { struct ast_private *ast = crtc->dev->dev_private; ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0); + ast_crtc_load_lut(crtc); } From 254d48c8cc3b1f478f2c75349930da6e12abdd39 Mon Sep 17 00:00:00 2001 From: Shanker Donthineni Date: Sun, 11 Feb 2018 19:16:15 -0600 Subject: [PATCH 2074/3326] arm64: Add missing Falkor part number for branch predictor hardening commit 16e574d762ac5512eb922ac0ac5eed360b7db9d8 upstream. References to CPU part number MIDR_QCOM_FALKOR were dropped from the mailing list patch due to mainline/arm64 branch dependency. So this patch adds the missing part number. Fixes: ec82b567a74f ("arm64: Implement branch predictor hardening for Falkor") Acked-by: Marc Zyngier Signed-off-by: Shanker Donthineni Signed-off-by: Catalin Marinas Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/cpu_errata.c | 9 +++++++++ arch/arm64/kvm/hyp/switch.c | 4 +++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 07823595b7f016..52f15cd896e11a 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -406,6 +406,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), + .enable = qcom_enable_link_stack_sanitization, + }, + { + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), + }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 79364d3455c046..e08ae6b6b63e9f 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -371,8 +371,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) u32 midr = read_cpuid_id(); /* Apply BTAC predictors mitigation to all Falkor chips */ - if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1) + if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || + ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) { __qcom_hyp_sanitize_btac_predictors(); + } } fp_enabled = __fpsimd_enabled(); From 808700475218f382591313acabd4812c30ec7fa3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 21 Nov 2017 12:09:38 -0500 Subject: [PATCH 2075/3326] drm/radeon: Add dpm quirk for Jet PRO (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 239b5f64e12b1f09f506c164dff0374924782979 upstream. Fixes stability issues. 
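For reference, radeon's dpm code keeps these limits in 10 kHz units, so the clamp added below caps the engine clock at 600 MHz and the memory clock at 800 MHz for the affected board (sketch of the quirk shape only; the real hunk lives in si_apply_state_adjust_rules() below):

        /* Jet PRO quirk: cap clocks on the affected revision/device */
        if (rdev->pdev->revision == 0xC3 ||
            rdev->pdev->device == 0x6665) {
                max_sclk = 60000;       /* 600 MHz, in 10 kHz units */
                max_mclk = 80000;       /* 800 MHz, in 10 kHz units */
        }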
v2: clamp sclk to 600 Mhz Bug: https://bugs.freedesktop.org/show_bug.cgi?id=103370 Acked-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/radeon/si_dpm.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index ee3e74266a1332..97a0a639dad90b 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2984,6 +2984,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->device == 0x6667)) { max_sclk = 75000; } + if ((rdev->pdev->revision == 0xC3) || + (rdev->pdev->device == 0x6665)) { + max_sclk = 60000; + max_mclk = 80000; + } } else if (rdev->family == CHIP_OLAND) { if ((rdev->pdev->revision == 0xC7) || (rdev->pdev->revision == 0x80) || From a29adc04cb6d5ec4ac71c63ceeb67efc011a4b65 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 27 Jan 2018 15:28:15 +0100 Subject: [PATCH 2076/3326] drm/radeon: adjust tested variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 3a61b527b4e1f285d21b6e9e623dc45cf8bb391f upstream. Check the variable that was most recently initialized. The semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @@ expression x, y, f, g, e, m; statement S1,S2,S3,S4; @@ x = f(...); if (\(<+...x...+>\&e\)) S1 else S2 ( x = g(...); | m = g(...,&x,...); | y = g(...); *if (e) S3 else S4 ) // Signed-off-by: Julia Lawall Reviewed-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/radeon/radeon_uvd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index d34d1cf3389576..95f4db70dd2232 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, /* calc dclk divider with current vco freq */ dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, pd_min, pd_even); - if (vclk_div > pd_max) + if (dclk_div > pd_max) break; /* vco is too big, it has to stop */ /* calc score with current vco freq */ From c3bdd547042fa2e71825d7cc5e871b558d4153e9 Mon Sep 17 00:00:00 2001 From: Stewart Smith Date: Tue, 2 Aug 2016 11:50:16 +1000 Subject: [PATCH 2077/3326] rtc-opal: Fix handling of firmware error codes, prevent busy loops commit 5b8b58063029f02da573120ef4dc9079822e3cda upstream. According to the OPAL docs: skiboot-5.2.5/doc/opal-api/opal-rtc-read-3.txt skiboot-5.2.5/doc/opal-api/opal-rtc-write-4.txt OPAL_HARDWARE may be returned from OPAL_RTC_READ or OPAL_RTC_WRITE and this indicates either a transient or permanent error. Prior to this patch, Linux was not dealing with OPAL_HARDWARE being a permanent error particularly well, in that you could end up in a busy loop. This was not too hard to trigger on an AMI BMC based OpenPOWER machine doing a continuous "ipmitool mc reset cold" to the BMC, the result of that being that we'd get stuck in an infinite loop in opal_get_rtc_time(). We now retry a few times before returning the error higher up the stack. 
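The idea in isolation looks roughly like this (variable setup omitted; the real loops in opal_get_rtc_time()/opal_set_rtc_time() are in the hunks below):

        int retries = 10;
        long rc = OPAL_BUSY;

        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT ||
               rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
                rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
                if (rc == OPAL_BUSY_EVENT) {
                        opal_poll_events(NULL);         /* let firmware make progress */
                } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
                        if (retries-- <= 0)
                                break;                  /* looks permanent: stop retrying */
                        msleep(10);                     /* possibly transient: back off briefly */
                }
        }

        if (rc != OPAL_SUCCESS)
                return -EIO;    /* give up instead of spinning forever */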
Fixes: 16b1d26e77b1 ("rtc/tpo: Driver to support rtc and wakeup on PowerNV platform") Cc: stable@vger.kernel.org # v3.19+ Signed-off-by: Stewart Smith Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- drivers/rtc/rtc-opal.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c index e2a946c0e667e1..304e891e35fcb0 100644 --- a/drivers/rtc/rtc-opal.c +++ b/drivers/rtc/rtc-opal.c @@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms) static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) { long rc = OPAL_BUSY; + int retries = 10; u32 y_m_d; u64 h_m_s_ms; __be32 __y_m_d; @@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); if (rc == OPAL_BUSY_EVENT) opal_poll_events(NULL); - else + else if (retries-- && (rc == OPAL_HARDWARE + || rc == OPAL_INTERNAL_ERROR)) msleep(10); + else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) + break; } if (rc != OPAL_SUCCESS) @@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) { long rc = OPAL_BUSY; + int retries = 10; u32 y_m_d = 0; u64 h_m_s_ms = 0; @@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) rc = opal_rtc_write(y_m_d, h_m_s_ms); if (rc == OPAL_BUSY_EVENT) opal_poll_events(NULL); - else + else if (retries-- && (rc == OPAL_HARDWARE + || rc == OPAL_INTERNAL_ERROR)) msleep(10); + else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) + break; } return rc == OPAL_SUCCESS ? 0 : -EIO; From e96a219899740432662b58c60a93505ab3dced5c Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Sun, 7 Jan 2018 16:22:35 -0500 Subject: [PATCH 2078/3326] mbcache: initialize entry->e_referenced in mb_cache_entry_create() commit 3876bbe27d04b848750d5310a37d6b76b593f648 upstream. 
KMSAN reported use of uninitialized |entry->e_referenced| in a condition in mb_cache_shrink(): ================================================================== BUG: KMSAN: use of uninitialized memory in mb_cache_shrink+0x3b4/0xc50 fs/mbcache.c:287 CPU: 2 PID: 816 Comm: kswapd1 Not tainted 4.11.0-rc5+ #2877 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:16 [inline] dump_stack+0x172/0x1c0 lib/dump_stack.c:52 kmsan_report+0x12a/0x180 mm/kmsan/kmsan.c:927 __msan_warning_32+0x61/0xb0 mm/kmsan/kmsan_instr.c:469 mb_cache_shrink+0x3b4/0xc50 fs/mbcache.c:287 mb_cache_scan+0x67/0x80 fs/mbcache.c:321 do_shrink_slab mm/vmscan.c:397 [inline] shrink_slab+0xc3d/0x12d0 mm/vmscan.c:500 shrink_node+0x208f/0x2fd0 mm/vmscan.c:2603 kswapd_shrink_node mm/vmscan.c:3172 [inline] balance_pgdat mm/vmscan.c:3289 [inline] kswapd+0x160f/0x2850 mm/vmscan.c:3478 kthread+0x46c/0x5f0 kernel/kthread.c:230 ret_from_fork+0x29/0x40 arch/x86/entry/entry_64.S:430 chained origin: save_stack_trace+0x37/0x40 arch/x86/kernel/stacktrace.c:59 kmsan_save_stack_with_flags mm/kmsan/kmsan.c:302 [inline] kmsan_save_stack mm/kmsan/kmsan.c:317 [inline] kmsan_internal_chain_origin+0x12a/0x1f0 mm/kmsan/kmsan.c:547 __msan_store_shadow_origin_1+0xac/0x110 mm/kmsan/kmsan_instr.c:257 mb_cache_entry_create+0x3b3/0xc60 fs/mbcache.c:95 ext4_xattr_cache_insert fs/ext4/xattr.c:1647 [inline] ext4_xattr_block_set+0x4c82/0x5530 fs/ext4/xattr.c:1022 ext4_xattr_set_handle+0x1332/0x20a0 fs/ext4/xattr.c:1252 ext4_xattr_set+0x4d2/0x680 fs/ext4/xattr.c:1306 ext4_xattr_trusted_set+0x8d/0xa0 fs/ext4/xattr_trusted.c:36 __vfs_setxattr+0x703/0x790 fs/xattr.c:149 __vfs_setxattr_noperm+0x27a/0x6f0 fs/xattr.c:180 vfs_setxattr fs/xattr.c:223 [inline] setxattr+0x6ae/0x790 fs/xattr.c:449 path_setxattr+0x1eb/0x380 fs/xattr.c:468 SYSC_lsetxattr+0x8d/0xb0 fs/xattr.c:490 SyS_lsetxattr+0x77/0xa0 fs/xattr.c:486 entry_SYSCALL_64_fastpath+0x13/0x94 origin: save_stack_trace+0x37/0x40 arch/x86/kernel/stacktrace.c:59 kmsan_save_stack_with_flags mm/kmsan/kmsan.c:302 [inline] kmsan_internal_poison_shadow+0xb1/0x1a0 mm/kmsan/kmsan.c:198 kmsan_kmalloc+0x7f/0xe0 mm/kmsan/kmsan.c:337 kmem_cache_alloc+0x1c2/0x1e0 mm/slub.c:2766 mb_cache_entry_create+0x283/0xc60 fs/mbcache.c:86 ext4_xattr_cache_insert fs/ext4/xattr.c:1647 [inline] ext4_xattr_block_set+0x4c82/0x5530 fs/ext4/xattr.c:1022 ext4_xattr_set_handle+0x1332/0x20a0 fs/ext4/xattr.c:1252 ext4_xattr_set+0x4d2/0x680 fs/ext4/xattr.c:1306 ext4_xattr_trusted_set+0x8d/0xa0 fs/ext4/xattr_trusted.c:36 __vfs_setxattr+0x703/0x790 fs/xattr.c:149 __vfs_setxattr_noperm+0x27a/0x6f0 fs/xattr.c:180 vfs_setxattr fs/xattr.c:223 [inline] setxattr+0x6ae/0x790 fs/xattr.c:449 path_setxattr+0x1eb/0x380 fs/xattr.c:468 SYSC_lsetxattr+0x8d/0xb0 fs/xattr.c:490 SyS_lsetxattr+0x77/0xa0 fs/xattr.c:486 entry_SYSCALL_64_fastpath+0x13/0x94 ================================================================== Signed-off-by: Alexander Potapenko Signed-off-by: Eric Biggers Cc: stable@vger.kernel.org # v4.6 Signed-off-by: Greg Kroah-Hartman --- fs/mbcache.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/mbcache.c b/fs/mbcache.c index d818fd23678700..49c5b25bfa8cb2 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -94,6 +94,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, entry->e_key = key; entry->e_value = value; entry->e_reusable = reusable; + entry->e_referenced = 0; head = mb_cache_entry_head(cache, key); hlist_bl_lock(head); hlist_bl_for_each_entry(dup, dup_node, 
head, e_hash_list) { From a1341cac93fa9d54f49f5fedf7a577bfc7a7665c Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 29 Jan 2018 00:44:53 +0100 Subject: [PATCH 2079/3326] mmc: sdhci: Implement an SDHCI-specific bounce buffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit bd9b902798ab14d19ca116b10bde581ddff8f905 upstream. The bounce buffer is gone from the MMC core, and now we found out that there are some (crippled) i.MX boards out there that have broken ADMA (cannot do scatter-gather), and also broken PIO so they must use SDMA. Closer examination shows a less significant slowdown also on SDMA-only capable Laptop hosts. SDMA sets down the number of segments to one, so that each segment gets turned into a singular request that ping-pongs to the block layer before the next request/segment is issued. Apparently it happens a lot that the block layer send requests that include a lot of physically discontiguous segments. My guess is that this phenomenon is coming from the file system. These devices that cannot handle scatterlists in hardware can see major benefits from a DMA-contiguous bounce buffer. This patch accumulates those fragmented scatterlists in a physically contiguous bounce buffer so that we can issue bigger DMA data chunks to/from the card. When tested with a PCI-integrated host (1217:8221) that only supports SDMA: 0b:00.0 SD Host controller: O2 Micro, Inc. OZ600FJ0/OZ900FJ0/OZ600FJS SD/MMC Card Reader Controller (rev 05) This patch gave ~1Mbyte/s improved throughput on large reads and writes when testing using iozone than without the patch. dmesg: sdhci-pci 0000:0b:00.0: SDHCI controller found [1217:8221] (rev 5) mmc0 bounce up to 128 segments into one, max segment size 65536 bytes mmc0: SDHCI controller on PCI [0000:0b:00.0] using DMA On the i.MX SDHCI controllers on the crippled i.MX 25 and i.MX 35 the patch restores the performance to what it was before we removed the bounce buffers. 
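Conceptually, the write side of the bounce path packs the scattered pages into one contiguous DMA buffer and hands that single buffer to the controller (heavily simplified; the real logic is in the sdhci_pre_dma_transfer()/sdhci_request_done() changes below):

        if (host->bounce_buffer) {
                unsigned int length = data->blksz * data->blocks;

                /* gather the fragmented scatterlist into one contiguous buffer */
                sg_copy_to_buffer(data->sg, data->sg_len,
                                  host->bounce_buffer, length);
                /* give the DMA engine ownership of that single buffer */
                dma_sync_single_for_device(host->mmc->parent, host->bounce_addr,
                                           host->bounce_buffer_size,
                                           mmc_get_dma_dir(data));
        }

Reads go the other way: sync the buffer back for the CPU and sg_copy_from_buffer() the data into the original scatterlist.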
Cc: Pierre Ossman Cc: Benoît Thébaudeau Cc: Fabio Estevam Cc: Benjamin Beckmeyer Cc: stable@vger.kernel.org # v4.14+ Fixes: de3ee99b097d ("mmc: Delete bounce buffer handling") Tested-by: Benjamin Beckmeyer Acked-by: Adrian Hunter Signed-off-by: Linus Walleij Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci.c | 164 +++++++++++++++++++++++++++++++++++++-- drivers/mmc/host/sdhci.h | 3 + 2 files changed, 159 insertions(+), 8 deletions(-) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 6152e83ff93528..90cc1977b79231 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -502,8 +503,35 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host, if (data->host_cookie == COOKIE_PRE_MAPPED) return data->sg_count; - sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - mmc_get_dma_dir(data)); + /* Bounce write requests to the bounce buffer */ + if (host->bounce_buffer) { + unsigned int length = data->blksz * data->blocks; + + if (length > host->bounce_buffer_size) { + pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", + mmc_hostname(host->mmc), length, + host->bounce_buffer_size); + return -EIO; + } + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + /* Copy the data to the bounce buffer */ + sg_copy_to_buffer(data->sg, data->sg_len, + host->bounce_buffer, + length); + } + /* Switch ownership to the DMA */ + dma_sync_single_for_device(host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + mmc_get_dma_dir(data)); + /* Just a dummy value */ + sg_count = 1; + } else { + /* Just access the data directly from memory */ + sg_count = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } if (sg_count == 0) return -ENOSPC; @@ -673,6 +701,14 @@ static void sdhci_adma_table_post(struct sdhci_host *host, } } +static u32 sdhci_sdma_address(struct sdhci_host *host) +{ + if (host->bounce_buffer) + return host->bounce_addr; + else + return sg_dma_address(host->data->sg); +} + static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) { u8 count; @@ -858,8 +894,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) SDHCI_ADMA_ADDRESS_HI); } else { WARN_ON(sg_cnt != 1); - sdhci_writel(host, sg_dma_address(data->sg), - SDHCI_DMA_ADDRESS); + sdhci_writel(host, sdhci_sdma_address(host), + SDHCI_DMA_ADDRESS); } } @@ -2248,7 +2284,12 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) mrq->data->host_cookie = COOKIE_UNMAPPED; - if (host->flags & SDHCI_REQ_USE_DMA) + /* + * No pre-mapping in the pre hook if we're using the bounce buffer, + * for that we would need two bounce buffers since one buffer is + * in flight when this is getting called. 
+ */ + if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); } @@ -2352,8 +2393,45 @@ static bool sdhci_request_done(struct sdhci_host *host) struct mmc_data *data = mrq->data; if (data && data->host_cookie == COOKIE_MAPPED) { - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - mmc_get_dma_dir(data)); + if (host->bounce_buffer) { + /* + * On reads, copy the bounced data into the + * sglist + */ + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { + unsigned int length = data->bytes_xfered; + + if (length > host->bounce_buffer_size) { + pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", + mmc_hostname(host->mmc), + host->bounce_buffer_size, + data->bytes_xfered); + /* Cap it down and continue */ + length = host->bounce_buffer_size; + } + dma_sync_single_for_cpu( + host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + DMA_FROM_DEVICE); + sg_copy_from_buffer(data->sg, + data->sg_len, + host->bounce_buffer, + length); + } else { + /* No copying, just switch ownership */ + dma_sync_single_for_cpu( + host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + mmc_get_dma_dir(data)); + } + } else { + /* Unmap the raw data */ + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, + mmc_get_dma_dir(data)); + } data->host_cookie = COOKIE_UNMAPPED; } } @@ -2636,7 +2714,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) */ if (intmask & SDHCI_INT_DMA_END) { u32 dmastart, dmanow; - dmastart = sg_dma_address(host->data->sg); + + dmastart = sdhci_sdma_address(host); dmanow = dmastart + host->data->bytes_xfered; /* * Force update to the next DMA block boundary. @@ -3217,6 +3296,68 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1) } EXPORT_SYMBOL_GPL(__sdhci_read_caps); +static int sdhci_allocate_bounce_buffer(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + unsigned int max_blocks; + unsigned int bounce_size; + int ret; + + /* + * Cap the bounce buffer at 64KB. Using a bigger bounce buffer + * has diminishing returns, this is probably because SD/MMC + * cards are usually optimized to handle this size of requests. + */ + bounce_size = SZ_64K; + /* + * Adjust downwards to maximum request size if this is less + * than our segment size, else hammer down the maximum + * request size to the maximum buffer size. + */ + if (mmc->max_req_size < bounce_size) + bounce_size = mmc->max_req_size; + max_blocks = bounce_size / 512; + + /* + * When we just support one segment, we can get significant + * speedups by the help of a bounce buffer to group scattered + * reads/writes together. + */ + host->bounce_buffer = devm_kmalloc(mmc->parent, + bounce_size, + GFP_KERNEL); + if (!host->bounce_buffer) { + pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", + mmc_hostname(mmc), + bounce_size); + /* + * Exiting with zero here makes sure we proceed with + * mmc->max_segs == 1. 
+ */ + return 0; + } + + host->bounce_addr = dma_map_single(mmc->parent, + host->bounce_buffer, + bounce_size, + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(mmc->parent, host->bounce_addr); + if (ret) + /* Again fall back to max_segs == 1 */ + return 0; + host->bounce_buffer_size = bounce_size; + + /* Lie about this since we're bouncing */ + mmc->max_segs = max_blocks; + mmc->max_seg_size = bounce_size; + mmc->max_req_size = bounce_size; + + pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", + mmc_hostname(mmc), max_blocks, bounce_size); + + return 0; +} + int sdhci_setup_host(struct sdhci_host *host) { struct mmc_host *mmc; @@ -3713,6 +3854,13 @@ int sdhci_setup_host(struct sdhci_host *host) */ mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; + if (mmc->max_segs == 1) { + /* This may alter mmc->*_blk_* parameters */ + ret = sdhci_allocate_bounce_buffer(host); + if (ret) + return ret; + } + return 0; unreg: diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 54bc444c317f18..1d7d61e25dbf07 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -440,6 +440,9 @@ struct sdhci_host { int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ + char *bounce_buffer; /* For packing SDMA reads/writes */ + dma_addr_t bounce_addr; + unsigned int bounce_buffer_size; const struct sdhci_ops *ops; /* Low level hw interface */ From e8012ff8779378a650aab3d088239053bfbdc57f Mon Sep 17 00:00:00 2001 From: Phil Elwell Date: Mon, 12 Feb 2018 21:13:44 +0100 Subject: [PATCH 2080/3326] mmc: bcm2835: Don't overwrite max frequency unconditionally commit 118032be389009b07ecb5a03ffe219a89d421def upstream. The optional DT parameter max-frequency could init the max bus frequency. So take care of this, before setting the max bus frequency. Fixes: 660fc733bd74 ("mmc: bcm2835: Add new driver for the sdhost controller.") Signed-off-by: Phil Elwell Signed-off-by: Stefan Wahren Cc: # 4.12+ Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/bcm2835.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 229dc18f0581be..768972af8b853c 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host) char pio_limit_string[20]; int ret; - mmc->f_max = host->max_clk; + if (!mmc->f_max || mmc->f_max > host->max_clk) + mmc->f_max = host->max_clk; mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV; mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000); From 723cc3aeba9be034c7509860dbf445e1bd25467f Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Mon, 12 Feb 2018 14:13:59 +0100 Subject: [PATCH 2081/3326] Revert "mmc: meson-gx: include tx phase in the tuning process" commit fe0e58048f005fdce315eb4d185e5c160be4ac01 upstream. This reverts commit 0a44697627d17a66d7dc98f17aeca07ca79c5c20. This commit was initially intended to fix problems with hs200 and hs400 on some boards, mainly the odroid-c2. The OC2 (Rev 0.2) I have performs well in this modes, so I could not confirm these issues. We've had several reports about the issues being still present on (some) OC2, so apparently, this change does not do what it was supposed to do. Maybe the eMMC signal quality is on the edge on the board. This may explain the variability we see in term of stability, but this is just a guess. 
Lowering the max_frequency to 100Mhz seems to do trick for those affected by the issue Worse, the commit created new issues (CRC errors and hangs) on other boards, such as the kvim 1 and 2, the p200 or the libretech-cc. According to amlogic, the Tx phase should not be tuned and left in its default configuration, so it is best to just revert the commit. Fixes: 0a44697627d1 ("mmc: meson-gx: include tx phase in the tuning process") Cc: # 4.14+ Signed-off-by: Jerome Brunet Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/meson-gx-mmc.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 85745ef179e22b..08a55c2e96e1b4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -716,22 +716,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct meson_host *host = mmc_priv(mmc); - int ret; - - /* - * If this is the initial tuning, try to get a sane Rx starting - * phase before doing the actual tuning. - */ - if (!mmc->doing_retune) { - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); - - if (ret) - return ret; - } - - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); - if (ret) - return ret; return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); } @@ -762,9 +746,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); - /* Reset phases */ + /* Reset rx phase */ clk_set_phase(host->rx_clk, 0); - clk_set_phase(host->tx_clk, 270); break; From e7cedb56ae9a1c00b76df98d2adebb6208d2b71e Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 5 Feb 2018 16:24:52 +0200 Subject: [PATCH 2082/3326] mlx5: fix mlx5_get_vector_affinity to start from completion vector 0 commit 2572cf57d75a7f91835d9a38771e9e76d575d122 upstream. The consumers of this routine expects the affinity map of of vector index relative to the first completion vector. The upper layers are not aware of internal/private completion vectors that mlx5 allocates for its own usage. Hence, return the affinity map of vector index relative to the first completion vector. Fixes: 05e0cc84e00c ("net/mlx5: Fix get vector affinity helper function") Reported-by: Logan Gunthorpe Tested-by: Max Gurtovoy Reviewed-by: Max Gurtovoy Cc: # v4.15 Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- include/linux/mlx5/driver.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index a13525daf09b35..ae15864c870811 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1201,7 +1201,7 @@ mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) int eqn; int err; - err = mlx5_vector2eqn(dev, vector, &eqn, &irq); + err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq); if (err) return NULL; From 598b21708ec09ad33cd66d162d2fea2dd66d9f64 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Wed, 24 Jan 2018 19:35:45 +0100 Subject: [PATCH 2083/3326] Revert "apple-gmux: lock iGP IO to protect from vgaarb changes" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d6fa7588fd7a8def4c747c0c574ce85d453e3788 upstream. 
Commit 4eebd5a4e726 ("apple-gmux: lock iGP IO to protect from vgaarb changes") amended this driver's ->probe hook to lock decoding of normal (non-legacy) I/O space accesses to the integrated GPU on dual-GPU MacBook Pros. The lock stays in place until the driver is unbound. The change was made to work around an issue with the out-of-tree nvidia graphics driver (available at http://www.nvidia.com/object/unix.html). It contains the following sequence in nvidia/nv.c: #if defined(CONFIG_VGA_ARB) && !defined(NVCPU_PPC64LE) #if defined(VGA_DEFAULT_DEVICE) vga_tryget(VGA_DEFAULT_DEVICE, VGA_RSRC_LEGACY_MASK); #endif vga_set_legacy_decoding(dev, VGA_RSRC_NONE); #endif This code was reported to cause deadlocks with VFIO already in 2013: https://devtalk.nvidia.com/default/topic/545560 I've reported the issue to Nvidia developers once more in 2017: https://www.spinics.net/lists/dri-devel/msg138754.html On the MacBookPro10,1, this code apparently breaks backlight control (which is handled by apple-gmux via an I/O region starting at 0x700), as reported by Petri Hodju: https://bugzilla.kernel.org/show_bug.cgi?id=86121 I tried to replicate Petri's observations on my MacBook9,1, which uses the same Intel Ivy Bridge + Nvidia GeForce GT 650M architecture, to no avail. On my machine apple-gmux' I/O region remains accessible even with the nvidia driver loaded and commit 4eebd5a4e726 reverted. Petri reported that apple-gmux becomes accessible again after a suspend/resume cycle because the BIOS changed the VGA routing on the root port to the Nvidia GPU. Perhaps this is a BIOS issue after all that can be fixed with an update? In any case, the change made by commit 4eebd5a4e726 has turned out to cause two new issues: * Wilfried Klaebe reports a deadlock when launching Xorg because it opens /dev/vga_arbiter and calls vga_get(), but apple-gmux is holding a lock on I/O space indefinitely. It looks like apple-gmux' current behavior is an abuse of the vgaarb API as locks are not meant to be held for longer periods: https://bugzilla.kernel.org/show_bug.cgi?id=88861#c11 https://bugzilla.kernel.org/attachment.cgi?id=217541 * On dual GPU MacBook Pros introduced since 2013, the integrated GPU is powergated on boot und thus becomes invisible to Linux unless a custom EFI protocol is used to leave it powered on. (A patch exists but is not in mainline yet due to several negative side effects.) On these machines, locking I/O to the integrated GPU (as done by 4eebd5a4e726) fails and backlight control is therefore broken: https://bugzilla.kernel.org/show_bug.cgi?id=105051 So let's revert commit 4eebd5a4e726 please. Users experiencing the issue with the proprietary nvidia driver can comment out the above- quoted problematic code as a workaround (or try updating the BIOS). 
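As context for the deadlock report above: vgaarb locks are meant to bracket individual accesses, not to be held for a driver's whole lifetime. The usual pattern is roughly (sketch only, not code from this driver):

        if (vga_get_interruptible(pdev, VGA_RSRC_NORMAL_IO) == 0) {
                /* ... perform the I/O that must not be rerouted ... */
                vga_put(pdev, VGA_RSRC_NORMAL_IO);      /* release right away */
        }

Holding such a lock from ->probe until driver unbind, as the reverted commit did, blocks any later vga_get() caller such as Xorg opening /dev/vga_arbiter.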
Cc: Petri Hodju Cc: Bjorn Helgaas Cc: Bruno Prémont Cc: Andy Ritger Cc: Ronald Tschalär Tested-by: Wilfried Klaebe Signed-off-by: Lukas Wunner Cc: stable@vger.kernel.org Signed-off-by: Darren Hart (VMware) Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/apple-gmux.c | 48 +------------------------------ 1 file changed, 1 insertion(+), 47 deletions(-) diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 623d322447a219..7c4eb86c851ed9 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -54,7 +53,6 @@ struct apple_gmux_data { bool indexed; struct mutex index_lock; - struct pci_dev *pdev; struct backlight_device *bdev; /* switcheroo data */ @@ -599,23 +597,6 @@ static int gmux_resume(struct device *dev) return 0; } -static struct pci_dev *gmux_get_io_pdev(void) -{ - struct pci_dev *pdev = NULL; - - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) { - u16 cmd; - - pci_read_config_word(pdev, PCI_COMMAND, &cmd); - if (!(cmd & PCI_COMMAND_IO)) - continue; - - return pdev; - } - - return NULL; -} - static int is_thunderbolt(struct device *dev, void *data) { return to_pci_dev(dev)->is_thunderbolt; @@ -631,7 +612,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) int ret = -ENXIO; acpi_status status; unsigned long long gpe; - struct pci_dev *pdev = NULL; if (apple_gmux_data) return -EBUSY; @@ -682,7 +662,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) ver_minor = (version >> 16) & 0xff; ver_release = (version >> 8) & 0xff; } else { - pr_info("gmux device not present or IO disabled\n"); + pr_info("gmux device not present\n"); ret = -ENODEV; goto err_release; } @@ -690,23 +670,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor, ver_release, (gmux_data->indexed ? "indexed" : "classic")); - /* - * Apple systems with gmux are EFI based and normally don't use - * VGA. In addition changing IO+MEM ownership between IGP and dGPU - * disables IO/MEM used for backlight control on some systems. - * Lock IO+MEM to GPU with active IO to prevent switch. - */ - pdev = gmux_get_io_pdev(); - if (pdev && vga_tryget(pdev, - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM)) { - pr_err("IO+MEM vgaarb-locking for PCI:%s failed\n", - pci_name(pdev)); - ret = -EBUSY; - goto err_release; - } else if (pdev) - pr_info("locked IO for PCI:%s\n", pci_name(pdev)); - gmux_data->pdev = pdev; - memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS); @@ -822,10 +785,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) err_notify: backlight_device_unregister(bdev); err_release: - if (gmux_data->pdev) - vga_put(gmux_data->pdev, - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM); - pci_dev_put(pdev); release_region(gmux_data->iostart, gmux_data->iolen); err_free: kfree(gmux_data); @@ -845,11 +804,6 @@ static void gmux_remove(struct pnp_dev *pnp) &gmux_notify_handler); } - if (gmux_data->pdev) { - vga_put(gmux_data->pdev, - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM); - pci_dev_put(gmux_data->pdev); - } backlight_device_unregister(gmux_data->bdev); release_region(gmux_data->iostart, gmux_data->iolen); From 879bcbe0913fb6f938157af821d10f48235c166c Mon Sep 17 00:00:00 2001 From: "Tobin C. 
Harding" Date: Wed, 10 Jan 2018 00:27:29 -0500 Subject: [PATCH 2084/3326] jbd2: fix sphinx kernel-doc build warnings commit f69120ce6c024aa634a8fc25787205e42f0ccbe6 upstream. Sphinx emits various (26) warnings when building make target 'htmldocs'. Currently struct definitions contain duplicate documentation, some as kernel-docs and some as standard c89 comments. We can reduce duplication while cleaning up the kernel docs. Move all kernel-docs to right above each struct member. Use the set of all existing comments (kernel-doc and c89). Add documentation for missing struct members and function arguments. Signed-off-by: Tobin C. Harding Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- fs/jbd2/transaction.c | 5 +- include/linux/jbd2.h | 431 ++++++++++++++++++++++++++---------------- 2 files changed, 272 insertions(+), 164 deletions(-) diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 8b08044b31209f..c0681814c37917 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -495,8 +495,10 @@ void jbd2_journal_free_reserved(handle_t *handle) EXPORT_SYMBOL(jbd2_journal_free_reserved); /** - * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle + * int jbd2_journal_start_reserved() - start reserved handle * @handle: handle to start + * @type: for handle statistics + * @line_no: for handle statistics * * Start handle that has been previously reserved with jbd2_journal_reserve(). * This attaches @handle to the running transaction (or creates one if there's @@ -626,6 +628,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks) * int jbd2_journal_restart() - restart a handle . * @handle: handle to restart * @nblocks: nr credits requested + * @gfp_mask: memory allocation flags (for start_this_handle) * * Restart a handle for a multi-transaction filesystem * operation. diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 606b6bce3a5bb0..29290bfb94a847 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) #define JI_WAIT_DATA (1 << __JI_WAIT_DATA) /** - * struct jbd_inode is the structure linking inodes in ordered mode - * present in a transaction so that we can sync them during commit. + * struct jbd_inode - The jbd_inode type is the structure linking inodes in + * ordered mode present in a transaction so that we can sync them during commit. */ struct jbd2_inode { - /* Which transaction does this inode belong to? Either the running - * transaction or the committing one. [j_list_lock] */ + /** + * @i_transaction: + * + * Which transaction does this inode belong to? Either the running + * transaction or the committing one. [j_list_lock] + */ transaction_t *i_transaction; - /* Pointer to the running transaction modifying inode's data in case - * there is already a committing transaction touching it. [j_list_lock] */ + /** + * @i_next_transaction: + * + * Pointer to the running transaction modifying inode's data in case + * there is already a committing transaction touching it. 
[j_list_lock] + */ transaction_t *i_next_transaction; - /* List of inodes in the i_transaction [j_list_lock] */ + /** + * @i_list: List of inodes in the i_transaction [j_list_lock] + */ struct list_head i_list; - /* VFS inode this inode belongs to [constant during the lifetime - * of the structure] */ + /** + * @i_vfs_inode: + * + * VFS inode this inode belongs to [constant for lifetime of structure] + */ struct inode *i_vfs_inode; - /* Flags of inode [j_list_lock] */ + /** + * @i_flags: Flags of inode [j_list_lock] + */ unsigned long i_flags; }; @@ -447,12 +462,20 @@ struct jbd2_revoke_table_s; * struct handle_s - The handle_s type is the concrete type associated with * handle_t. * @h_transaction: Which compound transaction is this update a part of? + * @h_journal: Which journal handle belongs to - used iff h_reserved set. + * @h_rsv_handle: Handle reserved for finishing the logical operation. * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. - * @h_ref: Reference count on this handle - * @h_err: Field for caller's use to track errors through large fs operations - * @h_sync: flag for sync-on-close - * @h_jdata: flag to force data journaling - * @h_aborted: flag indicating fatal error on handle + * @h_ref: Reference count on this handle. + * @h_err: Field for caller's use to track errors through large fs operations. + * @h_sync: Flag for sync-on-close. + * @h_jdata: Flag to force data journaling. + * @h_reserved: Flag for handle for reserved credits. + * @h_aborted: Flag indicating fatal error on handle. + * @h_type: For handle statistics. + * @h_line_no: For handle statistics. + * @h_start_jiffies: Handle Start time. + * @h_requested_credits: Holds @h_buffer_credits after handle is started. + * @saved_alloc_context: Saved context while transaction is open. **/ /* Docbook can't yet cope with the bit fields, but will leave the documentation @@ -462,32 +485,23 @@ struct jbd2_revoke_table_s; struct jbd2_journal_handle { union { - /* Which compound transaction is this update a part of? */ transaction_t *h_transaction; /* Which journal handle belongs to - used iff h_reserved set */ journal_t *h_journal; }; - /* Handle reserved for finishing the logical operation */ handle_t *h_rsv_handle; - - /* Number of remaining buffers we are allowed to dirty: */ int h_buffer_credits; - - /* Reference count on this handle */ int h_ref; - - /* Field for caller's use to track errors through large fs */ - /* operations */ int h_err; /* Flags [no locking] */ - unsigned int h_sync: 1; /* sync-on-close */ - unsigned int h_jdata: 1; /* force data journaling */ - unsigned int h_reserved: 1; /* handle with reserved credits */ - unsigned int h_aborted: 1; /* fatal error on handle */ - unsigned int h_type: 8; /* for handle statistics */ - unsigned int h_line_no: 16; /* for handle statistics */ + unsigned int h_sync: 1; + unsigned int h_jdata: 1; + unsigned int h_reserved: 1; + unsigned int h_aborted: 1; + unsigned int h_type: 8; + unsigned int h_line_no: 16; unsigned long h_start_jiffies; unsigned int h_requested_credits; @@ -729,228 +743,253 @@ jbd2_time_diff(unsigned long start, unsigned long end) /** * struct journal_s - The journal_s type is the concrete type associated with * journal_t. - * @j_flags: General journaling state flags - * @j_errno: Is there an outstanding uncleared error on the journal (from a - * prior abort)? 
- * @j_sb_buffer: First part of superblock buffer - * @j_superblock: Second part of superblock buffer - * @j_format_version: Version of the superblock format - * @j_state_lock: Protect the various scalars in the journal - * @j_barrier_count: Number of processes waiting to create a barrier lock - * @j_barrier: The barrier lock itself - * @j_running_transaction: The current running transaction.. - * @j_committing_transaction: the transaction we are pushing to disk - * @j_checkpoint_transactions: a linked circular list of all transactions - * waiting for checkpointing - * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction - * to start committing, or for a barrier lock to be released - * @j_wait_done_commit: Wait queue for waiting for commit to complete - * @j_wait_commit: Wait queue to trigger commit - * @j_wait_updates: Wait queue to wait for updates to complete - * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop - * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints - * @j_head: Journal head - identifies the first unused block in the journal - * @j_tail: Journal tail - identifies the oldest still-used block in the - * journal. - * @j_free: Journal free - how many free blocks are there in the journal? - * @j_first: The block number of the first usable block - * @j_last: The block number one beyond the last usable block - * @j_dev: Device where we store the journal - * @j_blocksize: blocksize for the location where we store the journal. - * @j_blk_offset: starting block offset for into the device where we store the - * journal - * @j_fs_dev: Device which holds the client fs. For internal journal this will - * be equal to j_dev - * @j_reserved_credits: Number of buffers reserved from the running transaction - * @j_maxlen: Total maximum capacity of the journal region on disk. - * @j_list_lock: Protects the buffer lists and internal buffer state. - * @j_inode: Optional inode where we store the journal. If present, all journal - * block numbers are mapped into this inode via bmap(). - * @j_tail_sequence: Sequence number of the oldest transaction in the log - * @j_transaction_sequence: Sequence number of the next transaction to grant - * @j_commit_sequence: Sequence number of the most recently committed - * transaction - * @j_commit_request: Sequence number of the most recent transaction wanting - * commit - * @j_uuid: Uuid of client object. - * @j_task: Pointer to the current commit thread for this journal - * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a - * single compound commit transaction - * @j_commit_interval: What is the maximum transaction lifetime before we begin - * a commit? - * @j_commit_timer: The timer used to wakeup the commit thread - * @j_revoke_lock: Protect the revoke table - * @j_revoke: The revoke table - maintains the list of revoked blocks in the - * current transaction. - * @j_revoke_table: alternate revoke tables for j_revoke - * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction - * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the - * number that will fit in j_blocksize - * @j_last_sync_writer: most recent pid which did a synchronous write - * @j_history_lock: Protect the transactions statistics history - * @j_proc_entry: procfs entry for the jbd statistics directory - * @j_stats: Overall statistics - * @j_private: An opaque pointer to fs-private information. 
- * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies */ - struct journal_s { - /* General journaling state flags [j_state_lock] */ + /** + * @j_flags: General journaling state flags [j_state_lock] + */ unsigned long j_flags; - /* + /** + * @j_errno: + * * Is there an outstanding uncleared error on the journal (from a prior * abort)? [j_state_lock] */ int j_errno; - /* The superblock buffer */ + /** + * @j_sb_buffer: The first part of the superblock buffer. + */ struct buffer_head *j_sb_buffer; + + /** + * @j_superblock: The second part of the superblock buffer. + */ journal_superblock_t *j_superblock; - /* Version of the superblock format */ + /** + * @j_format_version: Version of the superblock format. + */ int j_format_version; - /* - * Protect the various scalars in the journal + /** + * @j_state_lock: Protect the various scalars in the journal. */ rwlock_t j_state_lock; - /* + /** + * @j_barrier_count: + * * Number of processes waiting to create a barrier lock [j_state_lock] */ int j_barrier_count; - /* The barrier lock itself */ + /** + * @j_barrier: The barrier lock itself. + */ struct mutex j_barrier; - /* + /** + * @j_running_transaction: + * * Transactions: The current running transaction... * [j_state_lock] [caller holding open handle] */ transaction_t *j_running_transaction; - /* + /** + * @j_committing_transaction: + * * the transaction we are pushing to disk * [j_state_lock] [caller holding open handle] */ transaction_t *j_committing_transaction; - /* + /** + * @j_checkpoint_transactions: + * * ... and a linked circular list of all transactions waiting for * checkpointing. [j_list_lock] */ transaction_t *j_checkpoint_transactions; - /* + /** + * @j_wait_transaction_locked: + * * Wait queue for waiting for a locked transaction to start committing, - * or for a barrier lock to be released + * or for a barrier lock to be released. */ wait_queue_head_t j_wait_transaction_locked; - /* Wait queue for waiting for commit to complete */ + /** + * @j_wait_done_commit: Wait queue for waiting for commit to complete. + */ wait_queue_head_t j_wait_done_commit; - /* Wait queue to trigger commit */ + /** + * @j_wait_commit: Wait queue to trigger commit. + */ wait_queue_head_t j_wait_commit; - /* Wait queue to wait for updates to complete */ + /** + * @j_wait_updates: Wait queue to wait for updates to complete. + */ wait_queue_head_t j_wait_updates; - /* Wait queue to wait for reserved buffer credits to drop */ + /** + * @j_wait_reserved: + * + * Wait queue to wait for reserved buffer credits to drop. + */ wait_queue_head_t j_wait_reserved; - /* Semaphore for locking against concurrent checkpoints */ + /** + * @j_checkpoint_mutex: + * + * Semaphore for locking against concurrent checkpoints. + */ struct mutex j_checkpoint_mutex; - /* + /** + * @j_chkpt_bhs: + * * List of buffer heads used by the checkpoint routine. This * was moved from jbd2_log_do_checkpoint() to reduce stack * usage. Access to this array is controlled by the - * j_checkpoint_mutex. [j_checkpoint_mutex] + * @j_checkpoint_mutex. [j_checkpoint_mutex] */ struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; - - /* + + /** + * @j_head: + * * Journal head: identifies the first unused block in the journal. * [j_state_lock] */ unsigned long j_head; - /* + /** + * @j_tail: + * * Journal tail: identifies the oldest still-used block in the journal. * [j_state_lock] */ unsigned long j_tail; - /* + /** + * @j_free: + * * Journal free: how many free blocks are there in the journal? 
* [j_state_lock] */ unsigned long j_free; - /* - * Journal start and end: the block numbers of the first usable block - * and one beyond the last usable block in the journal. [j_state_lock] + /** + * @j_first: + * + * The block number of the first usable block in the journal + * [j_state_lock]. */ unsigned long j_first; + + /** + * @j_last: + * + * The block number one beyond the last usable block in the journal + * [j_state_lock]. + */ unsigned long j_last; - /* - * Device, blocksize and starting block offset for the location where we - * store the journal. + /** + * @j_dev: Device where we store the journal. */ struct block_device *j_dev; + + /** + * @j_blocksize: Block size for the location where we store the journal. + */ int j_blocksize; + + /** + * @j_blk_offset: + * + * Starting block offset into the device where we store the journal. + */ unsigned long long j_blk_offset; + + /** + * @j_devname: Journal device name. + */ char j_devname[BDEVNAME_SIZE+24]; - /* + /** + * @j_fs_dev: + * * Device which holds the client fs. For internal journal this will be * equal to j_dev. */ struct block_device *j_fs_dev; - /* Total maximum capacity of the journal region on disk. */ + /** + * @j_maxlen: Total maximum capacity of the journal region on disk. + */ unsigned int j_maxlen; - /* Number of buffers reserved from the running transaction */ + /** + * @j_reserved_credits: + * + * Number of buffers reserved from the running transaction. + */ atomic_t j_reserved_credits; - /* - * Protects the buffer lists and internal buffer state. + /** + * @j_list_lock: Protects the buffer lists and internal buffer state. */ spinlock_t j_list_lock; - /* Optional inode where we store the journal. If present, all */ - /* journal block numbers are mapped into this inode via */ - /* bmap(). */ + /** + * @j_inode: + * + * Optional inode where we store the journal. If present, all + * journal block numbers are mapped into this inode via bmap(). + */ struct inode *j_inode; - /* + /** + * @j_tail_sequence: + * * Sequence number of the oldest transaction in the log [j_state_lock] */ tid_t j_tail_sequence; - /* + /** + * @j_transaction_sequence: + * * Sequence number of the next transaction to grant [j_state_lock] */ tid_t j_transaction_sequence; - /* + /** + * @j_commit_sequence: + * * Sequence number of the most recently committed transaction * [j_state_lock]. */ tid_t j_commit_sequence; - /* + /** + * @j_commit_request: + * * Sequence number of the most recent transaction wanting commit * [j_state_lock] */ tid_t j_commit_request; - /* + /** + * @j_uuid: + * * Journal uuid: identifies the object (filesystem, LVM volume etc) * backed by this journal. This will eventually be replaced by an array * of uuids, allowing us to index multiple devices within a single @@ -958,85 +997,151 @@ struct journal_s */ __u8 j_uuid[16]; - /* Pointer to the current commit thread for this journal */ + /** + * @j_task: Pointer to the current commit thread for this journal. + */ struct task_struct *j_task; - /* + /** + * @j_max_transaction_buffers: + * * Maximum number of metadata buffers to allow in a single compound - * commit transaction + * commit transaction. */ int j_max_transaction_buffers; - /* + /** + * @j_commit_interval: + * * What is the maximum transaction lifetime before we begin a commit? */ unsigned long j_commit_interval; - /* The timer used to wakeup the commit thread: */ + /** + * @j_commit_timer: The timer used to wakeup the commit thread. 
+ */ struct timer_list j_commit_timer; - /* - * The revoke table: maintains the list of revoked blocks in the - * current transaction. [j_revoke_lock] + /** + * @j_revoke_lock: Protect the revoke table. */ spinlock_t j_revoke_lock; + + /** + * @j_revoke: + * + * The revoke table - maintains the list of revoked blocks in the + * current transaction. + */ struct jbd2_revoke_table_s *j_revoke; + + /** + * @j_revoke_table: Alternate revoke tables for j_revoke. + */ struct jbd2_revoke_table_s *j_revoke_table[2]; - /* - * array of bhs for jbd2_journal_commit_transaction + /** + * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction. */ struct buffer_head **j_wbuf; + + /** + * @j_wbufsize: + * + * Size of @j_wbuf array. + */ int j_wbufsize; - /* - * this is the pid of hte last person to run a synchronous operation - * through the journal + /** + * @j_last_sync_writer: + * + * The pid of the last person to run a synchronous operation + * through the journal. */ pid_t j_last_sync_writer; - /* - * the average amount of time in nanoseconds it takes to commit a + /** + * @j_average_commit_time: + * + * The average amount of time in nanoseconds it takes to commit a * transaction to disk. [j_state_lock] */ u64 j_average_commit_time; - /* - * minimum and maximum times that we should wait for - * additional filesystem operations to get batched into a - * synchronous handle in microseconds + /** + * @j_min_batch_time: + * + * Minimum time that we should wait for additional filesystem operations + * to get batched into a synchronous handle in microseconds. */ u32 j_min_batch_time; + + /** + * @j_max_batch_time: + * + * Maximum time that we should wait for additional filesystem operations + * to get batched into a synchronous handle in microseconds. + */ u32 j_max_batch_time; - /* This function is called when a transaction is closed */ + /** + * @j_commit_callback: + * + * This function is called when a transaction is closed. + */ void (*j_commit_callback)(journal_t *, transaction_t *); /* * Journal statistics */ + + /** + * @j_history_lock: Protect the transactions statistics history. + */ spinlock_t j_history_lock; + + /** + * @j_proc_entry: procfs entry for the jbd statistics directory. + */ struct proc_dir_entry *j_proc_entry; + + /** + * @j_stats: Overall statistics. + */ struct transaction_stats_s j_stats; - /* Failed journal commit ID */ + /** + * @j_failed_commit: Failed journal commit ID. + */ unsigned int j_failed_commit; - /* + /** + * @j_private: + * * An opaque pointer to fs-private information. ext3 puts its - * superblock pointer here + * superblock pointer here. */ void *j_private; - /* Reference to checksum algorithm driver via cryptoapi */ + /** + * @j_chksum_driver: + * + * Reference to checksum algorithm driver via cryptoapi. + */ struct crypto_shash *j_chksum_driver; - /* Precomputed journal UUID checksum for seeding other checksums */ + /** + * @j_csum_seed: + * + * Precomputed journal UUID checksum for seeding other checksums. + */ __u32 j_csum_seed; #ifdef CONFIG_DEBUG_LOCK_ALLOC - /* + /** + * @j_trans_commit_map: + * * Lockdep entity to track transaction commit dependencies. Handles * hold this "lock" for read, when we wait for commit, we acquire the * "lock" for writing. This matches the properties of jbd2 journalling From 4d4d103a1b7e2990bcdbf83a24a83b3e351b6838 Mon Sep 17 00:00:00 2001 From: Harshad Shirwadkar Date: Wed, 10 Jan 2018 00:13:13 -0500 Subject: [PATCH 2085/3326] ext4: fix a race in the ext4 shutdown path commit abbc3f9395c76d554a9ed27d4b1ebfb5d9b0e4ca upstream. 
This patch fixes a race between the shutdown path and bio completion handling. In the ext4 direct io path with async io, after submitting a bio to the block layer, if journal starting fails, ext4_direct_IO_write() would bail out pretending that the IO failed. The caller would have had no way of knowing whether or not the IO was successfully submitted. So instead, we return -EIOCBQUEUED in this case. Now, the caller knows that the IO was submitted. The bio completion handler takes care of the error. Tested: Ran the shutdown xfstest test 461 in loop for over 2 hours across 4 machines resulting in over 400 runs. Verified that the race didn't occur. Usually the race was seen in about 20-30 iterations. Signed-off-by: Harshad Shirwadkar Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- fs/ext4/inode.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index ea2ccc524bd98b..0b9f3f28479908 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3724,10 +3724,18 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) /* Credits for sb + inode write */ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) { - /* This is really bad luck. We've written the data - * but cannot extend i_size. Bail out and pretend - * the write failed... */ - ret = PTR_ERR(handle); + /* + * We wrote the data but cannot extend + * i_size. Bail out. In async io case, we do + * not return error here because we have + * already submmitted the corresponding + * bio. Returning error here makes the caller + * think that this IO is done and failed + * resulting in race with bio's completion + * handler. + */ + if (!ret) + ret = PTR_ERR(handle); if (inode->i_nlink) ext4_orphan_del(NULL, inode); From 2e38988253c165a8b81d3ceea55fb68cb2d19e1c Mon Sep 17 00:00:00 2001 From: Zhouyi Zhou Date: Wed, 10 Jan 2018 00:34:19 -0500 Subject: [PATCH 2086/3326] ext4: save error to disk in __ext4_grp_locked_error() commit 06f29cc81f0350261f59643a505010531130eea0 upstream. In the function __ext4_grp_locked_error(), __save_error_info() is called to save error info in super block block, but does not sync that information to disk to info the subsequence fsck after reboot. This patch writes the error information to disk. After this patch, I think there is no obvious EXT4 error handle branches which leads to "Remounting filesystem read-only" will leave the disk partition miss the subsequence fsck. Signed-off-by: Zhouyi Zhou Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- fs/ext4/super.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f29351c6661097..16d247f056e2b4 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -742,6 +742,7 @@ __acquires(bitlock) } ext4_unlock_group(sb, grp); + ext4_commit_super(sb, 1); ext4_handle_error(sb); /* * We only get here in the ERRORS_RO case; relocking the group From ef7fd28b1137c4023a5fc9fc0497a92838d81910 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ernesto=20A=2E=20Fern=C3=A1ndez?= Date: Thu, 11 Jan 2018 13:43:33 -0500 Subject: [PATCH 2087/3326] ext4: correct documentation for grpid mount option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 9f0372488cc9243018a812e8cfbf27de650b187b upstream. The grpid option is currently described as being the same as nogrpid. Signed-off-by: Ernesto A. 
Fernández Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- Documentation/filesystems/ext4.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt index 5a8f7f4d2bca22..7449893dc039c4 100644 --- a/Documentation/filesystems/ext4.txt +++ b/Documentation/filesystems/ext4.txt @@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs data_err=abort Abort the journal if an error occurs in a file data buffer in ordered mode. -grpid Give objects the same group ID as their creator. +grpid New objects have the group ID of their parent. bsdgroups nogrpid (*) New objects have the group ID of their creator. From 568c61facce6b0bddbfa04ec634b34904c6e2f1c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 16 Feb 2018 16:25:53 +0100 Subject: [PATCH 2088/3326] mm: hide a #warning for COMPILE_TEST commit af27d9403f5b80685b79c88425086edccecaf711 upstream. We get a warning about some slow configurations in randconfig kernels: mm/memory.c:83:2: error: #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. [-Werror=cpp] The warning is reasonable by itself, but gets in the way of randconfig build testing, so I'm hiding it whenever CONFIG_COMPILE_TEST is set. The warning was added in 2013 in commit 75980e97dacc ("mm: fold page->_last_nid into page->flags where possible"). Cc: stable@vger.kernel.org Signed-off-by: Arnd Bergmann Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index a728bed16c2069..fc7779165dcf09 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -81,7 +81,7 @@ #include "internal.h" -#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS +#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST) #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. #endif From 1f21cd46cacae0fe7429d27c8781370317fe4a67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20H=2E=20Sch=C3=B6nherr?= Date: Fri, 19 Jan 2018 16:27:54 -0800 Subject: [PATCH 2089/3326] mm: Fix memory size alignment in devm_memremap_pages_release() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 10a0cd6e4932b5078215b1ec2c896597eec0eff9 upstream. The functions devm_memremap_pages() and devm_memremap_pages_release() use different ways to calculate the section-aligned amount of memory. The latter function may use an incorrect size if the memory region is small but straddles a section border. Use the same code for both. Cc: Fixes: 5f29a77cd957 ("mm: fix mixed zone detection in devm_memremap_pages") Signed-off-by: Jan H. 
Schönherr Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- kernel/memremap.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/memremap.c b/kernel/memremap.c index 403ab9cdb949a0..4712ce646e04b5 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -301,7 +301,8 @@ static void devm_memremap_pages_release(struct device *dev, void *data) /* pages are dead and unused, undo the arch mapping */ align_start = res->start & ~(SECTION_SIZE - 1); - align_size = ALIGN(resource_size(res), SECTION_SIZE); + align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) + - align_start; mem_hotplug_begin(); arch_remove_memory(align_start, align_size); From f4f261974c6bf60f6b4cf3b6f1ea836504dc95b3 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Wed, 17 Jan 2018 19:56:38 +0100 Subject: [PATCH 2090/3326] MIPS: Fix typo BIG_ENDIAN to CPU_BIG_ENDIAN commit 2e6522c565522a2e18409c315c49d78c8b74807b upstream. MIPS_GENERIC selects some options conditional on BIG_ENDIAN which does not exist. Replace BIG_ENDIAN with CPU_BIG_ENDIAN which is the correct kconfig name. Note that BMIPS_GENERIC does the same which confirms that this patch is needed. Fixes: eed0eabd12ef0 ("MIPS: generic: Introduce generic DT-based board support") Signed-off-by: Corentin Labbe Reviewed-by: James Hogan Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: # 4.9+ Patchwork: https://patchwork.linux-mips.org/patch/18495/ [jhogan@kernel.org: Clean up commit message] Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/Kconfig | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index c3d798b4403072..c82457b0e73330 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -119,12 +119,12 @@ config MIPS_GENERIC select SYS_SUPPORTS_MULTITHREADING select SYS_SUPPORTS_RELOCATABLE select SYS_SUPPORTS_SMARTMIPS - select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN - select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN - select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN - select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN - select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN - select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN + select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN + select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN + select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN + select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN + select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN + select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN select USE_OF help Select this to build a kernel which aims to support multiple boards, From 524a886aa8c98e6220e844afffa47439343db612 Mon Sep 17 00:00:00 2001 From: Marcin Nowakowski Date: Thu, 1 Feb 2018 12:37:21 +0100 Subject: [PATCH 2091/3326] MIPS: Fix incorrect mem=X@Y handling commit 67a3ba25aa955198196f40b76b329b3ab9ad415a upstream. Commit 73fbc1eba7ff ("MIPS: fix mem=X@Y commandline processing") added a fix to ensure that the memory range between PHYS_OFFSET and low memory address specified by mem= cmdline argument is not later processed by free_all_bootmem. This change was incorrect for systems where the commandline specifies more than 1 mem argument, as it will cause all memory between PHYS_OFFSET and each of the memory offsets to be marked as reserved, which results in parts of the RAM marked as reserved (Creator CI20's u-boot has a default commandline argument 'mem=256M@0x0 mem=768M@0x30000000'). 
Change the behaviour to ensure that only the range between PHYS_OFFSET and the lowest start address of the memories is marked as protected. This change also ensures that the range is marked protected even if it's only defined through the devicetree and not only via commandline arguments. Reported-by: Mathieu Malaterre Signed-off-by: Marcin Nowakowski Fixes: 73fbc1eba7ff ("MIPS: fix mem=X@Y commandline processing") Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: # v4.11+ Tested-by: Mathieu Malaterre Patchwork: https://patchwork.linux-mips.org/patch/18562/ Signed-off-by: James Hogan Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/setup.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index fe393972676515..795caa763da324 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -374,6 +374,7 @@ static void __init bootmem_init(void) unsigned long reserved_end; unsigned long mapstart = ~0UL; unsigned long bootmap_size; + phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX; bool bootmap_valid = false; int i; @@ -394,7 +395,8 @@ static void __init bootmem_init(void) max_low_pfn = 0; /* - * Find the highest page frame number we have available. + * Find the highest page frame number we have available + * and the lowest used RAM address */ for (i = 0; i < boot_mem_map.nr_map; i++) { unsigned long start, end; @@ -406,6 +408,8 @@ static void __init bootmem_init(void) end = PFN_DOWN(boot_mem_map.map[i].addr + boot_mem_map.map[i].size); + ramstart = min(ramstart, boot_mem_map.map[i].addr); + #ifndef CONFIG_HIGHMEM /* * Skip highmem here so we get an accurate max_low_pfn if low @@ -435,6 +439,13 @@ static void __init bootmem_init(void) mapstart = max(reserved_end, start); } + /* + * Reserve any memory between the start of RAM and PHYS_OFFSET + */ + if (ramstart > PHYS_OFFSET) + add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET, + BOOT_MEM_RESERVED); + if (min_low_pfn >= max_low_pfn) panic("Incorrect memory mapping !!!"); if (min_low_pfn > ARCH_PFN_OFFSET) { @@ -663,9 +674,6 @@ static int __init early_parse_mem(char *p) add_memory_region(start, size, BOOT_MEM_RAM); - if (start && start > PHYS_OFFSET) - add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET, - BOOT_MEM_RESERVED); return 0; } early_param("mem", early_parse_mem); From 990bb6eb9ec21bbf5925c9def1db53958041ce3b Mon Sep 17 00:00:00 2001 From: Dongdong Liu Date: Thu, 28 Dec 2017 17:53:32 +0800 Subject: [PATCH 2092/3326] PCI: Disable MSI for HiSilicon Hip06/Hip07 only in Root Port mode commit deb86999323661c019ef2740eb9d479d1e526b5c upstream. HiSilicon Hip06/Hip07 can operate as either a Root Port or an Endpoint. It always advertises an MSI capability, but it can only generate MSIs when in Endpoint mode. The device has the same Vendor and Device IDs in both modes, so check the Class Code and disable MSI only when operating as a Root Port. 
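The key point of the fix that follows is that the quirk is now keyed on the device's class code, so it only fires when the part enumerates as a PCI-to-PCI bridge (Root Port mode). Below is a self-contained userspace sketch of that decision, not the kernel's quirk machinery; 0x0604 is the standard bridge class code and the 24-bit class register is assumed to be class/subclass/prog-if:

#include <stdint.h>
#include <stdio.h>

#define PCI_CLASS_BRIDGE_PCI 0x0604

/* dev_class is the 24-bit class/subclass/prog-if register value */
static int should_disable_msi(uint32_t dev_class)
{
    return (dev_class >> 8) == PCI_CLASS_BRIDGE_PCI;
}

int main(void)
{
    printf("root port: %d\n", should_disable_msi(0x060400)); /* 1: quirk applies */
    printf("endpoint:  %d\n", should_disable_msi(0x080500)); /* 0: leave MSI alone */
    return 0;
}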
[bhelgaas: changelog] Fixes: 72f2ff0deb87 ("PCI: Disable MSI for HiSilicon Hip06/Hip07 Root Ports") Signed-off-by: Dongdong Liu Signed-off-by: Bjorn Helgaas Reviewed-by: Zhou Wang Cc: stable@vger.kernel.org # v4.11+ Signed-off-by: Greg Kroah-Hartman --- drivers/pci/quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f66f9375177c8a..4c3feb96f391ef 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1636,8 +1636,8 @@ static void quirk_pcie_mch(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch); /* * It's possible for the MSI to get corrupted if shpc and acpi From 8c125f391333a1e4fb1f760f5ed14657a1894bcf Mon Sep 17 00:00:00 2001 From: Ray Jui Date: Thu, 11 Jan 2018 12:36:16 -0800 Subject: [PATCH 2093/3326] PCI: iproc: Fix NULL pointer dereference for BCMA MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 3b65ca50d24ce33cb92d88840e289135c92b40ed upstream. With the inbound DMA mapping supported added, the iProc PCIe driver parses DT property "dma-ranges" through call to "of_pci_dma_range_parser_init()". In the case of BCMA, this results in a NULL pointer deference due to a missing of_node. Fix this by adding a guard in pcie-iproc-platform.c to only enable the inbound DMA mapping logic when DT property "dma-ranges" is present. Fixes: dd9d4e7498de3 ("PCI: iproc: Add inbound DMA mapping support") Reported-by: Rafał Miłecki Signed-off-by: Ray Jui [lorenzo.pieralisi@arm.com: updated commit log] Signed-off-by: Lorenzo Pieralisi Tested-by: Rafał Miłecki cc: # 4.10+ Signed-off-by: Greg Kroah-Hartman --- drivers/pci/host/pcie-iproc-platform.c | 7 +++++++ drivers/pci/host/pcie-iproc.c | 8 +++++--- drivers/pci/host/pcie-iproc.h | 2 ++ 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index a5073a921a049c..32228d41f7462d 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -92,6 +92,13 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) pcie->need_ob_cfg = true; } + /* + * DT nodes are not used by all platforms that use the iProc PCIe + * core driver. 
For platforms that require explict inbound mapping + * configuration, "dma-ranges" would have been present in DT + */ + pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); + /* PHY use is optional */ pcie->phy = devm_phy_get(dev, "pcie-phy"); if (IS_ERR(pcie->phy)) { diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index 3a8b9d20ee57b7..c0ecc9f35667f7 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -1396,9 +1396,11 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) } } - ret = iproc_pcie_map_dma_ranges(pcie); - if (ret && ret != -ENOENT) - goto err_power_off_phy; + if (pcie->need_ib_cfg) { + ret = iproc_pcie_map_dma_ranges(pcie); + if (ret && ret != -ENOENT) + goto err_power_off_phy; + } #ifdef CONFIG_ARM pcie->sysdata.private_data = pcie; diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h index a6b55cec9a668e..4ac6282f2bfd60 100644 --- a/drivers/pci/host/pcie-iproc.h +++ b/drivers/pci/host/pcie-iproc.h @@ -74,6 +74,7 @@ struct iproc_msi; * @ob: outbound mapping related parameters * @ob_map: outbound mapping related parameters specific to the controller * + * @need_ib_cfg: indicates SW needs to configure the inbound mapping window * @ib: inbound mapping related parameters * @ib_map: outbound mapping region related parameters * @@ -101,6 +102,7 @@ struct iproc_pcie { struct iproc_pcie_ob ob; const struct iproc_pcie_ob_map *ob_map; + bool need_ib_cfg; struct iproc_pcie_ib ib; const struct iproc_pcie_ib_map *ib_map; From c77b388550368068a1615e164b9441937786a1de Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 17 Nov 2017 14:38:31 +0100 Subject: [PATCH 2094/3326] PCI: keystone: Fix interrupt-controller-node lookup commit eac56aa3bc8af3d9b9850345d0f2da9d83529134 upstream. Fix child-node lookup during initialisation which was using the wrong OF-helper and ended up searching the whole device tree depth-first starting at the parent rather than just matching on its children. To make things worse, the parent pci node could end up being prematurely freed as of_find_node_by_name() drops a reference to its first argument. Any matching child interrupt-controller node was also leaked. 
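The difference between the two lookups in the keystone fix below is easiest to see on a toy tree: a children-only search can only return a direct child, while a depth-first search can wander into an unrelated, deeper node with the same name. A minimal userspace sketch with made-up node names, loosely mirroring of_get_child_by_name() versus the old behaviour:

#include <stdio.h>
#include <string.h>

struct node {
    const char *name;
    struct node *child;    /* first child */
    struct node *sibling;  /* next sibling */
};

/* Children-only lookup, in the spirit of of_get_child_by_name(). */
static struct node *find_child(struct node *parent, const char *name)
{
    for (struct node *n = parent->child; n; n = n->sibling)
        if (strcmp(n->name, name) == 0)
            return n;
    return NULL;
}

/* Depth-first lookup over the whole subtree - the behaviour the old code
 * effectively relied on, which can match the wrong node. */
static struct node *find_deep(struct node *start, const char *name)
{
    if (strcmp(start->name, name) == 0)
        return start;
    for (struct node *n = start->child; n; n = n->sibling) {
        struct node *hit = find_deep(n, name);
        if (hit)
            return hit;
    }
    return NULL;
}

int main(void)
{
    struct node nested = { "intc", NULL, NULL };            /* wrong one, nested deeper */
    struct node other  = { "other-device", &nested, NULL };
    struct node wanted = { "intc", NULL, NULL };            /* the intended child */
    struct node pcie   = { "pcie", &other, NULL };

    other.sibling = &wanted;

    printf("children-only hit is the direct child: %d\n",
           find_child(&pcie, "intc") == &wanted);           /* 1 */
    printf("depth-first hit is the nested node:    %d\n",
           find_deep(&pcie, "intc") == &nested);            /* 1 */
    return 0;
}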
Fixes: 0c4ffcfe1fbc ("PCI: keystone: Add TI Keystone PCIe driver") Cc: stable # 3.18 Acked-by: Murali Karicheri Signed-off-by: Johan Hovold [lorenzo.pieralisi@arm.com: updated commit subject] Signed-off-by: Lorenzo Pieralisi Signed-off-by: Greg Kroah-Hartman --- drivers/pci/dwc/pci-keystone.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c index 5bee3af4758887..39405598b22d2f 100644 --- a/drivers/pci/dwc/pci-keystone.c +++ b/drivers/pci/dwc/pci-keystone.c @@ -178,7 +178,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, } /* interrupt controller is in a child node */ - *np_temp = of_find_node_by_name(np_pcie, controller); + *np_temp = of_get_child_by_name(np_pcie, controller); if (!(*np_temp)) { dev_err(dev, "Node for %s is absent\n", controller); return -EINVAL; @@ -187,6 +187,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, temp = of_irq_count(*np_temp); if (!temp) { dev_err(dev, "No IRQ entries in %s\n", controller); + of_node_put(*np_temp); return -EINVAL; } @@ -204,6 +205,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, break; } + of_node_put(*np_temp); + if (temp) { *num_irqs = temp; return 0; From 6e6fd5b4ea9d568ac989a61b8d78ae0c5ce19bcf Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 29 Dec 2017 19:48:43 +0100 Subject: [PATCH 2095/3326] video: fbdev: atmel_lcdfb: fix display-timings lookup commit 9cb18db0701f6b74f0c45c23ad767b3ebebe37f6 upstream. Fix child-node lookup during probe, which ended up searching the whole device tree depth-first starting at the parent rather than just matching on its children. To make things worse, the parent display node was also prematurely freed. Note that the display and timings node references are never put after a successful dt-initialisation so the nodes would leak on later probe deferrals and on driver unbind. Fixes: b985172b328a ("video: atmel_lcdfb: add device tree suport") Cc: stable # 3.13 Cc: Jean-Christophe PLAGNIOL-VILLARD Cc: Nicolas Ferre Cc: Alexandre Belloni Signed-off-by: Johan Hovold Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/atmel_lcdfb.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index e06358da4b9969..3dee267d7c7502 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -1119,7 +1119,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) goto put_display_node; } - timings_np = of_find_node_by_name(display_np, "display-timings"); + timings_np = of_get_child_by_name(display_np, "display-timings"); if (!timings_np) { dev_err(dev, "failed to find display-timings node\n"); ret = -ENODEV; @@ -1140,6 +1140,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) fb_add_videomode(&fb_vm, &info->modelist); } + /* + * FIXME: Make sure we are not referencing any fields in display_np + * and timings_np and drop our references to them before returning to + * avoid leaking the nodes on probe deferral and driver unbind. + */ + return 0; put_timings_node: From 3711b5c568cd651ca78d6623d5c0fc839eac9499 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 15 Jan 2018 17:04:22 +0100 Subject: [PATCH 2096/3326] console/dummy: leave .con_font_get set to NULL commit 724ba8b30b044aa0d94b1cd374fc15806cdd6f18 upstream. 
When this method is set, the caller expects struct console_font fields to be properly initialized when it returns. Leave it unset otherwise nonsensical (leaked kernel stack) values are returned to user space. Signed-off-by: Nicolas Pitre Cc: stable@vger.kernel.org Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Greg Kroah-Hartman --- drivers/video/console/dummycon.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c index 9269d568523973..b90ef96e43d65a 100644 --- a/drivers/video/console/dummycon.c +++ b/drivers/video/console/dummycon.c @@ -67,7 +67,6 @@ const struct consw dummy_con = { .con_switch = DUMMY, .con_blank = DUMMY, .con_font_set = DUMMY, - .con_font_get = DUMMY, .con_font_default = DUMMY, .con_font_copy = DUMMY, }; From 0569dd9beef44d33e4866bf2db2e03fd3644de4f Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 16 Jan 2018 15:41:54 +0100 Subject: [PATCH 2097/3326] rbd: whitelist RBD_FEATURE_OPERATIONS feature bit commit e573427a440fd67d3f522357d7ac901d59281948 upstream. This feature bit restricts older clients from performing certain maintenance operations against an image (e.g. clone, snap create). krbd does not perform maintenance operations. Cc: stable@vger.kernel.org Signed-off-by: Ilya Dryomov Reviewed-by: Jason Dillaman Signed-off-by: Greg Kroah-Hartman --- drivers/block/rbd.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 609227211295a4..fe4fd8aee19ff1 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -124,11 +124,13 @@ static int atomic_dec_return_safe(atomic_t *v) #define RBD_FEATURE_STRIPINGV2 (1ULL<<1) #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2) #define RBD_FEATURE_DATA_POOL (1ULL<<7) +#define RBD_FEATURE_OPERATIONS (1ULL<<8) #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ RBD_FEATURE_STRIPINGV2 | \ RBD_FEATURE_EXCLUSIVE_LOCK | \ - RBD_FEATURE_DATA_POOL) + RBD_FEATURE_DATA_POOL | \ + RBD_FEATURE_OPERATIONS) /* Features supported by this (client software) implementation. */ From a2fd6c0950337a39abd79046edb43c24ba91d033 Mon Sep 17 00:00:00 2001 From: Simon Gaiser Date: Wed, 7 Feb 2018 21:47:40 +0100 Subject: [PATCH 2098/3326] xen: Fix {set,clear}_foreign_p2m_mapping on autotranslating guests commit 781198f1f373c3e350dbeb3af04a7d4c81c1b8d7 upstream. Commit 82616f9599a7 ("xen: remove tests for pvh mode in pure pv paths") removed the check for autotranslation from {set,clear}_foreign_p2m_mapping but those are called by grant-table.c also on PVH/HVM guests. 
Cc: # 4.14 Fixes: 82616f9599a7 ("xen: remove tests for pvh mode in pure pv paths") Signed-off-by: Simon Gaiser Reviewed-by: Juergen Gross Signed-off-by: Juergen Gross Signed-off-by: Greg Kroah-Hartman --- arch/x86/xen/p2m.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 6083ba462f350d..15812e553b95eb 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -694,6 +694,9 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, int i, ret = 0; pte_t *pte; + if (xen_feature(XENFEAT_auto_translated_physmap)) + return 0; + if (kmap_ops) { ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, kmap_ops, count); @@ -736,6 +739,9 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, { int i, ret = 0; + if (xen_feature(XENFEAT_auto_translated_physmap)) + return 0; + for (i = 0; i < count; i++) { unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); From 0f0fd00739118302568ff8b57ddfc5898c1a3796 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Fri, 2 Feb 2018 17:42:33 +0000 Subject: [PATCH 2099/3326] xenbus: track caller request id commit 29fee6eed2811ff1089b30fc579a2d19d78016ab upstream. Commit fd8aa9095a95 ("xen: optimize xenbus driver for multiple concurrent xenstore accesses") optimized xenbus concurrent accesses but in doing so broke UABI of /dev/xen/xenbus. Through /dev/xen/xenbus applications are in charge of xenbus message exchange with the correct header and body. Now, after the mentioned commit the replies received by application will no longer have the header req_id echoed back as it was on request (see specification below for reference), because that particular field is being overwritten by kernel. struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; Before there was only one request at a time so req_id could simply be forwarded back and forth. To allow simultaneous requests we need a different req_id for each message thus kernel keeps a monotonic increasing counter for this field and is written on every request irrespective of userspace value. Forwarding again the req_id on userspace requests is not a solution because we would open the possibility of userspace-generated req_id colliding with kernel ones. So this patch instead takes another route which is to artificially keep user req_id while keeping the xenbus logic as is. We do that by saving the original req_id before xs_send(), use the private kernel counter as req_id and then once reply comes and was validated, we restore back the original req_id. 
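The save/substitute/restore dance is small enough to show in a self-contained sketch; the types below are hypothetical stand-ins, not the xenbus structures:

#include <stdint.h>
#include <stdio.h>

struct sockmsg { uint32_t type, req_id, tx_id, len; };

struct req {
    struct sockmsg msg;
    uint32_t caller_req_id;   /* what the caller originally put in req_id */
};

static uint32_t next_req_id = 1;

static void send_req(struct req *r)
{
    r->caller_req_id = r->msg.req_id;  /* remember the caller's id */
    r->msg.req_id = next_req_id++;     /* kernel-private id goes on the wire */
    /* ... hand r->msg to xenstore ... */
}

static void complete_req(struct req *r, const struct sockmsg *reply)
{
    r->msg.type = reply->type;
    r->msg.len = reply->len;
    r->msg.req_id = r->caller_req_id;  /* echo the caller's id back */
}

int main(void)
{
    struct req r = { .msg = { .type = 1, .req_id = 42 } };
    send_req(&r);
    struct sockmsg reply = { .type = 1, .req_id = r.msg.req_id, .len = 0 };
    complete_req(&r, &reply);
    printf("reply req_id seen by caller: %u\n", (unsigned)r.msg.req_id); /* 42 */
    return 0;
}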
Cc: # 4.11 Fixes: fd8aa9095a ("xen: optimize xenbus driver for multiple concurrent xenstore accesses") Reported-by: Bhavesh Davda Signed-off-by: Joao Martins Reviewed-by: Juergen Gross Signed-off-by: Juergen Gross Signed-off-by: Greg Kroah-Hartman --- drivers/xen/xenbus/xenbus.h | 1 + drivers/xen/xenbus/xenbus_comms.c | 1 + drivers/xen/xenbus/xenbus_xs.c | 3 +++ 3 files changed, 5 insertions(+) diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index 149c5e7efc89e7..092981171df17f 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -76,6 +76,7 @@ struct xb_req_data { struct list_head list; wait_queue_head_t wq; struct xsd_sockmsg msg; + uint32_t caller_req_id; enum xsd_sockmsg_type type; char *body; const struct kvec *vec; diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index 5b081a01779de8..d239fc3c5e3ded 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -309,6 +309,7 @@ static int process_msg(void) goto out; if (req->state == xb_req_state_wait_reply) { + req->msg.req_id = req->caller_req_id; req->msg.type = state.msg.type; req->msg.len = state.msg.len; req->body = state.body; diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 3e59590c7254dd..3f3b29398ab8e2 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg) req->state = xb_req_state_queued; init_waitqueue_head(&req->wq); + /* Save the caller req_id and restore it later in the reply */ + req->caller_req_id = req->msg.req_id; req->msg.req_id = xs_request_enter(req); mutex_lock(&xb_write_mutex); @@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t, req->num_vecs = num_vecs; req->cb = xs_wake_up; + msg.req_id = 0; msg.tx_id = t.id; msg.type = type; msg.len = 0; From 29b4af70409cfca62d045cd4e0170912ab262c66 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 15 Nov 2017 11:34:58 +0100 Subject: [PATCH 2100/3326] seq_file: fix incomplete reset on read from zero offset commit cf5eebae2cd28d37581507668605f4d23cd7218d upstream. When resetting iterator on a zero offset we need to discard any data already in the buffer (count), and private state of the iterator (version). For example this bug results in first line being repeated in /proc/mounts if doing a zero size read before a non-zero size read. 
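Rewinding to offset zero therefore has to discard both the buffered-but-unread bytes and the iterator's private state, not just the record index. A stripped-down sketch mirroring the three fields the fix resets (hypothetical struct, not seq_file itself):

#include <stddef.h>

struct seq_state {
    size_t index;     /* which record the iterator is on */
    int    version;   /* private state of the iterator */
    size_t count;     /* bytes already formatted but not yet copied out */
};

/* Called at the top of a read: a rewind to offset 0 restarts everything. */
static void maybe_rewind(struct seq_state *m, long long ppos)
{
    if (ppos == 0) {
        m->index = 0;
        m->version = 0;
        m->count = 0;
    }
}

int main(void)
{
    struct seq_state m = { 7, 3, 128 };
    maybe_rewind(&m, 0);                           /* read from offset 0 */
    return (int)(m.index + m.version + m.count);   /* 0 */
}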
Reported-by: Rich Felker Signed-off-by: Miklos Szeredi Fixes: e522751d605d ("seq_file: reset iterator to first record for zero offset") Cc: # v4.10 Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/seq_file.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/seq_file.c b/fs/seq_file.c index 4be761c1a03d1f..eea09f6d883056 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -181,8 +181,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) * if request is to read from zero offset, reset iterator to first * record as it might have been already advanced by previous requests */ - if (*ppos == 0) + if (*ppos == 0) { m->index = 0; + m->version = 0; + m->count = 0; + } /* Don't assume *ppos is where we left it */ if (unlikely(*ppos != m->read_pos)) { From 294975841483c08e84572713f348cd51b8408021 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 5 Feb 2018 22:18:11 -0500 Subject: [PATCH 2101/3326] tracing: Fix parsing of globs with a wildcard at the beginning commit 07234021410bbc27b7c86c18de98616c29fbe667 upstream. Al Viro reported: For substring - sure, but what about something like "*a*b" and "a*b"? AFAICS, filter_parse_regex() ends up with identical results in both cases - MATCH_GLOB and *search = "a*b". And no way for the caller to tell one from another. Testing this with the following: # cd /sys/kernel/tracing # echo '*raw*lock' > set_ftrace_filter bash: echo: write error: Invalid argument With this patch: # echo '*raw*lock' > set_ftrace_filter # cat set_ftrace_filter _raw_read_trylock _raw_write_trylock _raw_read_unlock _raw_spin_unlock _raw_write_unlock _raw_spin_trylock _raw_spin_lock _raw_write_lock _raw_read_lock Al recommended not setting the search buffer to skip the first '*' unless we know we are not using MATCH_GLOB. This implements his suggested logic. Link: http://lkml.kernel.org/r/20180127170748.GF13338@ZenIV.linux.org.uk Cc: stable@vger.kernel.org Fixes: 60f1d5e3bac44 ("ftrace: Support full glob matching") Reviewed-by: Masami Hiramatsu Reported-by: Al Viro Suggsted-by: Al Viro Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events_filter.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 61e7f0678d3359..a764aec3c9a17a 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -400,7 +400,6 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) for (i = 0; i < len; i++) { if (buff[i] == '*') { if (!i) { - *search = buff + 1; type = MATCH_END_ONLY; } else if (i == len - 1) { if (type == MATCH_END_ONLY) @@ -410,14 +409,14 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) buff[i] = 0; break; } else { /* pattern continues, use full glob */ - type = MATCH_GLOB; - break; + return MATCH_GLOB; } } else if (strchr("[?\\", buff[i])) { - type = MATCH_GLOB; - break; + return MATCH_GLOB; } } + if (buff[0] == '*') + *search = buff + 1; return type; } From 753fc48e595a24eac4d4476a421c36307a7fbcb5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 7 Feb 2018 22:34:24 -0800 Subject: [PATCH 2102/3326] mpls, nospec: Sanitize array index in mpls_label_ok() commit 3968523f855050b8195134da951b87c20bd66130 upstream. mpls_label_ok() validates that the 'platform_label' array index from a userspace netlink message payload is valid. 
Under speculation the mpls_label_ok() result may not resolve in the CPU pipeline until after the index is used to access an array element. Sanitize the index to zero to prevent userspace-controlled arbitrary out-of-bounds speculation, a precursor for a speculative execution side channel vulnerability. Cc: Cc: "David S. Miller" Cc: Eric W. Biederman Signed-off-by: Dan Williams Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/mpls/af_mpls.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index c5b9ce41d66fad..aee385eb72e720 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -904,24 +905,27 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg, return err; } -static bool mpls_label_ok(struct net *net, unsigned int index, +static bool mpls_label_ok(struct net *net, unsigned int *index, struct netlink_ext_ack *extack) { + bool is_ok = true; + /* Reserved labels may not be set */ - if (index < MPLS_LABEL_FIRST_UNRESERVED) { + if (*index < MPLS_LABEL_FIRST_UNRESERVED) { NL_SET_ERR_MSG(extack, "Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher"); - return false; + is_ok = false; } /* The full 20 bit range may not be supported. */ - if (index >= net->mpls.platform_labels) { + if (is_ok && *index >= net->mpls.platform_labels) { NL_SET_ERR_MSG(extack, "Label >= configured maximum in platform_labels"); - return false; + is_ok = false; } - return true; + *index = array_index_nospec(*index, net->mpls.platform_labels); + return is_ok; } static int mpls_route_add(struct mpls_route_config *cfg, @@ -944,7 +948,7 @@ static int mpls_route_add(struct mpls_route_config *cfg, index = find_free_label(net); } - if (!mpls_label_ok(net, index, extack)) + if (!mpls_label_ok(net, &index, extack)) goto errout; /* Append makes no sense with mpls */ @@ -1021,7 +1025,7 @@ static int mpls_route_del(struct mpls_route_config *cfg, index = cfg->rc_label; - if (!mpls_label_ok(net, index, extack)) + if (!mpls_label_ok(net, &index, extack)) goto errout; mpls_route_update(net, index, NULL, &cfg->rc_nlinfo); @@ -1779,7 +1783,7 @@ static int rtm_to_route_config(struct sk_buff *skb, goto errout; if (!mpls_label_ok(cfg->rc_nlinfo.nl_net, - cfg->rc_label, extack)) + &cfg->rc_label, extack)) goto errout; break; } @@ -2106,7 +2110,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, goto errout; } - if (!mpls_label_ok(net, in_label, extack)) { + if (!mpls_label_ok(net, &in_label, extack)) { err = -EINVAL; goto errout; } From ff59e379234bb4b479deca97e29f685eabe22954 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Mon, 5 Feb 2018 12:38:11 -0600 Subject: [PATCH 2103/3326] rtlwifi: rtl8821ae: Fix connection lost problem correctly commit c713fb071edc0efc01a955f65a006b0e1795d2eb upstream. There has been a coding error in rtl8821ae since it was first introduced, namely that an 8-bit register was read using a 16-bit read in _rtl8821ae_dbi_read(). This error was fixed with commit 40b368af4b75 ("rtlwifi: Fix alignment issues"); however, this change led to instability in the connection. To restore stability, this change was reverted in commit b8b8b16352cd ("rtlwifi: rtl8821ae: Fix connection lost problem"). Unfortunately, the unaligned access causes machine checks in ARM architecture, and we were finally forced to find the actual cause of the problem on x86 platforms. 
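Returning briefly to the MPLS change above: the point of array_index_nospec() is to clamp the index with a branchless mask, so even a mispredicted bounds check cannot steer an out-of-bounds load. An illustrative userspace clamp in the same spirit, not the kernel's exact implementation:

#include <stdint.h>
#include <stdio.h>

/* Returns index if index < size, 0 otherwise, without a data-dependent branch. */
static uint64_t clamp_index(uint64_t index, uint64_t size)
{
    uint64_t t = index | (size - 1 - index);
    uint64_t mask = (uint64_t)((int64_t)~t >> 63);   /* all-ones iff index < size */
    return index & mask;
}

int main(void)
{
    printf("%llu %llu\n",
           (unsigned long long)clamp_index(5, 16),    /* 5 */
           (unsigned long long)clamp_index(99, 16));  /* 0 */
    return 0;
}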
Following a suggestion from Pkshih , it was found that increasing the ASPM L1 latency from 0 to 7 fixed the instability. This parameter was varied to see if a smaller value would work; however, it appears that 7 is the safest value. A new symbol is defined for this quantity, thus it can be easily changed if necessary. Fixes: b8b8b16352cd ("rtlwifi: rtl8821ae: Fix connection lost problem") Cc: Stable # 4.14+ Fix-suggested-by: Pkshih Signed-off-by: Larry Finger Tested-by: James Cameron # x86_64 OLPC NL3 Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 5 +++-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 9ac1511de7ba2f..b82e5b363c05b3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) } if (0 == tmp) { read_addr = REG_DBI_RDATA + addr % 4; - ret = rtl_read_word(rtlpriv, read_addr); + ret = rtl_read_byte(rtlpriv, read_addr); } return ret; } @@ -1164,7 +1164,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw) } tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f); - _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7)); + _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) | + ASPM_L1_LATENCY << 3); tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719); _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4)); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 1ab1024330fbb9..25c4e3e5592168 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -99,6 +99,7 @@ #define RTL_USB_MAX_RX_COUNT 100 #define QBSS_LOAD_SIZE 5 #define MAX_WMMELE_LENGTH 64 +#define ASPM_L1_LATENCY 7 #define TOTAL_CAM_ENTRY 32 From 851e2ea82303f3a543b32f5cc81adbc1929d053a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 13 Feb 2018 13:14:09 +0000 Subject: [PATCH 2104/3326] arm64: proc: Set PTE_NG for table entries to avoid traversing them twice commit 2ce77f6d8a9ae9ce6d80397d88bdceb84a2004cd upstream. When KASAN is enabled, the swapper page table contains many identical mappings of the zero page, which can lead to a stall during boot whilst the G -> nG code continually walks the same page table entries looking for global mappings. This patch sets the nG bit (bit 11, which is IGNORED) in table entries after processing the subtree so we can easily skip them if we see them a second time. 
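The trick in the arm64 patch that follows is to stamp each processed entry with an otherwise-ignored bit, so a walk that keeps meeting identical zero-page mappings can skip them immediately. A loose userspace analogy; the real change is assembly operating on live page tables:

#include <stdint.h>
#include <stdio.h>

#define ENT_VALID   (1ULL << 0)
#define ENT_VISITED (1ULL << 11)   /* an otherwise-ignored bit */

static void walk(uint64_t *table, int n)
{
    for (int i = 0; i < n; i++) {
        if (!(table[i] & ENT_VALID))
            continue;                      /* invalid entry */
        if (table[i] & ENT_VISITED)
            continue;                      /* already handled on an earlier visit */
        printf("processing entry %d\n", i);
        table[i] |= ENT_VISITED;           /* don't process it again */
    }
}

int main(void)
{
    uint64_t table[4] = { 0x1, 0x0, 0x1, 0x1 };
    walk(table, 4);    /* processes entries 0, 2 and 3 */
    walk(table, 4);    /* second pass skips them all */
    return 0;
}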
Tested-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Signed-off-by: Greg Kroah-Hartman --- arch/arm64/mm/proc.S | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 27058f3fd1320d..329a1c43365efe 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -190,7 +190,8 @@ ENDPROC(idmap_cpu_replace_ttbr1) dc cvac, cur_\()\type\()p // Ensure any existing dirty dmb sy // lines are written back before ldr \type, [cur_\()\type\()p] // loading the entry - tbz \type, #0, next_\()\type // Skip invalid entries + tbz \type, #0, skip_\()\type // Skip invalid and + tbnz \type, #11, skip_\()\type // non-global entries .endm .macro __idmap_kpti_put_pgtable_ent_ng, type @@ -249,8 +250,9 @@ ENTRY(idmap_kpti_install_ng_mappings) add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) do_pgd: __idmap_kpti_get_pgtable_ent pgd tbnz pgd, #1, walk_puds - __idmap_kpti_put_pgtable_ent_ng pgd next_pgd: + __idmap_kpti_put_pgtable_ent_ng pgd +skip_pgd: add cur_pgdp, cur_pgdp, #8 cmp cur_pgdp, end_pgdp b.ne do_pgd @@ -278,8 +280,9 @@ walk_puds: add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) do_pud: __idmap_kpti_get_pgtable_ent pud tbnz pud, #1, walk_pmds - __idmap_kpti_put_pgtable_ent_ng pud next_pud: + __idmap_kpti_put_pgtable_ent_ng pud +skip_pud: add cur_pudp, cur_pudp, 8 cmp cur_pudp, end_pudp b.ne do_pud @@ -298,8 +301,9 @@ walk_pmds: add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) do_pmd: __idmap_kpti_get_pgtable_ent pmd tbnz pmd, #1, walk_ptes - __idmap_kpti_put_pgtable_ent_ng pmd next_pmd: + __idmap_kpti_put_pgtable_ent_ng pmd +skip_pmd: add cur_pmdp, cur_pmdp, #8 cmp cur_pmdp, end_pmdp b.ne do_pmd @@ -317,7 +321,7 @@ walk_ptes: add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) do_pte: __idmap_kpti_get_pgtable_ent pte __idmap_kpti_put_pgtable_ent_ng pte -next_pte: +skip_pte: add cur_ptep, cur_ptep, #8 cmp cur_ptep, end_ptep b.ne do_pte From dc0b764a7c1a3a03b56c5582bf9a9c4554bf5e96 Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Thu, 19 Oct 2017 08:21:50 +0200 Subject: [PATCH 2105/3326] qxl: alloc & use shadow for dumb buffers commit 62676d10b483a2ff6e8b08c5e7c7d63a831343f5 upstream. This patch changes the way the primary surface is used for dumb framebuffers. Instead of configuring the bo itself as primary surface a shadow bo is created and used instead. Framebuffers can share the shadow bo in case they have the same format and resolution. On atomic plane updates we don't have to update the primary surface in case we pageflip from one framebuffer to another framebuffer which shares the same shadow. This in turn avoids the flicker caused by the primary-destroy + primary-create cycle, which is very annonying when running wayland on qxl. The qxl driver never actually writes to the shadow bo. It sends qxl blit commands which update it though, and the spice server might actually execute them (and thereby write to the shadow) in case the local rendering is kicked for some reason. This happens for example in case qemu is asked to write out a dump of the guest display (screendump monitor command). 
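The flicker fix below hinges on two dumb framebuffers being able to share one shadow bo, so a page flip between them never destroys and re-creates the primary surface. A toy sketch of that sharing rule (made-up types; the real code also compares crtc and src geometry before reusing a shadow):

#include <stdio.h>
#include <stdlib.h>

struct shadow {
    int width, height;
    int refcount;
};

struct fb {
    int width, height;
    struct shadow *shadow;
};

/* Reuse the old framebuffer's shadow when the geometry matches,
 * otherwise allocate a fresh one. */
static void attach_shadow(struct fb *new_fb, const struct fb *old_fb)
{
    if (old_fb && old_fb->shadow &&
        old_fb->width == new_fb->width &&
        old_fb->height == new_fb->height) {
        new_fb->shadow = old_fb->shadow;
        new_fb->shadow->refcount++;      /* shared: primary stays as it is */
    } else {
        new_fb->shadow = calloc(1, sizeof(*new_fb->shadow));
        new_fb->shadow->width = new_fb->width;
        new_fb->shadow->height = new_fb->height;
        new_fb->shadow->refcount = 1;    /* new shadow: primary must be re-created */
    }
}

int main(void)
{
    struct fb a = { 1024, 768, NULL }, b = { 1024, 768, NULL };
    attach_shadow(&a, NULL);
    attach_shadow(&b, &a);
    printf("shared: %d (refcount %d)\n", a.shadow == b.shadow, a.shadow->refcount);
    return 0;
}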
Signed-off-by: Gerd Hoffmann Reviewed-by: Dave Airlie Link: http://patchwork.freedesktop.org/patch/msgid/20171019062150.28090-3-kraxel@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/qxl/qxl_cmd.c | 6 +++- drivers/gpu/drm/qxl/qxl_display.c | 49 +++++++++++++++++++++++++++++-- drivers/gpu/drm/qxl/qxl_drv.h | 2 ++ drivers/gpu/drm/qxl/qxl_dumb.c | 1 + 4 files changed, 54 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 74fc9362ecf98f..3eb92085114173 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -388,7 +388,11 @@ void qxl_io_create_primary(struct qxl_device *qdev, create->width = bo->surf.width; create->height = bo->surf.height; create->stride = bo->surf.stride; - create->mem = qxl_bo_physical_address(qdev, bo, offset); + if (bo->shadow) { + create->mem = qxl_bo_physical_address(qdev, bo->shadow, offset); + } else { + create->mem = qxl_bo_physical_address(qdev, bo, offset); + } QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem, bo->kptr); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index ced6564b5814a6..7335d99244d5b1 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -305,7 +305,9 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = { void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); + struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj); + WARN_ON(bo->shadow); drm_gem_object_unreference_unlocked(qxl_fb->obj); drm_framebuffer_cleanup(fb); kfree(qxl_fb); @@ -508,6 +510,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, .x2 = qfb->base.width, .y2 = qfb->base.height }; + bool same_shadow = false; if (old_state->fb) { qfb_old = to_qxl_framebuffer(old_state->fb); @@ -519,15 +522,23 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, if (bo == bo_old) return; + if (bo_old && bo_old->shadow && bo->shadow && + bo_old->shadow == bo->shadow) { + same_shadow = true; + } + if (bo_old && bo_old->is_primary) { - qxl_io_destroy_primary(qdev); + if (!same_shadow) + qxl_io_destroy_primary(qdev); bo_old->is_primary = false; } if (!bo->is_primary) { - qxl_io_create_primary(qdev, 0, bo); + if (!same_shadow) + qxl_io_create_primary(qdev, 0, bo); bo->is_primary = true; } + qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); } @@ -681,8 +692,9 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane, static int qxl_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { + struct qxl_device *qdev = plane->dev->dev_private; struct drm_gem_object *obj; - struct qxl_bo *user_bo; + struct qxl_bo *user_bo, *old_bo = NULL; int ret; if (!new_state->fb) @@ -691,6 +703,32 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane, obj = to_qxl_framebuffer(new_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); + if (plane->type == DRM_PLANE_TYPE_PRIMARY && + user_bo->is_dumb && !user_bo->shadow) { + if (plane->state->fb) { + obj = to_qxl_framebuffer(plane->state->fb)->obj; + old_bo = gem_to_qxl_bo(obj); + } + if (old_bo && old_bo->shadow && + user_bo->gem_base.size == old_bo->gem_base.size && + plane->state->crtc == new_state->crtc && + plane->state->crtc_w == new_state->crtc_w && + plane->state->crtc_h == new_state->crtc_h && + plane->state->src_x == new_state->src_x && + plane->state->src_y == new_state->src_y && + plane->state->src_w == new_state->src_w && + plane->state->src_h == 
new_state->src_h && + plane->state->rotation == new_state->rotation && + plane->state->zpos == new_state->zpos) { + drm_gem_object_get(&old_bo->shadow->gem_base); + user_bo->shadow = old_bo->shadow; + } else { + qxl_bo_create(qdev, user_bo->gem_base.size, + true, true, QXL_GEM_DOMAIN_VRAM, NULL, + &user_bo->shadow); + } + } + ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); if (ret) return ret; @@ -715,6 +753,11 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane, obj = to_qxl_framebuffer(old_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); qxl_bo_unpin(user_bo); + + if (user_bo->shadow && !user_bo->is_primary) { + drm_gem_object_put_unlocked(&user_bo->shadow->gem_base); + user_bo->shadow = NULL; + } } static const uint32_t qxl_cursor_plane_formats[] = { diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 3397a190733693..8f3667de1537cb 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -113,6 +113,8 @@ struct qxl_bo { /* Constant after initialization */ struct drm_gem_object gem_base; bool is_primary; /* is this now a primary surface */ + bool is_dumb; + struct qxl_bo *shadow; bool hw_surf_alloc; struct qxl_surface surf; uint32_t surface_id; diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index 5e65d5d2d937d5..11085ab0137425 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -63,6 +63,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, &handle); if (r) return r; + qobj->is_dumb = true; args->pitch = pitch; args->handle = handle; return 0; From 848dd9bf51544c8e5436960124f3d1f9c51cdb99 Mon Sep 17 00:00:00 2001 From: Ray Strode Date: Mon, 27 Nov 2017 16:50:10 -0500 Subject: [PATCH 2106/3326] drm/qxl: reapply cursor after resetting primary commit 9428088c90b6f7d5edd2a1b0d742c75339b36f6e upstream. QXL associates mouse state with its primary plane. Destroying a primary plane and putting a new one in place has the side effect of destroying the cursor as well. This commit changes the driver to reapply the cursor any time a new primary is created. It achieves this by keeping a reference to the cursor bo on the qxl_crtc struct. This fix is very similar to commit 4532b241a4b7 ("drm/qxl: reapply cursor after SetCrtc calls") which got implicitly reverted as part of implementing the atomic modeset feature. 
Cc: Gerd Hoffmann Cc: Dave Airlie Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1512097 Fixes: 1277eed5fecb ("drm: qxl: Atomic phase 1: convert cursor to universal plane") Cc: stable@vger.kernel.org Signed-off-by: Ray Strode Signed-off-by: Dave Airlie Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/qxl/qxl_display.c | 59 +++++++++++++++++++++++++++++++ drivers/gpu/drm/qxl/qxl_drv.h | 2 ++ 2 files changed, 61 insertions(+) diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 7335d99244d5b1..9a9214ae0fb50d 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -289,6 +289,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc) { struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc); + qxl_bo_unref(&qxl_crtc->cursor_bo); drm_crtc_cleanup(crtc); kfree(qxl_crtc); } @@ -495,6 +496,53 @@ static int qxl_primary_atomic_check(struct drm_plane *plane, return 0; } +static int qxl_primary_apply_cursor(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct qxl_device *qdev = dev->dev_private; + struct drm_framebuffer *fb = plane->state->fb; + struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc); + struct qxl_cursor_cmd *cmd; + struct qxl_release *release; + int ret = 0; + + if (!qcrtc->cursor_bo) + return 0; + + ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), + QXL_RELEASE_CURSOR_CMD, + &release, NULL); + if (ret) + return ret; + + ret = qxl_release_list_add(release, qcrtc->cursor_bo); + if (ret) + goto out_free_release; + + ret = qxl_release_reserve_list(release, false); + if (ret) + goto out_free_release; + + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); + cmd->type = QXL_CURSOR_SET; + cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x; + cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y; + + cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0); + + cmd->u.set.visible = 1; + qxl_release_unmap(qdev, release, &cmd->release_info); + + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); + qxl_release_fence_buffer_objects(release); + + return ret; + +out_free_release: + qxl_release_free(qdev, release); + return ret; +} + static void qxl_primary_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -510,6 +558,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, .x2 = qfb->base.width, .y2 = qfb->base.height }; + int ret; bool same_shadow = false; if (old_state->fb) { @@ -531,6 +580,11 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, if (!same_shadow) qxl_io_destroy_primary(qdev); bo_old->is_primary = false; + + ret = qxl_primary_apply_cursor(plane); + if (ret) + DRM_ERROR( + "could not set cursor after creating primary"); } if (!bo->is_primary) { @@ -571,6 +625,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct qxl_device *qdev = dev->dev_private; struct drm_framebuffer *fb = plane->state->fb; + struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc); struct qxl_release *release; struct qxl_cursor_cmd *cmd; struct qxl_cursor *cursor; @@ -628,6 +683,10 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); cmd->type = QXL_CURSOR_SET; + + qxl_bo_unref(&qcrtc->cursor_bo); + qcrtc->cursor_bo = cursor_bo; + cursor_bo = NULL; } else { ret = qxl_release_reserve_list(release, true); diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h 
index 8f3667de1537cb..c0a927efa65382 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -135,6 +135,8 @@ struct qxl_bo_list { struct qxl_crtc { struct drm_crtc base; int index; + + struct qxl_bo *cursor_bo; }; struct qxl_output { From 84b41e3708ac9869190dd09d47b548392b7a656a Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 31 Jan 2018 12:34:05 -0500 Subject: [PATCH 2107/3326] xprtrdma: Fix calculation of ri_max_send_sges commit 1179e2c27efe21167ec9d882b14becefba2ee990 upstream. Commit 16f906d66cd7 ("xprtrdma: Reduce required number of send SGEs") introduced the rpcrdma_ia::ri_max_send_sges field. This fixes a problem where xprtrdma would not work if the device's max_sge capability was small (low single digits). At least RPCRDMA_MIN_SEND_SGES are needed for the inline parts of each RPC. ri_max_send_sges is set to this value: ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES; Then when marshaling each RPC, rpcrdma_args_inline uses that value to determine whether the device has enough Send SGEs to convey an NFS WRITE payload inline, or whether instead a Read chunk is required. More recently, commit ae72950abf99 ("xprtrdma: Add data structure to manage RDMA Send arguments") used the ri_max_send_sges value to calculate the size of an array, but that commit erroneously assumed ri_max_send_sges contains a value similar to the device's max_sge, and not one that was reduced by the minimum SGE count. This assumption results in the calculated size of the sendctx's Send SGE array to be too small. When the array is used to marshal an RPC, the code can write Send SGEs into the following sendctx element in that array, corrupting it. When the device's max_sge is large, this issue is entirely harmless; but it results in an oops in the provider's post_send method, if dev.attrs.max_sge is small. So let's straighten this out: ri_max_send_sges will now contain a value with the same meaning as dev.attrs.max_sge, which makes the code easier to understand, and enables rpcrdma_sendctx_create to calculate the size of the SGE array correctly. 
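The sizing mismatch is easiest to see with concrete numbers. The sketch below uses illustrative values (a small device max_sge, and RPCRDMA_MIN_SEND_SGES taken as 3); it is not driver code, but it shows why deriving the per-sendctx SGE array size from the reduced value leaves fewer slots than marshaling may legitimately consume.

```c
#include <stdio.h>

/* Illustrative numbers only: max_sge stands in for a small HCA's
 * dev.attrs.max_sge, and RPCRDMA_MIN_SEND_SGES is assumed to be 3. */
#define RPCRDMA_MIN_SEND_SGES 3

int main(void)
{
	unsigned int max_sge = 5;                               /* device limit */
	unsigned int old_val = max_sge - RPCRDMA_MIN_SEND_SGES; /* old meaning  */
	unsigned int new_val = max_sge;                         /* after fix    */

	/* rpcrdma_sendctx_create sizes its SGE array from ri_max_send_sges */
	printf("SGE slots per sendctx (old meaning): %u\n", old_val);
	printf("SGE slots per sendctx (new meaning): %u\n", new_val);
	printf("SGEs one marshaled Send may use:     up to %u\n", max_sge);
	/* With the old meaning, 2 slots back a Send that can use 5 SGEs,
	 * so writes spill into the next sendctx element in the array. */
	return 0;
}
```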
Reported-by: Michal Kalderon Fixes: 16f906d66cd7 ("xprtrdma: Reduce required number of send SGEs") Signed-off-by: Chuck Lever Tested-by: Michal Kalderon Cc: stable@vger.kernel.org # v4.10+ Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- net/sunrpc/xprtrdma/rpc_rdma.c | 2 +- net/sunrpc/xprtrdma/verbs.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index f1889f4d48030f..491ae9fc561f3e 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -142,7 +142,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, if (xdr->page_len) { remaining = xdr->page_len; offset = offset_in_page(xdr->page_base); - count = 0; + count = RPCRDMA_MIN_SEND_SGES; while (remaining) { remaining -= min_t(unsigned int, PAGE_SIZE - offset, remaining); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 11a1fbf7e59e08..cdf51c272c6259 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -523,7 +523,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); return -ENOMEM; } - ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES; + ia->ri_max_send_sges = max_sge; if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) { dprintk("RPC: %s: insufficient wqe's available\n", From 67154fb8012152aed14dbd70e5b7fc79dcfd53f4 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 31 Jan 2018 12:34:13 -0500 Subject: [PATCH 2108/3326] xprtrdma: Fix BUG after a device removal commit e89e8d8fcdc6751e86ccad794b052fe67e6ad619 upstream. Michal Kalderon reports a BUG that occurs just after device removal: [ 169.112490] rpcrdma: removing device qedr0 for 192.168.110.146:20049 [ 169.143909] BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 [ 169.181837] IP: rpcrdma_dma_unmap_regbuf+0xa/0x60 [rpcrdma] The RPC/RDMA client transport attempts to allocate some resources on demand. Registered buffers are one such resource. These are allocated (or re-allocated) by xprt_rdma_allocate to hold RPC Call and Reply messages. A hardware resource is associated with each of these buffers, as they can be used for a Send or Receive Work Request. If a device is removed from under an NFS/RDMA mount, the transport layer is responsible for releasing all hardware resources before the device can be finally unplugged. A BUG results when the NFS mount hasn't yet seen much activity: the transport tries to release resources that haven't yet been allocated. rpcrdma_free_regbuf() already checks for this case, so just move that check to cover the DEVICE_REMOVAL case as well. 
Reported-by: Michal Kalderon Fixes: bebd031866ca ("xprtrdma: Support unplugging an HCA ...") Signed-off-by: Chuck Lever Tested-by: Michal Kalderon Cc: stable@vger.kernel.org # v4.12+ Signed-off-by: Anna Schumaker Signed-off-by: Greg Kroah-Hartman --- net/sunrpc/xprtrdma/verbs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index cdf51c272c6259..9e8e1de19b2e5c 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -1331,6 +1331,9 @@ __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb) { + if (!rb) + return; + if (!rpcrdma_regbuf_is_mapped(rb)) return; @@ -1346,9 +1349,6 @@ rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb) void rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb) { - if (!rb) - return; - rpcrdma_dma_unmap_regbuf(rb); kfree(rb); } From 0528a533f368c93efe752dbd52698dcea214554a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 5 Feb 2018 13:16:56 -0700 Subject: [PATCH 2109/3326] blk-wbt: account flush requests correctly commit 5235553d821433e1f4fa720fd025d2c4b7ee9994 upstream. Mikulas reported a workload that saw bad performance, and figured out what it was due to various other types of requests being accounted as reads. Flush requests, for instance. Due to the high latency of those, we heavily throttle the writes to keep the latencies in balance. But they really should be accounted as writes. Fix this by checking the exact type of the request. If it's a read, account as a read, if it's a write or a flush, account as a write. Any other request we disregard. Previously everything would have been mistakenly accounted as reads. Reported-by: Mikulas Patocka Cc: stable@vger.kernel.org # v4.12+ Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- block/blk-wbt.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/block/blk-wbt.c b/block/blk-wbt.c index e59d59c11ebbb3..5c105514bca72b 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -698,7 +698,15 @@ u64 wbt_default_latency_nsec(struct request_queue *q) static int wbt_data_dir(const struct request *rq) { - return rq_data_dir(rq); + const int op = req_op(rq); + + if (op == REQ_OP_READ) + return READ; + else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH) + return WRITE; + + /* don't account */ + return -1; } int wbt_init(struct request_queue *q) From 4cbb9fdf133cc5a5dcb1ac7153791792976c62c9 Mon Sep 17 00:00:00 2001 From: David Disseldorp Date: Wed, 13 Dec 2017 18:22:30 +0100 Subject: [PATCH 2110/3326] target/iscsi: avoid NULL dereference in CHAP auth error path commit ce512d79d0466a604793addb6b769d12ee326822 upstream. If chap_server_compute_md5() fails early, e.g. via CHAP_N mismatch, then crypto_free_shash() is called with a NULL pointer which gets dereferenced in crypto_shash_tfm(). 
Fixes: 69110e3cedbb ("iscsi-target: Use shash and ahash") Suggested-by: Markus Elfring Signed-off-by: David Disseldorp Cc: stable@vger.kernel.org # 4.6+ Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target_auth.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index f9bc8ec6fb6b5e..9518ffd8b8bac8 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -421,7 +421,8 @@ static int chap_server_compute_md5( auth_ret = 0; out: kzfree(desc); - crypto_free_shash(tfm); + if (tfm) + crypto_free_shash(tfm); kfree(challenge); kfree(challenge_binhex); return auth_ret; From c8d0f63c15851e7d6f41b455d74e4b2b0a862a09 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 19 Jan 2018 14:36:29 +0100 Subject: [PATCH 2111/3326] iscsi-target: make sure to wake up sleeping login worker commit 1c130ae00b769a2e2df41bad3d6051ee8234b636 upstream. Mike Christie reports: Starting in 4.14 iscsi logins will fail around 50% of the time. Problem appears to be that iscsi_target_sk_data_ready() callback may return without doing anything in case it finds the login work queue is still blocked in sock_recvmsg(). Nicholas Bellinger says: It would indicate users providing their own ->sk_data_ready() callback must be responsible for waking up a kthread context blocked on sock_recvmsg(..., MSG_WAITALL), when a second ->sk_data_ready() is received before the first sock_recvmsg(..., MSG_WAITALL) completes. So, do this and invoke the original data_ready() callback -- in case of tcp sockets this takes care of waking the thread. Disclaimer: I do not understand why this problem did not show up before tcp prequeue removal. (Drop WARN_ON usage - nab) Reported-by: Mike Christie Bisected-by: Mike Christie Tested-by: Mike Christie Diagnosed-by: Nicholas Bellinger Fixes: e7942d0633c4 ("tcp: remove prequeue support") Signed-off-by: Florian Westphal Cc: stable@vger.kernel.org # 4.14+ Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target_nego.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 7a6751fecd323c..87248a2512e597 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -432,6 +432,9 @@ static void iscsi_target_sk_data_ready(struct sock *sk) if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { write_unlock_bh(&sk->sk_callback_lock); pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn); + if (iscsi_target_sk_data_ready == conn->orig_data_ready) + return; + conn->orig_data_ready(sk); return; } From f6318abd3a5458be821eb417969726622e1e09c4 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 15 Feb 2018 20:00:15 +1100 Subject: [PATCH 2112/3326] dm: correctly handle chained bios in dec_pending() commit 8dd601fa8317243be887458c49f6c29c2f3d719f upstream. dec_pending() is given an error status (possibly 0) to be recorded against a bio. It can be called several times on the one 'struct dm_io', and it is careful to only assign a non-zero error to io->status. However when it then assigned io->status to bio->bi_status, it is not careful and could overwrite a genuine error status with 0. This can happen when chained bios are in use. 
If a bio is chained beneath the bio that this dm_io is handling, the child bio might complete and set bio->bi_status before the dm_io completes. This has been possible since chained bios were introduced in 3.14, and has become a lot easier to trigger with commit 18a25da84354 ("dm: ensure bio submission follows a depth-first tree walk") as that commit caused dm to start using chained bios itself. A particular failure mode is that if a bio spans an 'error' target and a working target, the 'error' fragment will complete instantly and set the ->bi_status, and the other fragment will normally complete a little later, and will clear ->bi_status. The fix is simply to only assign io_error to bio->bi_status when io_error is not zero. Reported-and-tested-by: Milan Broz Cc: stable@vger.kernel.org (v3.14+) Signed-off-by: NeilBrown Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 804419635cc7d3..1dfc855ac7085b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -815,7 +815,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error) queue_io(md, bio); } else { /* done with normal IO or empty flush */ - bio->bi_status = io_error; + if (io_error) + bio->bi_status = io_error; bio_endio(bio); } } From c766cb48771b7604fb5beca646bfa9904d0a2b17 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Thu, 25 Jan 2018 11:02:50 -0700 Subject: [PATCH 2113/3326] Btrfs: fix deadlock in run_delalloc_nocow commit e89166990f11c3f21e1649d760dd35f9e410321c upstream. @cur_offset is not set back to what it should be (@cow_start) if btrfs_next_leaf() returns something wrong, and the range [cow_start, cur_offset) remains locked forever. cc: Signed-off-by: Liu Bo Reviewed-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/inode.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5eaedff28a32d6..db3f67b581044e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1330,8 +1330,11 @@ static noinline int run_delalloc_nocow(struct inode *inode, leaf = path->nodes[0]; if (path->slots[0] >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); - if (ret < 0) + if (ret < 0) { + if (cow_start != (u64)-1) + cur_offset = cow_start; goto error; + } if (ret > 0) break; leaf = path->nodes[0]; From fda3bb933b332a206730300a40ee71023548d0b0 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Thu, 25 Jan 2018 11:02:51 -0700 Subject: [PATCH 2114/3326] Btrfs: fix crash due to not cleaning up tree log block's dirty bits commit 1846430c24d66e85cc58286b3319c82cd54debb2 upstream. In cases that the whole fs flips into readonly status due to failures in critical sections, then log tree's blocks are still dirty, and this leads to a crash during umount time, the crash is about use-after-free, umount -> close_ctree -> stop workers -> iput(btree_inode) -> iput_final -> write_inode_now -> ... 
-> queue job on stop'd workers cc: v3.12+ Fixes: 681ae50917df ("Btrfs: cleanup reserved space when freeing tree log on error") Signed-off-by: Liu Bo Reviewed-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/tree-log.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index d3002842d7f68d..6e1a146f6ab453 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2494,6 +2494,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, clean_tree_block(fs_info, next); btrfs_wait_tree_block_writeback(next); btrfs_tree_unlock(next); + } else { + if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) + clear_extent_buffer_dirty(next); } WARN_ON(root_owner != @@ -2574,6 +2577,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, clean_tree_block(fs_info, next); btrfs_wait_tree_block_writeback(next); btrfs_tree_unlock(next); + } else { + if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) + clear_extent_buffer_dirty(next); } WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); @@ -2652,6 +2658,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, clean_tree_block(fs_info, next); btrfs_wait_tree_block_writeback(next); btrfs_tree_unlock(next); + } else { + if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) + clear_extent_buffer_dirty(next); } WARN_ON(log->root_key.objectid != From 9a701c4fa563d3102b291de2531e0288ac4d7ee1 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Thu, 25 Jan 2018 11:02:52 -0700 Subject: [PATCH 2115/3326] Btrfs: fix extent state leak from tree log commit 55237a5f2431a72435e3ed39e4306e973c0446b7 upstream. It's possible that btrfs_sync_log() bails out after one of the two btrfs_write_marked_extents() which convert extent state's state bit into EXTENT_NEED_WAIT from EXTENT_DIRTY/EXTENT_NEW, however only EXTENT_DIRTY and EXTENT_NEW are searched by free_log_tree() so that those extent states with EXTENT_NEED_WAIT lead to memory leak. cc: Signed-off-by: Liu Bo Reviewed-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/tree-log.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 6e1a146f6ab453..75b964ff335b3b 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3047,13 +3047,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans, while (1) { ret = find_first_extent_bit(&log->dirty_log_pages, - 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW, + 0, &start, &end, + EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT, NULL); if (ret) break; clear_extent_bits(&log->dirty_log_pages, start, end, - EXTENT_DIRTY | EXTENT_NEW); + EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); } /* From 1371798b92c84ddb06e0630fc1e2a9fcdeec2b57 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Thu, 25 Jan 2018 11:02:53 -0700 Subject: [PATCH 2116/3326] Btrfs: fix btrfs_evict_inode to handle abnormal inodes correctly commit e8f1bc1493855e32b7a2a019decc3c353d94daf6 upstream. This regression is introduced in commit 3d48d9810de4 ("btrfs: Handle uninitialised inode eviction"). There are two problems, a) it is ->destroy_inode() that does the final free on inode, not ->evict_inode(), b) clear_inode() must be called before ->evict_inode() returns. This could end up hitting BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); in evict() because I_CLEAR is set in clear_inode(). 
Fixes: commit 3d48d9810de4 ("btrfs: Handle uninitialised inode eviction") Cc: # v4.7-rc6+ Signed-off-by: Liu Bo Reviewed-by: Nikolay Borisov Reviewed-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index db3f67b581044e..dec7eea8aa295d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5318,7 +5318,7 @@ void btrfs_evict_inode(struct inode *inode) trace_btrfs_inode_evict(inode); if (!root) { - kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); + clear_inode(inode); return; } From f30c7d95b4eb1fefe9ac0b31f4a8a3e9d4b04bb1 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Thu, 25 Jan 2018 11:02:54 -0700 Subject: [PATCH 2117/3326] Btrfs: fix use-after-free on root->orphan_block_rsv commit 1a932ef4e47984dee227834667b5ff5a334e4805 upstream. I got these from running generic/475, WARNING: CPU: 0 PID: 26384 at fs/btrfs/inode.c:3326 btrfs_orphan_commit_root+0x1ac/0x2b0 [btrfs] BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 IP: btrfs_block_rsv_release+0x1c/0x70 [btrfs] Call Trace: btrfs_orphan_release_metadata+0x9f/0x200 [btrfs] btrfs_orphan_del+0x10d/0x170 [btrfs] btrfs_setattr+0x500/0x640 [btrfs] notify_change+0x7ae/0x870 do_truncate+0xca/0x130 vfs_truncate+0x2ee/0x3d0 do_sys_truncate+0xaf/0xf0 SyS_truncate+0xe/0x10 entry_SYSCALL_64_fastpath+0x1f/0x96 The race is between btrfs_orphan_commit_root and btrfs_orphan_del, t1 t2 btrfs_orphan_commit_root btrfs_orphan_del spin_lock check (&root->orphan_inodes) root->orphan_block_rsv = NULL; spin_unlock atomic_dec(&root->orphan_inodes); access root->orphan_block_rsv Accessing root->orphan_block_rsv must be done before decreasing root->orphan_inodes. cc: v3.12+ Fixes: 703c88e03524 ("Btrfs: fix tracking of orphan inode count") Signed-off-by: Liu Bo Reviewed-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/inode.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index dec7eea8aa295d..1ae61f82e54b15 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3371,6 +3371,11 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, ret = btrfs_orphan_reserve_metadata(trans, inode); ASSERT(!ret); if (ret) { + /* + * dec doesn't need spin_lock as ->orphan_block_rsv + * would be released only if ->orphan_inodes is + * zero. + */ atomic_dec(&root->orphan_inodes); clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, &inode->runtime_flags); @@ -3385,12 +3390,17 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, if (insert >= 1) { ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); if (ret) { - atomic_dec(&root->orphan_inodes); if (reserve) { clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, &inode->runtime_flags); btrfs_orphan_release_metadata(inode); } + /* + * btrfs_orphan_commit_root may race with us and set + * ->orphan_block_rsv to zero, in order to avoid that, + * decrease ->orphan_inodes after everything is done. 
+ */ + atomic_dec(&root->orphan_inodes); if (ret != -EEXIST) { clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, &inode->runtime_flags); @@ -3422,28 +3432,26 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans, { struct btrfs_root *root = inode->root; int delete_item = 0; - int release_rsv = 0; int ret = 0; - spin_lock(&root->orphan_lock); if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, &inode->runtime_flags)) delete_item = 1; + if (delete_item && trans) + ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); + if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, &inode->runtime_flags)) - release_rsv = 1; - spin_unlock(&root->orphan_lock); + btrfs_orphan_release_metadata(inode); - if (delete_item) { + /* + * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv + * to zero, in order to avoid that, decrease ->orphan_inodes after + * everything is done. + */ + if (delete_item) atomic_dec(&root->orphan_inodes); - if (trans) - ret = btrfs_del_orphan_item(trans, root, - btrfs_ino(inode)); - } - - if (release_rsv) - btrfs_orphan_release_metadata(inode); return ret; } From 61c07810bf2e53f348dbdba751ddf7ba31aba4aa Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Thu, 25 Jan 2018 11:02:56 -0700 Subject: [PATCH 2118/3326] Btrfs: fix unexpected -EEXIST when creating new inode commit 900c9981680067573671ecc5cbfa7c5770be3a40 upstream. The highest objectid, which is assigned to new inode, is decided at the time of initializing fs roots. However, in cases where log replay gets processed, the btree which fs root owns might be changed, so we have to search it again for the highest objectid, otherwise creating new inode would end up with -EEXIST. cc: v4.4-rc6+ Fixes: f32e48e92596 ("Btrfs: Initialize btrfs_root->highest_objectid when loading tree root and subvolume roots") Signed-off-by: Liu Bo Reviewed-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/tree-log.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 75b964ff335b3b..b6dfe7af7a1fa2 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -28,6 +28,7 @@ #include "hash.h" #include "compression.h" #include "qgroup.h" +#include "inode-map.h" /* magic values for the inode_only field in btrfs_log_inode: * @@ -5715,6 +5716,23 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) path); } + if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { + struct btrfs_root *root = wc.replay_dest; + + btrfs_release_path(path); + + /* + * We have just replayed everything, and the highest + * objectid of fs roots probably has changed in case + * some inode_item's got replayed. + * + * root->objectid_mutex is not acquired as log replay + * could only happen during mount. + */ + ret = btrfs_find_highest_objectid(root, + &root->highest_objectid); + } + key.offset = found_key.offset - 1; wc.replay_dest->log_root = NULL; free_extent_buffer(log->node); From 8a8c9588c289bcd2dbaf7a198144293978833a7c Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Mon, 22 Jan 2018 22:02:05 +0100 Subject: [PATCH 2119/3326] 9p/trans_virtio: discard zero-length reply commit 26d99834f89e76514076d9cd06f61e56e6a509b8 upstream. When a 9p request is successfully flushed, the server is expected to just mark it as used without sending a 9p reply (ie, without writing data into the buffer). 
In this case, virtqueue_get_buf() will return len == 0 and we must not report a REQ_STATUS_RCVD status to the client, otherwise the client will erroneously assume the request has not been flushed. Cc: stable@vger.kernel.org Signed-off-by: Greg Kurz Signed-off-by: Michael S. Tsirkin Signed-off-by: Greg Kroah-Hartman --- net/9p/trans_virtio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index f3a4efcf145642..3aa5a93ad107c1 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -160,7 +160,8 @@ static void req_done(struct virtqueue *vq) spin_unlock_irqrestore(&chan->lock, flags); /* Wakeup if anyone waiting for VirtIO ring space. */ wake_up(chan->vc_wq); - p9_client_cb(chan->client, req, REQ_STATUS_RCVD); + if (len) + p9_client_cb(chan->client, req, REQ_STATUS_RCVD); } } From bc74262f3a6524b5122cf1388965bd0c7d9cb5cb Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Fri, 9 Feb 2018 13:21:42 +0100 Subject: [PATCH 2120/3326] mtd: nand: vf610: set correct ooblayout commit ea56fb282368ea08c2a313af6b55cb597aec4db1 upstream. With commit 3cf32d180227 ("mtd: nand: vf610: switch to mtd_ooblayout_ops") the driver started to use the NAND cores default large page ooblayout. However, shortly after commit 6a623e076944 ("mtd: nand: add ooblayout for old hamming layout") changed the default layout to the old hamming layout, which is not what vf610_nfc is using. Specify the default large page layout explicitly. Fixes: 6a623e076944 ("mtd: nand: add ooblayout for old hamming layout") Cc: # v4.12+ Signed-off-by: Stefan Agner Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/vf610_nfc.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index 8037d4b48a056a..e2583a539b413e 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c @@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev) if (mtd->oobsize > 64) mtd->oobsize = 64; - /* - * mtd->ecclayout is not specified here because we're using the - * default large page ECC layout defined in NAND core. - */ + /* Use default large page ECC layout defined in NAND core */ + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); if (chip->ecc.strength == 32) { nfc->ecc_mode = ECC_60_BYTE; chip->ecc.bytes = 60; From c0cf529a8f4df5d063816c59af8ea927d277c799 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Mon, 29 Jan 2018 14:23:15 +0800 Subject: [PATCH 2121/3326] ALSA: hda - Fix headset mic detection problem for two Dell machines commit 3f2f7c553d077be6a30cb96b2976a2c940bf5335 upstream. One of them has the codec of alc256 and the other one has the codec of alc289. 
Cc: Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b2d039537d5ecf..bd6d562fe71697 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6584,6 +6584,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0xb7a60130}, {0x14, 0x90170110}, {0x21, 0x02211020}), + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x90a60130}, + {0x14, 0x90170110}, + {0x14, 0x01011020}, + {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ALC256_STANDARD_PINS), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC, @@ -6653,6 +6658,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60120}, {0x14, 0x90170110}, {0x21, 0x0321101f}), + SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0xb7a60130}, + {0x14, 0x90170110}, + {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, ALC290_STANDARD_PINS, {0x15, 0x04211040}, From 8f65cd77c97c033bade459958611b1c9126c553c Mon Sep 17 00:00:00 2001 From: Kirill Marinushkin Date: Mon, 29 Jan 2018 06:37:55 +0100 Subject: [PATCH 2122/3326] ALSA: usb-audio: Fix UAC2 get_ctl request with a RANGE attribute commit 447cae58cecd69392b74a4a42cd0ab9cabd816af upstream. The layout of the UAC2 Control request and response varies depending on the request type. With the current implementation, only the Layout 2 Parameter Block (with the 2-byte sized RANGE attribute) is handled properly. For the Control requests with the 1-byte sized RANGE attribute (Bass Control, Mid Control, Tremble Control), the response is parsed incorrectly. 
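Concretely, a UAC2 RANGE reply carries a 2-byte wNumSubRanges count followed by MIN/MAX/RES values whose width matches the control's value size. The stand-alone snippet below (an illustration of the layout, not the mixer code) prints where those fields sit for 1-byte and 2-byte controls; stepping through the buffer in fixed sizeof(__u16) strides, as the old code did, only lands on the right offsets in the 2-byte case.

```c
#include <stdio.h>
#include <stdint.h>

/* Offsets of MIN/MAX/RES in a RANGE reply holding a single subrange:
 * a 16-bit wNumSubRanges header followed by val_size-wide fields. */
static void show(unsigned int val_size)
{
	unsigned int base = sizeof(uint16_t);	/* skip wNumSubRanges */

	printf("val_size=%u: MIN@%u MAX@%u RES@%u, reply length %u\n",
	       val_size, base, base + val_size, base + 2 * val_size,
	       base + 3 * val_size);
}

int main(void)
{
	show(1);	/* 1-byte controls, e.g. bass/mid/treble */
	show(2);	/* 2-byte controls, e.g. volume          */
	/* The old code always read MAX at offset 4 and RES at offset 6,
	 * which matches only the val_size == 2 layout. */
	return 0;
}
```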
This commit: * fixes the wLength field value in the request * fixes parsing the range values from the response Fixes: 23caaf19b11e ("ALSA: usb-mixer: Add support for Audio Class v2.0") Signed-off-by: Kirill Marinushkin Cc: Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/mixer.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 75bce127d768c6..89efec891e68cb 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -347,17 +347,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) { struct snd_usb_audio *chip = cval->head.mixer->chip; - unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */ + /* enough space for one range */ + unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)]; unsigned char *val; - int idx = 0, ret, size; + int idx = 0, ret, val_size, size; __u8 bRequest; + val_size = uac2_ctl_value_size(cval->val_type); + if (request == UAC_GET_CUR) { bRequest = UAC2_CS_CUR; - size = uac2_ctl_value_size(cval->val_type); + size = val_size; } else { bRequest = UAC2_CS_RANGE; - size = sizeof(buf); + size = sizeof(__u16) + 3 * val_size; } memset(buf, 0, sizeof(buf)); @@ -390,16 +393,17 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, val = buf + sizeof(__u16); break; case UAC_GET_MAX: - val = buf + sizeof(__u16) * 2; + val = buf + sizeof(__u16) + val_size; break; case UAC_GET_RES: - val = buf + sizeof(__u16) * 3; + val = buf + sizeof(__u16) + val_size * 2; break; default: return -EINVAL; } - *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16))); + *value_ret = convert_signed_value(cval, + snd_usb_combine_bytes(val, val_size)); return 0; } From 5640397f06ada0800d734b98fced5e74fbd4b217 Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Fri, 2 Feb 2018 15:13:09 +0800 Subject: [PATCH 2123/3326] ALSA: hda/realtek - Add headset mode support for Dell laptop commit 40e2c4e5a7efcd50983aacbddd3c617e776018bf upstream. This platform had two Dmic and single Dmic. This update was for single Dmic. This commit was for two Dmic. 
Fixes: 75ee94b20b46 ("ALSA: hda - fix headset mic problem for Dell machines...") Signed-off-by: Kailang Yang Cc: Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index bd6d562fe71697..41acae02991da9 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6176,6 +6176,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), + SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), + SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), From 81ae4f7479f96a5123ace58cc3bdc11303de8a46 Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Fri, 2 Feb 2018 15:26:46 +0800 Subject: [PATCH 2124/3326] ALSA: hda/realtek - Enable Thinkpad Dock device for ALC298 platform commit 61fcf8ece9b6b09450250c4ca40cc3b81a96a68d upstream. Thinkpad Dock device support for ALC298 platform. It need to use SSID for the quirk table. Because IdeaPad also has ALC298 platform. Use verb for the quirk table will confuse. Signed-off-by: Kailang Yang Cc: Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 42 +++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 41acae02991da9..80de62d8931498 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4827,6 +4827,28 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec, } } +static void alc_fixup_tpt470_dock(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + static const struct hda_pintbl pincfgs[] = { + { 0x17, 0x21211010 }, /* dock headphone */ + { 0x19, 0x21a11010 }, /* dock mic */ + { } + }; + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; + /* Enable DOCK device */ + snd_hda_codec_write(codec, 0x17, 0, + AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); + /* Enable DOCK device */ + snd_hda_codec_write(codec, 0x19, 0, + AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); + snd_hda_apply_pincfgs(codec, pincfgs); + } +} + static void alc_shutup_dell_xps13(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -5301,6 +5323,7 @@ enum { ALC700_FIXUP_INTEL_REFERENCE, ALC274_FIXUP_DELL_BIND_DACS, ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, + ALC298_FIXUP_TPT470_DOCK, }; static const struct hda_fixup alc269_fixups[] = { @@ -6126,6 +6149,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC274_FIXUP_DELL_BIND_DACS }, + [ALC298_FIXUP_TPT470_DOCK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_tpt470_dock, + .chained = true, + .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -6307,8 +6336,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), 
SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), + SND_PCI_QUIRK(0x17aa, 0x222d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), + SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), @@ -6329,7 +6366,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460), + SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ From de3e8191756434f6fa4bd69d38ceb5bacd0f1028 Mon Sep 17 00:00:00 2001 From: Jan-Marek Glogowski Date: Wed, 14 Feb 2018 11:29:15 +0100 Subject: [PATCH 2125/3326] ALSA: hda/realtek: PCI quirk for Fujitsu U7x7 commit fdcc968a3b290407bcba9d4c90e2fba6d8d928f1 upstream. These laptops have a combined jack to attach headsets, the U727 on the left, the U757 on the right, but a headsets microphone doesn't work. Using hdajacksensetest I found that pin 0x19 changed the present state when plugging the headset, in addition to 0x21, but didn't have the correct configuration (shown as "Not connected"). So this sets the configuration to the same values as the headphone pin 0x21 except for the device type microphone, which makes it work correctly. 
With the patch the configured pins for U727 are Pin 0x12 (Internal Mic, Mobile-In): present = No Pin 0x14 (Internal Speaker): present = No Pin 0x19 (Black Mic, Left side): present = No Pin 0x1d (Internal Aux): present = No Pin 0x21 (Black Headphone, Left side): present = No Signed-off-by: Jan-Marek Glogowski Cc: Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 80de62d8931498..b7acffdf16a447 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3355,6 +3355,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec, spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; } +static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21); + unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19); + + if (cfg_headphone && cfg_headset_mic == 0x411111f0) + snd_hda_codec_set_pincfg(codec, 0x19, + (cfg_headphone & ~AC_DEFCFG_DEVICE) | + (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT)); +} + static void alc269_fixup_hweq(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -5228,6 +5241,7 @@ enum { ALC269_FIXUP_LIFEBOOK_EXTMIC, ALC269_FIXUP_LIFEBOOK_HP_PIN, ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT, + ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC, ALC269_FIXUP_AMIC, ALC269_FIXUP_DMIC, ALC269VB_FIXUP_AMIC, @@ -5434,6 +5448,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_pincfg_no_hp_to_lineout, }, + [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_pincfg_U7x7_headset_mic, + }, [ALC269_FIXUP_AMIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -6308,6 +6326,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), + SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), From e1b13eb16c98eb289751098f8f79ff220b8bfdff Mon Sep 17 00:00:00 2001 From: Lassi Ylikojola Date: Fri, 9 Feb 2018 16:51:36 +0200 Subject: [PATCH 2126/3326] ALSA: usb-audio: add implicit fb quirk for Behringer UFX1204 commit 5e35dc0338d85ccebacf3f77eca1e5dea73155e8 upstream. Add quirk to ensure a sync endpoint is properly configured. This patch is a fix for same symptoms on Behringer UFX1204 as patch from Albertto Aquirre on Dec 8 2016 for Axe-Fx II. 
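The mechanism being extended here is a per-device table: for hardware that does not describe an explicit feedback endpoint, the driver keys on the USB vendor/product ID and hardwires which capture endpoint, interface and altsetting to use as the implicit feedback source. The sketch below shows that lookup idea in isolation; the endpoint and interface numbers mirror the UFX1204 case added by this patch, while the table structure, the altsetting index and the function itself are only illustrative, not the ALSA code.

```c
#include <stdio.h>
#include <stdint.h>

/* Illustration of a USB-ID-keyed implicit feedback quirk lookup. */
struct implicit_fb_quirk {
	uint16_t vendor, product;
	uint8_t  ep;		/* capture endpoint used for sync   */
	uint8_t  iface;		/* interface carrying that endpoint */
	uint8_t  altset;	/* altsetting index to select       */
};

static const struct implicit_fb_quirk quirks[] = {
	{ 0x1397, 0x0002, 0x81, 1, 1 },	/* Behringer UFX1204 */
};

static const struct implicit_fb_quirk *find_quirk(uint16_t v, uint16_t p)
{
	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].vendor == v && quirks[i].product == p)
			return &quirks[i];
	return NULL;
}

int main(void)
{
	const struct implicit_fb_quirk *q = find_quirk(0x1397, 0x0002);

	if (q)
		printf("sync via ep 0x%02x, iface %u, altsetting %u\n",
		       q->ep, q->iface, q->altset);
	return 0;
}
```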
Signed-off-by: Lassi Ylikojola Cc: Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/pcm.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index b9c9a19f9588a8..3cbfae6604f98b 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -352,6 +352,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ep = 0x86; iface = usb_ifnum_to_if(dev, 2); + if (!iface || iface->num_altsetting == 0) + return -EINVAL; + + alts = &iface->altsetting[1]; + goto add_sync_ep; + case USB_ID(0x1397, 0x0002): + ep = 0x81; + iface = usb_ifnum_to_if(dev, 1); + if (!iface || iface->num_altsetting == 0) return -EINVAL; From ec5a08abefe6aa239ada078e6041f222eac52196 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Sun, 11 Feb 2018 09:50:27 -0400 Subject: [PATCH 2127/3326] ALSA: usb: add more device quirks for USB DSD devices commit 7c74866baef1827e18f8269aec85030063520bd4 upstream. Add some more devices that need quirks to handle DSD modes correctly. Signed-off-by: Daniel Mack Reported-and-tested-by: Thomas Gresens Cc: Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/quirks.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 8d7db7cd4f8803..ed56cd307059f2 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1369,8 +1369,11 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; - /* Amanero Combo384 USB interface with native DSD support */ - case USB_ID(0x16d0, 0x071a): + /* Amanero Combo384 USB based DACs with native DSD support */ + case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */ + case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */ + case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */ + case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */ if (fp->altsetting == 2) { switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) { case 0x199: From 7466294dad8963e29f051d8ab2f4d3f67d2f62a6 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 12 Feb 2018 15:20:51 +0100 Subject: [PATCH 2128/3326] ALSA: seq: Fix racy pool initializations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d15d662e89fc667b90cd294b0eb45694e33144da upstream. ALSA sequencer core initializes the event pool on demand by invoking snd_seq_pool_init() when the first write happens and the pool is empty. Meanwhile user can reset the pool size manually via ioctl concurrently, and this may lead to UAF or out-of-bound accesses since the function tries to vmalloc / vfree the buffer. A simple fix is to just wrap the snd_seq_pool_init() call with the recently introduced client->ioctl_mutex; as the calls for snd_seq_pool_init() from other side are always protected with this mutex, we can avoid the race. 
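The race being closed is the classic unsynchronized lazy initialization: the first write allocates the pool on demand while a concurrent ioctl can rebuild the very same buffer. A minimal generic sketch of the fixed pattern, with both paths serialized on one mutex, looks like the following (plain pthreads and heap allocation standing in for the kernel primitives; none of these names are ALSA's):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Generic sketch of the fix: both the lazy first-write initialization and
 * the resize path take the same mutex, so allocation and free of the
 * buffer can never race. */
struct pool {
	pthread_mutex_t lock;
	size_t size;
	int *events;		/* stands in for the event buffer */
};

static int pool_init_locked(struct pool *p, size_t size)
{
	free(p->events);
	p->events = calloc(size, sizeof(*p->events));
	if (!p->events)
		return -1;
	p->size = size;
	return 0;
}

/* write path: allocate on first use, under the same lock as resize */
static int pool_ensure(struct pool *p, size_t size)
{
	int ret = 0;

	pthread_mutex_lock(&p->lock);
	if (!p->events)
		ret = pool_init_locked(p, size);
	pthread_mutex_unlock(&p->lock);
	return ret;
}

/* ioctl path: resizing also holds the lock */
static int pool_resize(struct pool *p, size_t size)
{
	int ret;

	pthread_mutex_lock(&p->lock);
	ret = pool_init_locked(p, size);
	pthread_mutex_unlock(&p->lock);
	return ret;
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };

	pool_ensure(&p, 32);
	pool_resize(&p, 64);
	printf("pool size: %zu\n", p.size);
	free(p.events);
	return 0;
}
```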
Reported-by: 范龙飞 Cc: Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/seq/seq_clientmgr.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index ac30fc1ab98bf0..dea11d1babf513 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -999,7 +999,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, { struct snd_seq_client *client = file->private_data; int written = 0, len; - int err = -EINVAL; + int err; struct snd_seq_event event; if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT)) @@ -1014,11 +1014,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, /* allocate the pool now if the pool is not allocated yet */ if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { - if (snd_seq_pool_init(client->pool) < 0) + mutex_lock(&client->ioctl_mutex); + err = snd_seq_pool_init(client->pool); + mutex_unlock(&client->ioctl_mutex); + if (err < 0) return -ENOMEM; } /* only process whole events */ + err = -EINVAL; while (count >= sizeof(struct snd_seq_event)) { /* Read in the event header from the user */ len = sizeof(event); From ac98d5a624fed603233421ddd808f2623f331ad8 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Sun, 11 Feb 2018 18:10:28 -0500 Subject: [PATCH 2129/3326] mvpp2: fix multicast address filter commit 7ac8ff95f48cbfa609a060fd6a1e361dd62feeb3 upstream. IPv6 doesn't work on the MacchiatoBIN board. It is caused by broken multicast address filter in the mvpp2 driver. The driver loads doesn't load any multicast entries if "allmulti" is not set. This condition should be reversed. The condition !netdev_mc_empty(dev) is useless (because netdev_for_each_mc_addr is nop if the list is empty). This patch also fixes a possible overflow of the multicast list - if mvpp2_prs_mac_da_accept fails, we set the allmulti flag and retry. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/marvell/mvpp2.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 1dd3a1264a53e8..06f3fe429d825b 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -6888,6 +6888,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev) int id = port->id; bool allmulti = dev->flags & IFF_ALLMULTI; +retry: mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); @@ -6895,9 +6896,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev) /* Remove all port->id's mcast enries */ mvpp2_prs_mcast_del_all(priv, id); - if (allmulti && !netdev_mc_empty(dev)) { - netdev_for_each_mc_addr(ha, dev) - mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); + if (!allmulti) { + netdev_for_each_mc_addr(ha, dev) { + if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) { + allmulti = true; + goto retry; + } + } } } From e4ea7c12229c5fec232120713abe74bd35164157 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 31 Jan 2018 22:24:45 +0000 Subject: [PATCH 2130/3326] usb: Move USB_UHCI_BIG_ENDIAN_* out of USB_SUPPORT commit ec897569ad7dbc6d595873a487c3fac23f463f76 upstream. 
Move the Kconfig symbols USB_UHCI_BIG_ENDIAN_MMIO and USB_UHCI_BIG_ENDIAN_DESC out of drivers/usb/host/Kconfig, which is conditional upon USB && USB_SUPPORT, so that it can be freely selected by platform Kconfig symbols in architecture code. For example once the MIPS_GENERIC platform selects are fixed in commit 2e6522c56552 ("MIPS: Fix typo BIG_ENDIAN to CPU_BIG_ENDIAN"), the MIPS 32r6_defconfig warns like so: warning: (MIPS_GENERIC) selects USB_UHCI_BIG_ENDIAN_MMIO which has unmet direct dependencies (USB_SUPPORT && USB) warning: (MIPS_GENERIC) selects USB_UHCI_BIG_ENDIAN_DESC which has unmet direct dependencies (USB_SUPPORT && USB) Fixes: 2e6522c56552 ("MIPS: Fix typo BIG_ENDIAN to CPU_BIG_ENDIAN") Signed-off-by: James Hogan Cc: Corentin Labbe Cc: Ralf Baechle Cc: Paul Burton Cc: linux-usb@vger.kernel.org Cc: linux-mips@linux-mips.org Acked-by: Greg Kroah-Hartman Patchwork: https://patchwork.linux-mips.org/patch/18559/ Signed-off-by: Greg Kroah-Hartman --- drivers/usb/Kconfig | 8 ++++++++ drivers/usb/host/Kconfig | 8 -------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 939a63bca82fb4..72eb3e41e3b65d 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -19,6 +19,14 @@ config USB_EHCI_BIG_ENDIAN_MMIO config USB_EHCI_BIG_ENDIAN_DESC bool +config USB_UHCI_BIG_ENDIAN_MMIO + bool + default y if SPARC_LEON + +config USB_UHCI_BIG_ENDIAN_DESC + bool + default y if SPARC_LEON + menuconfig USB_SUPPORT bool "USB support" depends on HAS_IOMEM diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index fa5692dec83206..92b19721b595e5 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -637,14 +637,6 @@ config USB_UHCI_ASPEED bool default y if ARCH_ASPEED -config USB_UHCI_BIG_ENDIAN_MMIO - bool - default y if SPARC_LEON - -config USB_UHCI_BIG_ENDIAN_DESC - bool - default y if SPARC_LEON - config USB_FHCI_HCD tristate "Freescale QE USB Host Controller support" depends on OF_GPIO && QE_GPIO && QUICC_ENGINE From 26f8c38bb466c1a2d232d7609fb4bfb4bc121678 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Thu, 25 Jan 2018 14:23:48 -0800 Subject: [PATCH 2131/3326] x86/mm, mm/hwpoison: Don't unconditionally unmap kernel 1:1 pages commit fd0e786d9d09024f67bd71ec094b110237dc3840 upstream. In the following commit: ce0fa3e56ad2 ("x86/mm, mm/hwpoison: Clear PRESENT bit for kernel 1:1 mappings of poison pages") ... we added code to memory_failure() to unmap the page from the kernel 1:1 virtual address space to avoid speculative access to the page logging additional errors. But memory_failure() may not always succeed in taking the page offline, especially if the page belongs to the kernel. This can happen if there are too many corrected errors on a page and either mcelog(8) or drivers/ras/cec.c asks to take a page offline. Since we remove the 1:1 mapping early in memory_failure(), we can end up with the page unmapped, but still in use. On the next access the kernel crashes :-( There are also various debug paths that call memory_failure() to simulate occurrence of an error. Since there is no actual error in memory, we don't need to map out the page for those cases. Revert most of the previous attempt and keep the solution local to arch/x86/kernel/cpu/mcheck/mce.c. Unmap the page only when: 1) there is a real error 2) memory_failure() succeeds. All of this only applies to 64-bit systems. 32-bit kernel doesn't map all of memory into kernel space. 
It isn't worth adding the code to unmap the piece that is mapped because nobody would run a 32-bit kernel on a machine that has recoverable machine checks. Signed-off-by: Tony Luck Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Cc: Denys Vlasenko Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Naoya Horiguchi Cc: Peter Zijlstra Cc: Robert (Persistent Memory) Cc: Thomas Gleixner Cc: linux-mm@kvack.org Cc: stable@vger.kernel.org #v4.14 Fixes: ce0fa3e56ad2 ("x86/mm, mm/hwpoison: Clear PRESENT bit for kernel 1:1 mappings of poison pages") Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/page_64.h | 4 ---- arch/x86/kernel/cpu/mcheck/mce-internal.h | 15 +++++++++++++++ arch/x86/kernel/cpu/mcheck/mce.c | 17 +++++++++++------ include/linux/mm_inline.h | 6 ------ mm/memory-failure.c | 2 -- 5 files changed, 26 insertions(+), 18 deletions(-) diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 4baa6bceb2325e..d652a380806597 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -52,10 +52,6 @@ static inline void clear_page(void *page) void copy_page(void *to, void *from); -#ifdef CONFIG_X86_MCE -#define arch_unmap_kpfn arch_unmap_kpfn -#endif - #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_VSYSCALL_EMULATION diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index aa0d5df9dc60e7..e956eb26706191 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } extern struct mca_config mca_cfg; +#ifndef CONFIG_X86_64 +/* + * On 32-bit systems it would be difficult to safely unmap a poison page + * from the kernel 1:1 map because there are no non-canonical addresses that + * we can use to refer to the address without risking a speculative access. + * However, this isn't much of an issue because: + * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which + * are only mapped into the kernel as needed + * 2) Few people would run a 32-bit kernel on a machine that supports + * recoverable errors because they have too much memory to boot 32-bit. + */ +static inline void mce_unmap_kpfn(unsigned long pfn) {} +#define mce_unmap_kpfn mce_unmap_kpfn +#endif + #endif /* __X86_MCE_INTERNAL_H__ */ diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index a9e898b71208f1..73237aa271ea06 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -106,6 +106,10 @@ static struct irq_work mce_irq_work; static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn); +#endif + /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. 
@@ -582,7 +586,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { pfn = mce->addr >> PAGE_SHIFT; - memory_failure(pfn, MCE_VECTOR, 0); + if (memory_failure(pfn, MCE_VECTOR, 0)) + mce_unmap_kpfn(pfn); } return NOTIFY_OK; @@ -1049,12 +1054,13 @@ static int do_memory_failure(struct mce *m) ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags); if (ret) pr_err("Memory error not recovered"); + else + mce_unmap_kpfn(m->addr >> PAGE_SHIFT); return ret; } -#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE) - -void arch_unmap_kpfn(unsigned long pfn) +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn) { unsigned long decoy_addr; @@ -1065,7 +1071,7 @@ void arch_unmap_kpfn(unsigned long pfn) * We would like to just call: * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1); * but doing that would radically increase the odds of a - * speculative access to the posion page because we'd have + * speculative access to the poison page because we'd have * the virtual address of the kernel 1:1 mapping sitting * around in registers. * Instead we get tricky. We create a non-canonical address @@ -1090,7 +1096,6 @@ void arch_unmap_kpfn(unsigned long pfn) if (set_memory_np(decoy_addr, 1)) pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); - } #endif diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index c30b32e3c86248..10191c28fc04ce 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page) #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) -#ifdef arch_unmap_kpfn -extern void arch_unmap_kpfn(unsigned long pfn); -#else -static __always_inline void arch_unmap_kpfn(unsigned long pfn) { } -#endif - #endif diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 88366626c0b7c7..1cd3b3569af8a7 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1146,8 +1146,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags) return 0; } - arch_unmap_kpfn(pfn); - orig_head = hpage = compound_head(p); num_poisoned_pages_inc(); From 875758d79d8f6182aa2c76c09eb810a7c54249ec Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Wed, 13 Dec 2017 14:21:37 +0100 Subject: [PATCH 2132/3326] scsi: core: check for device state in __scsi_remove_target() commit 81b6c999897919d5a16fedc018fe375dbab091c5 upstream. As it turned out device_get() doesn't use kref_get_unless_zero(), so we will be always getting a device pointer. Consequently, we need to check for the device state in __scsi_remove_target() to avoid tripping over deleted objects. Fixes: fbce4d97fd43 ("scsi: fixup kernel warning during rmmod()") Reported-by: Jason Yan Signed-off-by: Hannes Reinecke Reviewed-by: Bart Van Assche Reviewed-by: Ewan D. Milne Signed-off-by: Martin K. Petersen Cc: Max Ivanov Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/scsi_sysfs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index f796bd61f3f065..40406c162d0d7f 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1383,7 +1383,10 @@ static void __scsi_remove_target(struct scsi_target *starget) * check. 
*/ if (sdev->channel != starget->channel || - sdev->id != starget->id || + sdev->id != starget->id) + continue; + if (sdev->sdev_state == SDEV_DEL || + sdev->sdev_state == SDEV_CANCEL || !get_device(&sdev->sdev_gendev)) continue; spin_unlock_irqrestore(shost->host_lock, flags); From dcdc01c2edd4baf8096c70837fd583e1cc00d023 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Oct 2017 15:47:54 +0200 Subject: [PATCH 2133/3326] Bluetooth: BT_HCIUART now depends on SERIAL_DEV_BUS commit 05e89fb576f580ac95e7a5d00bdb34830b09671a upstream. It is no longer possible to build BT_HCIUART into the kernel when SERIAL_DEV_BUS is a loadable module, even if none of the SERIAL_DEV_BUS based implementations are selected: drivers/bluetooth/hci_ldisc.o: In function `hci_uart_set_flow_control': hci_ldisc.c:(.text+0xb40): undefined reference to `serdev_device_set_flow_control' hci_ldisc.c:(.text+0xb5c): undefined reference to `serdev_device_set_tiocm' This adds a dependency to avoid the broken configuration. Fixes: 7841d554809b ("Bluetooth: hci_uart_set_flow_control: Fix NULL deref when using serdev") Signed-off-by: Arnd Bergmann Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 98a60db8e5d14c..b33c8d6eb8c757 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -66,6 +66,7 @@ config BT_HCIBTSDIO config BT_HCIUART tristate "HCI UART driver" + depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS depends on TTY help Bluetooth HCI UART driver. @@ -80,7 +81,6 @@ config BT_HCIUART config BT_HCIUART_SERDEV bool depends on SERIAL_DEV_BUS && BT_HCIUART - depends on SERIAL_DEV_BUS=y || SERIAL_DEV_BUS=BT_HCIUART default y config BT_HCIUART_H4 From f5eab7c3d1003d0d8df8beefa531187452212c59 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 21 Dec 2017 22:30:07 +0100 Subject: [PATCH 2134/3326] ARM: dts: exynos: fix RTC interrupt for exynos5410 commit 5628a8ca14149ba4226e3bdce3a04c3b688435ad upstream. According to the comment added to exynos_dt_pmu_match[] in commit 8b283c025443 ("ARM: exynos4/5: convert pmu wakeup to stacked domains"), the RTC is not able to wake up the system through the PMU on Exynos5410, unlike Exynos5420. However, when the RTC DT node got added, it was a straight copy of the Exynos5420 node, which now causes a warning from dtc. This removes the incorrect interrupt-parent, which should get the interrupt working and avoid the warning. Fixes: e1e146b1b062 ("ARM: dts: exynos: Add RTC and I2C to Exynos5410") Signed-off-by: Arnd Bergmann Signed-off-by: Krzysztof Kozlowski Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/exynos5410.dtsi | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi index 7eab4bc07cec6c..7628bbb0232480 100644 --- a/arch/arm/boot/dts/exynos5410.dtsi +++ b/arch/arm/boot/dts/exynos5410.dtsi @@ -333,7 +333,6 @@ &rtc { clocks = <&clock CLK_RTC>; clock-names = "rtc"; - interrupt-parent = <&pmu_system_controller>; status = "disabled"; }; From 361bd5be7b845f10f6f282987088c1e62dd671d9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 10 Jan 2018 15:40:37 +0100 Subject: [PATCH 2135/3326] ARM: pxa/tosa-bt: add MODULE_LICENSE tag commit 3343647813fdf0f2409fbf5816ee3e0622168079 upstream. 
Without this tag, we get a build warning: WARNING: modpost: missing MODULE_LICENSE() in arch/arm/mach-pxa/tosa-bt.o For completeness, I'm also adding author and description fields. Acked-by: Robert Jarzmik Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-pxa/tosa-bt.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index 107f37210fb9de..83606087edc715 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c @@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = { }, }; module_platform_driver(tosa_bt_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dmitry Baryshkov"); +MODULE_DESCRIPTION("Bluetooth built-in chip control"); From 34aac3426553aed01e59ec30467e42d165af2142 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Tue, 7 Nov 2017 19:45:01 -0800 Subject: [PATCH 2136/3326] arm64: dts: msm8916: Add missing #phy-cells commit b0ab681285aa66064f2de5b74191c0cabba381ff upstream. Add a missing #phy-cells to the dsi-phy, to silence dtc warning. Cc: Archit Taneja Fixes: 305410ffd1b2 ("arm64: dts: msm8916: Add display support") Signed-off-by: Bjorn Andersson Reviewed-by: Archit Taneja Signed-off-by: Andy Gross Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/qcom/msm8916.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 8e5421a0b43d8b..61da6e65900b24 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -901,6 +901,7 @@ "dsi_phy_regulator"; #clock-cells = <1>; + #phy-cells = <0>; clocks = <&gcc GCC_MDSS_AHB_CLK>; clock-names = "iface_clk"; From 7367af9cf0e4e1502151d42535cae779046d77e4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 10 Jan 2018 17:10:11 +0100 Subject: [PATCH 2137/3326] ARM: dts: s5pv210: add interrupt-parent for ohci commit 5c1037196b9ee75897c211972de370ed1336ec8f upstream. The ohci-hcd node has an interrupt number but no interrupt-parent, leading to a warning with current dtc versions: arch/arm/boot/dts/s5pv210-aquila.dtb: Warning (interrupts_property): Missing interrupt-parent for /soc/ohci@ec300000 arch/arm/boot/dts/s5pv210-goni.dtb: Warning (interrupts_property): Missing interrupt-parent for /soc/ohci@ec300000 arch/arm/boot/dts/s5pv210-smdkc110.dtb: Warning (interrupts_property): Missing interrupt-parent for /soc/ohci@ec300000 arch/arm/boot/dts/s5pv210-smdkv210.dtb: Warning (interrupts_property): Missing interrupt-parent for /soc/ohci@ec300000 arch/arm/boot/dts/s5pv210-torbreck.dtb: Warning (interrupts_property): Missing interrupt-parent for /soc/ohci@ec300000 As seen from the related exynos dts files, the ohci and ehci controllers always share one interrupt number, and the number is the same here as well, so setting the same interrupt-parent is the reasonable solution here. 
Reviewed-by: Krzysztof Kozlowski Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/s5pv210.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi index 726c5d0dbd5bce..b290a5abb90163 100644 --- a/arch/arm/boot/dts/s5pv210.dtsi +++ b/arch/arm/boot/dts/s5pv210.dtsi @@ -463,6 +463,7 @@ compatible = "samsung,exynos4210-ohci"; reg = <0xec300000 0x100>; interrupts = <23>; + interrupt-parent = <&vic1>; clocks = <&clocks CLK_USB_HOST>; clock-names = "usbhost"; #address-cells = <1>; From 011c19041424dabd87e0bfa26b5ce20c1a0119e1 Mon Sep 17 00:00:00 2001 From: Matthias Brugger Date: Fri, 1 Dec 2017 13:07:07 +0100 Subject: [PATCH 2138/3326] arm: dts: mt7623: Update ethsys binding commit 76a09ce214addb8ddc0f6d50dc1106a5f829e713 upstream. The ethsys binding misses the reset-cells, this patch adds this property. Reviewed-by: Rob Herring Signed-off-by: Matthias Brugger Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/mt7623.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi index ec8a07415cb388..36983a7d7cfd49 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi @@ -753,6 +753,7 @@ "syscon"; reg = <0 0x1b000000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; eth: ethernet@1b100000 { From 5782df0a58b6748cca77cd0a7684c8c7e2401af2 Mon Sep 17 00:00:00 2001 From: Matthias Brugger Date: Fri, 1 Dec 2017 13:07:08 +0100 Subject: [PATCH 2139/3326] arm: dts: mt2701: Add reset-cells commit ae72e95b5e4ded145bfc6926ad9457b74e3af41a upstream. The hifsys and ethsys needs the definition of the reset-cells property. Fix this. Reviewed-by: Rob Herring Signed-off-by: Matthias Brugger Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/mt2701.dtsi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi index afe12e5b51f953..f936000f069965 100644 --- a/arch/arm/boot/dts/mt2701.dtsi +++ b/arch/arm/boot/dts/mt2701.dtsi @@ -593,6 +593,7 @@ compatible = "mediatek,mt2701-hifsys", "syscon"; reg = <0 0x1a000000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; usb0: usb@1a1c0000 { @@ -677,6 +678,7 @@ compatible = "mediatek,mt2701-ethsys", "syscon"; reg = <0 0x1b000000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; eth: ethernet@1b100000 { From bbd577fec485647aaad0e2ba6142f2fec363b946 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 2 Jan 2018 08:57:17 +0100 Subject: [PATCH 2140/3326] ARM: dts: Delete bogus reference to the charlcd commit 586b2a4befad88cd87b372a1cea01e58c6811ea9 upstream. The EB MP board probably has a character LCD but the board manual does not really state which IRQ it has assigned to this device. The invalid assignment was a mistake by me during submission of the DTSI where I was looking for the reference, didn't find it and didn't fill it in. Delete this for now: it can probably be fixed but that requires access to the actual board for some trial-and-error experiments. 
Reported-by: Arnd Bergmann Signed-off-by: Linus Walleij Signed-off-by: Olof Johansson Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/arm-realview-eb-mp.dtsi | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi index 7b8d90b7aeea20..29b636fce23f3e 100644 --- a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi +++ b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi @@ -150,11 +150,6 @@ interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>; }; -&charlcd { - interrupt-parent = <&intc>; - interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; -}; - &serial0 { interrupt-parent = <&intc>; interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>; From 11785a9ece1b10d43156ae79e24f709053644bc5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 30 Nov 2017 06:08:05 -0500 Subject: [PATCH 2141/3326] media: r820t: fix r820t_write_reg for KASAN commit 16c3ada89cff9a8c2a0eea34ffa1aa20af3f6008 upstream. With CONFIG_KASAN, we get an overly long stack frame due to inlining the register access functions: drivers/media/tuners/r820t.c: In function 'generic_set_freq.isra.7': drivers/media/tuners/r820t.c:1334:1: error: the frame size of 2880 bytes is larger than 2048 bytes [-Werror=frame-larger-than=] This is caused by a gcc bug that has now been fixed in gcc-8. To work around the problem, we can pass the register data through a local variable that older gcc versions can optimize out as well. Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 Signed-off-by: Arnd Bergmann Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/tuners/r820t.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c index ba80376a3b8646..d097eb04a0e923 100644 --- a/drivers/media/tuners/r820t.c +++ b/drivers/media/tuners/r820t.c @@ -396,9 +396,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val, return 0; } -static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) +static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) { - return r820t_write(priv, reg, &val, 1); + u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */ + + return r820t_write(priv, reg, &tmp, 1); } static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) @@ -411,17 +413,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) return -EINVAL; } -static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, +static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, u8 bit_mask) { + u8 tmp = val; int rc = r820t_read_cache_reg(priv, reg); if (rc < 0) return rc; - val = (rc & ~bit_mask) | (val & bit_mask); + tmp = (rc & ~bit_mask) | (tmp & bit_mask); - return r820t_write(priv, reg, &val, 1); + return r820t_write(priv, reg, &tmp, 1); } static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len) From ffe075e233bb8049ba7d3e362b37bc97ecf7e6fa Mon Sep 17 00:00:00 2001 From: yangbo lu Date: Thu, 21 Sep 2017 16:43:31 +0800 Subject: [PATCH 2142/3326] mmc: sdhci-of-esdhc: disable SD clock for clock value 0 commit dd3f6983b4a468efca9e8caa0e2b4aa20946d801 upstream. SD clock should be disabled for clock value 0. It's not right to just return. This may cause failure of signal voltage switching. 
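The pattern behind this fix is worth spelling out: instead of returning early when the requested clock is 0, the gate bit is cleared, and re-enabling later polls the "clock stable" bit with a hard 20 ms ceiling. Below is a minimal user-space sketch of that bounded-poll idea only; read_status(), STABLE_BIT and the timings are invented for illustration and are not the eSDHC register layout or driver code.

    /*
     * Bounded-poll sketch: wait for a status bit with a hard timeout
     * instead of spinning forever.  All names here are placeholders.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define STABLE_BIT (1u << 3)

    static uint32_t read_status(void)
    {
        /* Pretend the clock becomes stable immediately. */
        return STABLE_BIT;
    }

    static int64_t now_us(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    static bool wait_clock_stable(int timeout_ms)
    {
        int64_t deadline = now_us() + (int64_t)timeout_ms * 1000;

        while (!(read_status() & STABLE_BIT)) {
            if (now_us() > deadline)
                return false;   /* report the failure, don't hang */
            usleep(10);
        }
        return true;
    }

    int main(void)
    {
        printf("clock stable: %s\n", wait_clock_stable(20) ? "yes" : "no");
        return 0;
    }

The same shape (compute the deadline first, poll, sleep briefly) is what keeps a wedged clock from hanging the caller, which is what the ktime-based loop in the patch does.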
Signed-off-by: Yangbo Lu Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson Cc: Rasmus Villemoes Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci-of-esdhc.c | 58 ++++++++++++++++--------------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index d96a057a7db88e..1f424374bbbb5d 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -458,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) return clock / 256 / 16; } +static void esdhc_clock_enable(struct sdhci_host *host, bool enable) +{ + u32 val; + ktime_t timeout; + + val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + + if (enable) + val |= ESDHC_CLOCK_SDCLKEN; + else + val &= ~ESDHC_CLOCK_SDCLKEN; + + sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); + + /* Wait max 20 ms */ + timeout = ktime_add_ms(ktime_get(), 20); + val = ESDHC_CLOCK_STABLE; + while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { + if (ktime_after(ktime_get(), timeout)) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + udelay(10); + } +} + static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -469,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; - if (clock == 0) + if (clock == 0) { + esdhc_clock_enable(host, false); return; + } /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ if (esdhc->vendor_ver < VENDOR_V_23) @@ -558,33 +587,6 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) sdhci_writel(host, ctrl, ESDHC_PROCTL); } -static void esdhc_clock_enable(struct sdhci_host *host, bool enable) -{ - u32 val; - ktime_t timeout; - - val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - - if (enable) - val |= ESDHC_CLOCK_SDCLKEN; - else - val &= ~ESDHC_CLOCK_SDCLKEN; - - sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); - - /* Wait max 20 ms */ - timeout = ktime_add_ms(ktime_get(), 20); - val = ESDHC_CLOCK_STABLE; - while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { - if (ktime_after(ktime_get(), timeout)) { - pr_err("%s: Internal clock never stabilised.\n", - mmc_hostname(host->mmc)); - break; - } - udelay(10); - } -} - static void esdhc_reset(struct sdhci_host *host, u8 mask) { sdhci_reset(host, mask); From 44e47693e3a680114332420a92281861bba5a9a4 Mon Sep 17 00:00:00 2001 From: "yinbo.zhu" Date: Wed, 8 Nov 2017 17:09:50 +0800 Subject: [PATCH 2143/3326] mmc: sdhci-of-esdhc: fix eMMC couldn't work after kexec commit 97618aca1440b5addc5c3d78659d3e176be23b80 upstream. The bit eSDHC_TBCTL[TB_EN] couldn't be reset by eSDHC_SYSCTL[RSTA] which is used to reset for all. The driver should make sure it's cleared before card initialization, otherwise the initialization would fail. 
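A sketch of the point being made, assuming a made-up register map: a bit that the global reset leaves untouched has to be cleared with an explicit read-modify-write before the card is initialized. regs[], TBCTL and TB_EN below are placeholders for illustration, not the driver's accessors.

    #include <stdint.h>
    #include <stdio.h>

    #define TBCTL 0
    #define TB_EN (1u << 2)

    static uint32_t regs[1] = { TB_EN };    /* pretend TB_EN survived reset */

    static uint32_t reg_read(unsigned int off)           { return regs[off]; }
    static void     reg_write(unsigned int off, uint32_t v) { regs[off] = v; }

    static void clear_tuning_block(void)
    {
        uint32_t val = reg_read(TBCTL);

        val &= ~TB_EN;          /* clear only the sticky bit */
        reg_write(TBCTL, val);
    }

    int main(void)
    {
        clear_tuning_block();
        printf("TBCTL = 0x%x\n", reg_read(TBCTL));
        return 0;
    }

As the follow-up patch below shows, the right place for such a clear is the reset handler rather than probe, so it also runs after resume.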
Signed-off-by: yinbo.zhu Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson Cc: Rasmus Villemoes Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci-of-esdhc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 1f424374bbbb5d..d74030f3bd1268 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -785,6 +785,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) pltfm_host = sdhci_priv(host); esdhc = sdhci_pltfm_priv(pltfm_host); + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_EN; + sdhci_writel(host, val, ESDHC_TBCTL); + host_ver = sdhci_readw(host, SDHCI_HOST_VERSION); esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; From 9a2b3777bd5335e34ec585ecc8e29d575dfc8c13 Mon Sep 17 00:00:00 2001 From: "yinbo.zhu" Date: Fri, 1 Dec 2017 15:09:34 +0800 Subject: [PATCH 2144/3326] mmc: sdhci-of-esdhc: fix the mmc error after sleep on ls1046ardb commit f2bc600008bd6f7f5d0b6b56238d14f95cd454d2 upstream. When system wakes up from sleep on ls1046ardb, the SD operation fails with mmc error messages since ESDHC_TB_EN bit couldn't be cleaned by eSDHC_SYSCTL[RSTA]. It's proper to clean this bit in esdhc_reset() rather than in probe. Signed-off-by: yinbo.zhu Acked-by: Yangbo Lu Acked-by: Adrian Hunter Signed-off-by: Ulf Hansson Cc: Rasmus Villemoes Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci-of-esdhc.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index d74030f3bd1268..4ffa6b173a21ef 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -589,10 +589,18 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) static void esdhc_reset(struct sdhci_host *host, u8 mask) { + u32 val; + sdhci_reset(host, mask); sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + + if (mask & SDHCI_RESET_ALL) { + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_EN; + sdhci_writel(host, val, ESDHC_TBCTL); + } } /* The SCFG, Supplemental Configuration Unit, provides SoC specific @@ -785,10 +793,6 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) pltfm_host = sdhci_priv(host); esdhc = sdhci_pltfm_priv(pltfm_host); - val = sdhci_readl(host, ESDHC_TBCTL); - val &= ~ESDHC_TB_EN; - sdhci_writel(host, val, ESDHC_TBCTL); - host_ver = sdhci_readw(host, SDHCI_HOST_VERSION); esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; From f1a81c0eab38a1cf8887cd10731c77ad547cc17c Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Fri, 5 Jan 2018 14:55:33 -0600 Subject: [PATCH 2145/3326] ASoC: acpi: fix machine driver selection based on quirk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5c256045b87b8aa8e5bc9d2e2fdc0802351c1f99 upstream. The ACPI/machine-driver code refactoring introduced in 4.13 introduced a regression for cases where we need a DMI-based quirk to select the machine driver (the BIOS reports an invalid HID). The fix is just to make sure the results of the quirk are actually used. 
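The selection logic after the fix reduces to: match the HID, then, if a quirk callback is present, return whatever the callback hands back (possibly NULL) instead of the raw table entry. A stand-alone sketch follows, with simplified types and an invented dmi_quirk() callback rather than the ASoC structures.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct mach {
        const char *id;
        struct mach *(*machine_quirk)(struct mach *m);
    };

    static struct mach *dmi_quirk(struct mach *m)
    {
        /* A real quirk may redirect to another entry or return NULL. */
        return m;
    }

    static struct mach machines[] = {
        { .id = "INT343A", .machine_quirk = dmi_quirk },
        { .id = NULL },                 /* table terminator */
    };

    static struct mach *find_machine(const char *hid)
    {
        struct mach *m;

        for (m = machines; m->id; m++) {
            if (strcmp(m->id, hid))
                continue;
            if (m->machine_quirk)
                m = m->machine_quirk(m);    /* actually use the result */
            return m;
        }
        return NULL;
    }

    int main(void)
    {
        struct mach *m = find_machine("INT343A");

        printf("selected: %s\n", m ? m->id : "(none)");
        return 0;
    }

The bug was exactly the missing "m = ..." assignment: the quirk ran, but its result was thrown away.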
Fixes: 54746dabf770 ('ASoC: Improve machine driver selection based on quirk data') Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=96691 Tested-by: Nicole Færber Signed-off-by: Pierre-Louis Bossart Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- sound/soc/intel/common/sst-match-acpi.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/sound/soc/intel/common/sst-match-acpi.c b/sound/soc/intel/common/sst-match-acpi.c index 56d26f36a3cba1..b4a929562218ec 100644 --- a/sound/soc/intel/common/sst-match-acpi.c +++ b/sound/soc/intel/common/sst-match-acpi.c @@ -83,11 +83,9 @@ struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines) for (mach = machines; mach->id[0]; mach++) { if (sst_acpi_check_hid(mach->id) == true) { - if (mach->machine_quirk == NULL) - return mach; - - if (mach->machine_quirk(mach) != NULL) - return mach; + if (mach->machine_quirk) + mach = mach->machine_quirk(mach); + return mach; } } return NULL; From e506ac1dab86c88b03f62404055a35262dda310d Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 14 Jan 2018 18:35:40 +0200 Subject: [PATCH 2146/3326] ovl: hash directory inodes for fsnotify commit 31747eda41ef3c30c09c5c096b380bf54013746a upstream. fsnotify pins a watched directory inode in cache, but if directory dentry is released, new lookup will allocate a new dentry and a new inode. Directory events will be notified on the new inode, while fsnotify listener is watching the old pinned inode. Hash all directory inodes to reuse the pinned inode on lookup. Pure upper dirs are hashes by real upper inode, merge and lower dirs are hashed by real lower inode. The reference to lower inode was being held by the lower dentry object in the overlay dentry (oe->lowerstack[0]). Releasing the overlay dentry may drop lower inode refcount to zero. Add a refcount on behalf of the overlay inode to prevent that. As a by-product, hashing directory inodes also detects multiple redirected dirs to the same lower dir and uncovered redirected dir target on and returns -ESTALE on lookup. The reported issue dates back to initial version of overlayfs, but this patch depends on ovl_inode code that was introduced in kernel v4.13. Cc: #v4.13 Reported-by: Niklas Cassel Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Tested-by: Niklas Cassel Signed-off-by: Greg Kroah-Hartman --- fs/overlayfs/inode.c | 37 +++++++++++++++++++++++++++---------- fs/overlayfs/super.c | 1 + fs/overlayfs/util.c | 4 ++-- 3 files changed, 30 insertions(+), 12 deletions(-) diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 321511ed8c4235..d60900b615f9c7 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -579,6 +579,16 @@ static int ovl_inode_set(struct inode *inode, void *data) static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry, struct dentry *upperdentry) { + if (S_ISDIR(inode->i_mode)) { + /* Real lower dir moved to upper layer under us? */ + if (!lowerdentry && ovl_inode_lower(inode)) + return false; + + /* Lookup of an uncovered redirect origin? */ + if (!upperdentry && ovl_inode_upper(inode)) + return false; + } + /* * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL. * This happens when finding a copied up overlay inode for a renamed @@ -606,6 +616,8 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry, struct inode *inode; /* Already indexed or could be indexed on copy up? 
*/ bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry)); + struct dentry *origin = indexed ? lowerdentry : NULL; + bool is_dir; if (WARN_ON(upperdentry && indexed && !lowerdentry)) return ERR_PTR(-EIO); @@ -614,15 +626,19 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry, realinode = d_inode(lowerdentry); /* - * Copy up origin (lower) may exist for non-indexed upper, but we must - * not use lower as hash key in that case. - * Hash inodes that are or could be indexed by origin inode and - * non-indexed upper inodes that could be hard linked by upper inode. + * Copy up origin (lower) may exist for non-indexed non-dir upper, but + * we must not use lower as hash key in that case. + * Hash non-dir that is or could be indexed by origin inode. + * Hash dir that is or could be merged by origin inode. + * Hash pure upper and non-indexed non-dir by upper inode. */ - if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) { - struct inode *key = d_inode(indexed ? lowerdentry : - upperdentry); - unsigned int nlink; + is_dir = S_ISDIR(realinode->i_mode); + if (is_dir) + origin = lowerdentry; + + if (upperdentry || origin) { + struct inode *key = d_inode(origin ?: upperdentry); + unsigned int nlink = is_dir ? 1 : realinode->i_nlink; inode = iget5_locked(dentry->d_sb, (unsigned long) key, ovl_inode_test, ovl_inode_set, key); @@ -643,8 +659,9 @@ struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry, goto out; } - nlink = ovl_get_nlink(lowerdentry, upperdentry, - realinode->i_nlink); + /* Recalculate nlink for non-dir due to indexing */ + if (!is_dir) + nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink); set_nlink(inode, nlink); } else { inode = new_inode(dentry->d_sb); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index f5738e96a052fe..b8f8d666e8d45b 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -200,6 +200,7 @@ static void ovl_destroy_inode(struct inode *inode) struct ovl_inode *oi = OVL_I(inode); dput(oi->__upperdentry); + iput(oi->lower); kfree(oi->redirect); ovl_dir_cache_free(inode); mutex_destroy(&oi->lock); diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index b9b239fa5cfd28..f60ce2e04df0ec 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -253,7 +253,7 @@ void ovl_inode_init(struct inode *inode, struct dentry *upperdentry, if (upperdentry) OVL_I(inode)->__upperdentry = upperdentry; if (lowerdentry) - OVL_I(inode)->lower = d_inode(lowerdentry); + OVL_I(inode)->lower = igrab(d_inode(lowerdentry)); ovl_copyattr(d_inode(upperdentry ?: lowerdentry), inode); } @@ -269,7 +269,7 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry) */ smp_wmb(); OVL_I(inode)->__upperdentry = upperdentry; - if (!S_ISDIR(upperinode->i_mode) && inode_unhashed(inode)) { + if (inode_unhashed(inode)) { inode->i_private = upperinode; __insert_inode_hash(inode, (unsigned long) upperinode); } From 58056a531e440061142a4481358ebb365193df5e Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 22 Feb 2018 15:42:33 +0100 Subject: [PATCH 2147/3326] Linux 4.14.21 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 33176140f1336e..68d70485b08869 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 20 +SUBLEVEL = 21 EXTRAVERSION = NAME = Petit Gorille From ebf7d035c39a70756a9b68a6a670ee8a39ecc586 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 4 Oct 
2017 16:15:59 +0200 Subject: [PATCH 2148/3326] usb: core: Add a helper function to check the validity of EP type in URB commit e901b9873876ca30a09253731bd3a6b00c44b5b0 upstream. This patch adds a new helper function to perform a sanity check of the given URB to see whether it contains a valid endpoint. It's a light- weight version of what usb_submit_urb() does, but without the kernel warning followed by the stack trace, just returns an error code. Especially for a driver that doesn't parse the descriptor but fills the URB with the fixed endpoint (e.g. some quirks for non-compliant devices), this kind of check is preferable at the probe phase before actually submitting the urb. Tested-by: Andrey Konovalov Acked-by: Greg Kroah-Hartman Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/urb.c | 30 ++++++++++++++++++++++++++---- include/linux/usb.h | 2 ++ 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 47903d510955b0..8b800e34407bf8 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -187,6 +187,31 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb); /*-------------------------------------------------------------------*/ +static const int pipetypes[4] = { + PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT +}; + +/** + * usb_urb_ep_type_check - sanity check of endpoint in the given urb + * @urb: urb to be checked + * + * This performs a light-weight sanity check for the endpoint in the + * given urb. It returns 0 if the urb contains a valid endpoint, otherwise + * a negative error code. + */ +int usb_urb_ep_type_check(const struct urb *urb) +{ + const struct usb_host_endpoint *ep; + + ep = usb_pipe_endpoint(urb->dev, urb->pipe); + if (!ep) + return -EINVAL; + if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(usb_urb_ep_type_check); + /** * usb_submit_urb - issue an asynchronous transfer request for an endpoint * @urb: pointer to the urb describing the request @@ -326,9 +351,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb); */ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) { - static int pipetypes[4] = { - PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT - }; int xfertype, max; struct usb_device *dev; struct usb_host_endpoint *ep; @@ -444,7 +466,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) */ /* Check that the pipe's type matches the endpoint's type */ - if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) + if (usb_urb_ep_type_check(urb)) dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", usb_pipetype(urb->pipe), pipetypes[xfertype]); diff --git a/include/linux/usb.h b/include/linux/usb.h index 9c63792a8134f7..4192a1755ccbaf 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1729,6 +1729,8 @@ static inline int usb_urb_dir_out(struct urb *urb) return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT; } +int usb_urb_ep_type_check(const struct urb *urb); + void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags, dma_addr_t *dma); void usb_free_coherent(struct usb_device *dev, size_t size, From ed25667f33032b7715bc4af67e6728020fe75f5e Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 23 Jan 2018 17:27:25 +0800 Subject: [PATCH 2149/3326] vhost: use mutex_lock_nested() in vhost_dev_lock_vqs() commit e9cb4239134c860e5f92c75bf5321bd377bb505b upstream. We used to call mutex_lock() in vhost_dev_lock_vqs() which tries to hold mutexes of all virtqueues. 
This may confuse lockdep to report a possible deadlock because of trying to hold locks belong to same class. Switch to use mutex_lock_nested() to avoid false positive. Fixes: 6b1e6cc7855b0 ("vhost: new device IOTLB API") Reported-by: syzbot+dbb7c1161485e61b0241@syzkaller.appspotmail.com Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/vhost/vhost.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index d6dbb28245e6d2..a827c1a684a9ab 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d) { int i = 0; for (i = 0; i < d->nvqs; ++i) - mutex_lock(&d->vqs[i]->mutex); + mutex_lock_nested(&d->vqs[i]->mutex, i); } static void vhost_dev_unlock_vqs(struct vhost_dev *d) From 44890e9ff771ef11777b2d1ebf8589255eb12502 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Wed, 24 Jan 2018 12:35:41 -0800 Subject: [PATCH 2150/3326] kcm: Check if sk_user_data already set in kcm_attach commit e5571240236c5652f3e079b1d5866716a7ad819c upstream. This is needed to prevent sk_user_data being overwritten. The check is done under the callback lock. This should prevent a socket from being attached twice to a KCM mux. It also prevents a socket from being attached for other use cases of sk_user_data as long as the other cases set sk_user_data under the lock. Followup work is needed to unify all the use cases of sk_user_data to use the same locking. Reported-by: syzbot+114b15f2be420a8886c3@syzkaller.appspotmail.com Fixes: ab7ac4eb9832 ("kcm: Kernel Connection Multiplexor module") Signed-off-by: Tom Herbert Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/kcm/kcmsock.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index c5fa634e63ca2c..51a5e39693a7fb 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1405,9 +1405,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock, return err; } - sock_hold(csk); - write_lock_bh(&csk->sk_callback_lock); + + /* Check if sk_user_data is aready by KCM or someone else. + * Must be done under lock to prevent race conditions. + */ + if (csk->sk_user_data) { + write_unlock_bh(&csk->sk_callback_lock); + strp_done(&psock->strp); + kmem_cache_free(kcm_psockp, psock); + return -EALREADY; + } + psock->save_data_ready = csk->sk_data_ready; psock->save_write_space = csk->sk_write_space; psock->save_state_change = csk->sk_state_change; @@ -1415,8 +1424,11 @@ static int kcm_attach(struct socket *sock, struct socket *csock, csk->sk_data_ready = psock_data_ready; csk->sk_write_space = psock_write_space; csk->sk_state_change = psock_state_change; + write_unlock_bh(&csk->sk_callback_lock); + sock_hold(csk); + /* Finished initialization, now add the psock to the MUX. */ spin_lock_bh(&mux->lock); head = &mux->psocks; From 6c0b71202f83f7851f173caee9059f830e3b0dfd Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Wed, 24 Jan 2018 12:35:40 -0800 Subject: [PATCH 2151/3326] kcm: Only allow TCP sockets to be attached to a KCM mux commit 581e7226a5d43f629eb6399a121f85f6a15f81be upstream. TCP sockets for IPv4 and IPv6 that are not listeners or in closed stated are allowed to be attached to a KCM mux. 
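Taken together, the two KCM fixes above amount to a set of preconditions checked before a socket may be attached to a mux. A condensed user-space sketch of those checks follows; the enum values and struct fields are illustrative only, and it omits the sk_callback_lock that the real code holds while testing sk_user_data.

    #include <stdio.h>

    enum family { FAM_INET, FAM_INET6, FAM_OTHER };
    enum proto  { PROTO_TCP, PROTO_UDP };
    enum state  { ESTABLISHED, LISTEN, CLOSED };

    struct csk {
        enum family family;
        enum proto  protocol;
        enum state  state;
        void       *user_data;      /* must still be unclaimed */
    };

    static int can_attach(const struct csk *csk)
    {
        if (csk->family != FAM_INET && csk->family != FAM_INET6)
            return -1;              /* -EOPNOTSUPP */
        if (csk->protocol != PROTO_TCP)
            return -1;              /* -EOPNOTSUPP */
        if (csk->state == LISTEN || csk->state == CLOSED)
            return -1;              /* -EOPNOTSUPP */
        if (csk->user_data)
            return -2;              /* -EALREADY: already attached */
        return 0;
    }

    int main(void)
    {
        struct csk ok  = { FAM_INET, PROTO_TCP, ESTABLISHED, NULL };
        struct csk bad = { FAM_INET, PROTO_UDP, ESTABLISHED, NULL };

        printf("%d %d\n", can_attach(&ok), can_attach(&bad));
        return 0;
    }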
Fixes: ab7ac4eb9832 ("kcm: Kernel Connection Multiplexor module") Reported-by: syzbot+8865eaff7f9acd593945@syzkaller.appspotmail.com Signed-off-by: Tom Herbert Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/kcm/kcmsock.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 51a5e39693a7fb..58d53b907d5397 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1387,8 +1387,13 @@ static int kcm_attach(struct socket *sock, struct socket *csock, if (!csk) return -EINVAL; - /* We must prevent loops or risk deadlock ! */ - if (csk->sk_family == PF_KCM) + /* Only allow TCP sockets to be attached for now */ + if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) || + csk->sk_protocol != IPPROTO_TCP) + return -EOPNOTSUPP; + + /* Don't allow listeners or closed sockets */ + if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) return -EOPNOTSUPP; psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); From ddf0936b9eefe0af6d046cd7d6a9212478812c9a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 18 Jan 2018 01:15:21 +0100 Subject: [PATCH 2152/3326] bpf: mark dst unknown on inconsistent {s, u}bounds adjustments commit 6f16101e6a8b4324c36e58a29d9e0dbb287cdedb upstream. syzkaller generated a BPF proglet and triggered a warning with the following: 0: (b7) r0 = 0 1: (d5) if r0 s<= 0x0 goto pc+0 R0=inv0 R1=ctx(id=0,off=0,imm=0) R10=fp0 2: (1f) r0 -= r1 R0=inv0 R1=ctx(id=0,off=0,imm=0) R10=fp0 verifier internal error: known but bad sbounds What happens is that in the first insn, r0's min/max value are both 0 due to the immediate assignment, later in the jsle test the bounds are updated for the min value in the false path, meaning, they yield smin_val = 1, smax_val = 0, and when ctx pointer is subtracted from r0, verifier bails out with the internal error and throwing a WARN since smin_val != smax_val for the known constant. For min_val > max_val scenario it means that reg_set_min_max() and reg_set_min_max_inv() (which both refine existing bounds) demonstrated that such branch cannot be taken at runtime. In above scenario for the case where it will be taken, the existing [0, 0] bounds are kept intact. Meaning, the rejection is not due to a verifier internal error, and therefore the WARN() is not necessary either. We could just reject such cases in adjust_{ptr,scalar}_min_max_vals() when either known scalars have smin_val != smax_val or umin_val != umax_val or any scalar reg with bounds smin_val > smax_val or umin_val > umax_val. However, there may be a small risk of breakage of buggy programs, so handle this more gracefully and in adjust_{ptr,scalar}_min_max_vals() just taint the dst reg as unknown scalar when we see ops with such kind of src reg. 
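The dead-branch situation the patch describes can be reproduced with a few lines of bookkeeping: a register known to be the constant 0 gets its signed minimum raised to 1 on the false edge of "if r0 s<= 0", leaving smin > smax. The following is a toy model of that arithmetic only, not the verifier's code.

    #include <stdbool.h>
    #include <stdio.h>

    struct bounds { long long smin, smax; };

    /* False branch of "if (reg s<= val)" implies reg > val. */
    static void refine_false_branch_jsle(struct bounds *b, long long val)
    {
        if (b->smin < val + 1)
            b->smin = val + 1;
    }

    static bool bounds_impossible(const struct bounds *b)
    {
        return b->smin > b->smax;   /* contradiction: branch never taken */
    }

    int main(void)
    {
        struct bounds r0 = { 0, 0 };    /* r0 = 0, a known constant */

        refine_false_branch_jsle(&r0, 0);
        printf("smin=%lld smax=%lld dead=%d\n",
               r0.smin, r0.smax, bounds_impossible(&r0));
        return 0;
    }

Since such a state can only belong to a path that is never executed, tainting the destination register as unknown (rather than WARNing) is sufficient.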
Reported-by: syzbot+6d362cadd45dc0a12ba4@syzkaller.appspotmail.com Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 25 ++-- tools/testing/selftests/bpf/test_verifier.c | 123 +++++++++++++++++++- 2 files changed, 138 insertions(+), 10 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f9339c3219bc4d..b5ae6488b8907c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1865,15 +1865,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, dst_reg = ®s[dst]; - if (WARN_ON_ONCE(known && (smin_val != smax_val))) { - print_verifier_state(&env->cur_state); - verbose("verifier internal error: known but bad sbounds\n"); - return -EINVAL; - } - if (WARN_ON_ONCE(known && (umin_val != umax_val))) { - print_verifier_state(&env->cur_state); - verbose("verifier internal error: known but bad ubounds\n"); - return -EINVAL; + if ((known && (smin_val != smax_val || umin_val != umax_val)) || + smin_val > smax_val || umin_val > umax_val) { + /* Taint dst register if offset had invalid bounds derived from + * e.g. dead branches. + */ + __mark_reg_unknown(dst_reg); + return 0; } if (BPF_CLASS(insn->code) != BPF_ALU64) { @@ -2075,6 +2073,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); + if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || + smin_val > smax_val || umin_val > umax_val) { + /* Taint dst register if offset had invalid bounds derived from + * e.g. dead branches. + */ + __mark_reg_unknown(dst_reg); + return 0; + } + if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { __mark_reg_unknown(dst_reg); diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 16299939d3fff5..c55d265489ca7c 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -6534,7 +6534,7 @@ static struct bpf_test tests[] = { BPF_JMP_IMM(BPF_JA, 0, 0, -7), }, .fixup_map1 = { 4 }, - .errstr = "unbounded min value", + .errstr = "R0 invalid mem access 'inv'", .result = REJECT, }, { @@ -7714,6 +7714,127 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, }, + { + "check deducing bounds from const, 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 tried to subtract pointer from scalar", + }, + { + "check deducing bounds from const, 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "check deducing bounds from const, 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 tried to subtract pointer from scalar", + }, + { + "check deducing bounds from const, 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + 
}, + { + "check deducing bounds from const, 5", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 tried to subtract pointer from scalar", + }, + { + "check deducing bounds from const, 6", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 tried to subtract pointer from scalar", + }, + { + "check deducing bounds from const, 7", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, ~0), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "dereference of modified ctx ptr", + }, + { + "check deducing bounds from const, 8", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, ~0), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "dereference of modified ctx ptr", + }, + { + "check deducing bounds from const, 9", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 tried to subtract pointer from scalar", + }, + { + "check deducing bounds from const, 10", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0), + /* Marks reg as unknown. */ + BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "math between ctx pointer and register with unbounded min value is not allowed", + }, { "XDP pkt read, pkt_end <= pkt_data', bad access 2", .insns = { From b9e441e2e6346284120c9e8e24c4050c245b8895 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 15 Jan 2018 09:58:27 +0100 Subject: [PATCH 2153/3326] cfg80211: check dev_set_name() return value commit 59b179b48ce2a6076448a44531242ac2b3f6cef2 upstream. syzbot reported a warning from rfkill_alloc(), and after a while I think that the reason is that it was doing fault injection and the dev_set_name() failed, leaving the name NULL, and we didn't check the return value and got to rfkill_alloc() with a NULL name. Since we really don't want a NULL name, we ought to check the return value. Fixes: fb28ad35906a ("net: struct device - replace bus_id with dev_name(), dev_set_name()") Reported-by: syzbot+1ddfb3357e1d7bb5b5d3@syzkaller.appspotmail.com Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/wireless/core.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/wireless/core.c b/net/wireless/core.c index 7b33e8c366bc13..33ce0484b2a03f 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -439,6 +439,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, if (rv) goto use_default_name; } else { + int rv; + use_default_name: /* NOTE: This is *probably* safe w/out holding rtnl because of * the restrictions on phy names. Probably this call could @@ -446,7 +448,11 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, * phyX. 
But, might should add some locking and check return * value, and use a different name if this one exists? */ - dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); + rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); + if (rv < 0) { + kfree(rdev); + return NULL; + } } INIT_LIST_HEAD(&rdev->wiphy.wdev_list); From 447f1170c23bea791c3b5efdc0264519fd22f565 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 15 Jan 2018 09:32:36 +0100 Subject: [PATCH 2154/3326] mac80211_hwsim: validate number of different channels commit 51a1aaa631c90223888d8beac4d649dc11d2ca55 upstream. When creating a new radio on the fly, hwsim allows this to be done with an arbitrary number of channels, but cfg80211 only supports a limited number of simultaneous channels, leading to a warning. Fix this by validating the number - this requires moving the define for the maximum out to a visible header file. Reported-by: syzbot+8dd9051ff19940290931@syzkaller.appspotmail.com Fixes: b59ec8dd4394 ("mac80211_hwsim: fix number of channels in interface combinations") Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/mac80211_hwsim.c | 5 +++++ include/net/cfg80211.h | 2 ++ net/wireless/core.h | 2 -- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index a59b54328c07b8..052e67bce6b3ed 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3119,6 +3119,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_CHANNELS]) param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); + if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { + GENL_SET_ERR_MSG(info, "too many channels specified"); + return -EINVAL; + } + if (info->attrs[HWSIM_ATTR_NO_VIF]) param.no_vif = true; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index f12fa5245a4531..ea0ed58db97e21 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -815,6 +815,8 @@ struct cfg80211_csa_settings { u8 count; }; +#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 + /** * struct iface_combination_params - input parameters for interface combinations * diff --git a/net/wireless/core.h b/net/wireless/core.h index 705835047f9856..90f90c7d8bf9bb 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -502,8 +502,6 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); -#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 - #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) #else From 201941360944afbde6ab6f5ff70f96f16d334971 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Fri, 5 Jan 2018 08:35:47 +0100 Subject: [PATCH 2155/3326] esp: Fix GRO when the headers not fully in the linear part of the skb. commit 374d1b5a81f7f9cc5e7f095ac3d5aff3f6600376 upstream. The GRO layer does not necessarily pull the complete headers into the linear part of the skb, a part may remain on the first page fragment. This can lead to a crash if we try to pull the headers, so make sure we have them on the linear part before pulling. 
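A user-space analogue of the change may make the failure mode clearer: consuming 'offset' bytes is only legal if they are actually present in the contiguous buffer, otherwise the caller must bail out. Note that the kernel's pskb_pull()/pskb_may_pull() can additionally copy fragmented data into the linear area when possible; the sketch below, with an invented struct pkt, only models the refusal path.

    #include <stddef.h>
    #include <stdio.h>

    struct pkt {
        const unsigned char *data;
        size_t               len;   /* bytes in the linear part */
    };

    /* Return a pointer past 'offset' bytes, or NULL if they are not there. */
    static const unsigned char *pull(struct pkt *p, size_t offset)
    {
        if (offset > p->len)
            return NULL;    /* headers live elsewhere; bail out */
        p->data += offset;
        p->len  -= offset;
        return p->data;
    }

    int main(void)
    {
        unsigned char buf[16] = { 0 };
        struct pkt p = { buf, sizeof(buf) };

        printf("pull 8  -> %s\n", pull(&p, 8)  ? "ok" : "NULL");
        printf("pull 64 -> %s\n", pull(&p, 64) ? "ok" : "NULL");
        return 0;
    }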
Fixes: 7785bba299a8 ("esp: Add a software GRO codepath") Reported-by: syzbot+82bbd65569c49c6c0c4d@syzkaller.appspotmail.com Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/ipv4/esp4_offload.c | 3 ++- net/ipv6/esp6_offload.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 56c49623bb9d73..29b333a62ab01d 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -38,7 +38,8 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head, __be32 spi; int err; - skb_pull(skb, offset); + if (!pskb_pull(skb, offset)) + return NULL; if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) goto out; diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 1ea9d794447e78..f52c314d4c9708 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -60,7 +60,8 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head, int nhoff; int err; - skb_pull(skb, offset); + if (!pskb_pull(skb, offset)) + return NULL; if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) goto out; From 85c31887a20da06fe2803b4ec1031b9af89c1899 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 6 Jan 2018 01:13:08 +0100 Subject: [PATCH 2156/3326] xfrm: don't call xfrm_policy_cache_flush while holding spinlock commit b1bdcb59b64f806ef08d25a85c39ffb3ad841ce6 upstream. xfrm_policy_cache_flush can sleep, so it cannot be called while holding a spinlock. We could release the lock first, but I don't see why we need to invoke this function here in first place, the packet path won't reuse an xdst entry unless its still valid. While at it, add an annotation to xfrm_policy_cache_flush, it would have probably caught this bug sooner. Fixes: ec30d78c14a813 ("xfrm: add xdst pcpu cache") Reported-by: syzbot+e149f7d1328c26f9c12f@syzkaller.appspotmail.com Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_policy.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 688ed34f0671f5..27b91400022138 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -975,8 +975,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid) } if (!cnt) err = -ESRCH; - else - xfrm_policy_cache_flush(); out: spin_unlock_bh(&net->xfrm.xfrm_policy_lock); return err; @@ -1738,6 +1736,8 @@ void xfrm_policy_cache_flush(void) bool found = 0; int cpu; + might_sleep(); + local_bh_disable(); rcu_read_lock(); for_each_possible_cpu(cpu) { From e8370258ca0bc9e630043157db862be2c5c341a6 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Sun, 31 Dec 2017 16:18:56 +0100 Subject: [PATCH 2157/3326] xfrm: fix rcu usage in xfrm_get_type_offload commit 2f10a61cee8fdb9f8da90f5db687e1862b22cf06 upstream. request_module can sleep, thus we cannot hold rcu_read_lock() while calling it. The function also jumps back and takes rcu_read_lock() again (in xfrm_state_get_afinfo()), resulting in an imbalance. This codepath is triggered whenever a new offloaded state is created. 
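The shape of the fix, expressed as a user-space sketch with pthreads: the blocking step (request_module() in the kernel) runs only after the read-side lock has been dropped, and the lookup is then retried. The registry flag and blocking_load() helper are invented for illustration; this is not the xfrm code.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static int type_registered;

    static void blocking_load(void)
    {
        usleep(1000);               /* stands in for request_module() */
        pthread_rwlock_wrlock(&lock);
        type_registered = 1;
        pthread_rwlock_unlock(&lock);
    }

    static int get_type(void)
    {
        int tries = 1;
        int found;

    retry:
        pthread_rwlock_rdlock(&lock);
        found = type_registered;
        pthread_rwlock_unlock(&lock);   /* unlock BEFORE sleeping */

        if (!found && tries--) {
            blocking_load();
            goto retry;
        }
        return found;
    }

    int main(void)
    {
        printf("type found: %d\n", get_type());
        return 0;
    }

Moving the unlock above the module request keeps the critical section balanced and free of sleeping calls, which is exactly what the one-line hunk below achieves.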
Fixes: ffdb5211da1c ("xfrm: Auto-load xfrm offload modules") Reported-by: syzbot+ca425f44816d749e8eb49755567a75ee48cf4a30@syzkaller.appspotmail.com Signed-off-by: Sabrina Dubroca Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_state.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 1f5cee2269af42..58be0e7f4c7d86 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -313,13 +313,14 @@ xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load) if ((type && !try_module_get(type->owner))) type = NULL; + rcu_read_unlock(); + if (!type && try_load) { request_module("xfrm-offload-%d-%d", family, proto); try_load = 0; goto retry; } - rcu_read_unlock(); return type; } From 8e754b4ec589f14ef6efa454ee872580ba1802fa Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 27 Dec 2017 23:25:45 +0100 Subject: [PATCH 2158/3326] xfrm: skip policies marked as dead while rehashing commit 862591bf4f519d1b8d859af720fafeaebdd0162a upstream. syzkaller triggered following KASAN splat: BUG: KASAN: slab-out-of-bounds in xfrm_hash_rebuild+0xdbe/0xf00 net/xfrm/xfrm_policy.c:618 read of size 2 at addr ffff8801c8e92fe4 by task kworker/1:1/23 [..] Workqueue: events xfrm_hash_rebuild [..] __asan_report_load2_noabort+0x14/0x20 mm/kasan/report.c:428 xfrm_hash_rebuild+0xdbe/0xf00 net/xfrm/xfrm_policy.c:618 process_one_work+0xbbf/0x1b10 kernel/workqueue.c:2112 worker_thread+0x223/0x1990 kernel/workqueue.c:2246 [..] The reproducer triggers: 1016 if (error) { 1017 list_move_tail(&walk->walk.all, &x->all); 1018 goto out; 1019 } in xfrm_policy_walk() via pfkey (it sets tiny rcv space, dump callback returns -ENOBUFS). In this case, *walk is located the pfkey socket struct, so this socket becomes visible in the global policy list. It looks like this is intentional -- phony walker has walk.dead set to 1 and all other places skip such "policies". Ccing original authors of the two commits that seem to expose this issue (first patch missed ->dead check, second patch adds pfkey sockets to policies dumper list). Fixes: 880a6fab8f6ba5b ("xfrm: configure policy hash table thresholds by netlink") Fixes: 12a169e7d8f4b1c ("ipsec: Put dumpers on the dump list") Cc: Herbert Xu Cc: Timo Teras Cc: Christophe Gouault Reported-by: syzbot Signed-off-by: Florian Westphal Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_policy.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 27b91400022138..9dcfb7e080e14e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -610,7 +610,8 @@ static void xfrm_hash_rebuild(struct work_struct *work) /* re-insert all policies by order of creation */ list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { - if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) { + if (policy->walk.dead || + xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) { /* skip socket policies */ continue; } From 7880fc541566166d140954825fc83c826534e622 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Mon, 18 Dec 2017 20:31:41 +0900 Subject: [PATCH 2159/3326] mm,vmscan: Make unregister_shrinker() no-op if register_shrinker() failed. commit bb422a738f6566f7439cd347d54e321e4fe92a9f upstream. 
Syzbot caught an oops at unregister_shrinker() because combination of commit 1d3d4437eae1bb29 ("vmscan: per-node deferred work") and fault injection made register_shrinker() fail and the caller of register_shrinker() did not check for failure. ---------- [ 554.881422] FAULT_INJECTION: forcing a failure. [ 554.881422] name failslab, interval 1, probability 0, space 0, times 0 [ 554.881438] CPU: 1 PID: 13231 Comm: syz-executor1 Not tainted 4.14.0-rc8+ #82 [ 554.881443] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 [ 554.881445] Call Trace: [ 554.881459] dump_stack+0x194/0x257 [ 554.881474] ? arch_local_irq_restore+0x53/0x53 [ 554.881486] ? find_held_lock+0x35/0x1d0 [ 554.881507] should_fail+0x8c0/0xa40 [ 554.881522] ? fault_create_debugfs_attr+0x1f0/0x1f0 [ 554.881537] ? check_noncircular+0x20/0x20 [ 554.881546] ? find_next_zero_bit+0x2c/0x40 [ 554.881560] ? ida_get_new_above+0x421/0x9d0 [ 554.881577] ? find_held_lock+0x35/0x1d0 [ 554.881594] ? __lock_is_held+0xb6/0x140 [ 554.881628] ? check_same_owner+0x320/0x320 [ 554.881634] ? lock_downgrade+0x990/0x990 [ 554.881649] ? find_held_lock+0x35/0x1d0 [ 554.881672] should_failslab+0xec/0x120 [ 554.881684] __kmalloc+0x63/0x760 [ 554.881692] ? lock_downgrade+0x990/0x990 [ 554.881712] ? register_shrinker+0x10e/0x2d0 [ 554.881721] ? trace_event_raw_event_module_request+0x320/0x320 [ 554.881737] register_shrinker+0x10e/0x2d0 [ 554.881747] ? prepare_kswapd_sleep+0x1f0/0x1f0 [ 554.881755] ? _down_write_nest_lock+0x120/0x120 [ 554.881765] ? memcpy+0x45/0x50 [ 554.881785] sget_userns+0xbcd/0xe20 (...snipped...) [ 554.898693] kasan: CONFIG_KASAN_INLINE enabled [ 554.898724] kasan: GPF could be caused by NULL-ptr deref or user memory access [ 554.898732] general protection fault: 0000 [#1] SMP KASAN [ 554.898737] Dumping ftrace buffer: [ 554.898741] (ftrace buffer empty) [ 554.898743] Modules linked in: [ 554.898752] CPU: 1 PID: 13231 Comm: syz-executor1 Not tainted 4.14.0-rc8+ #82 [ 554.898755] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 [ 554.898760] task: ffff8801d1dbe5c0 task.stack: ffff8801c9e38000 [ 554.898772] RIP: 0010:__list_del_entry_valid+0x7e/0x150 [ 554.898775] RSP: 0018:ffff8801c9e3f108 EFLAGS: 00010246 [ 554.898780] RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000000 [ 554.898784] RDX: 0000000000000000 RSI: ffff8801c53c6f98 RDI: ffff8801c53c6fa0 [ 554.898788] RBP: ffff8801c9e3f120 R08: 1ffff100393c7d55 R09: 0000000000000004 [ 554.898791] R10: ffff8801c9e3ef70 R11: 0000000000000000 R12: 0000000000000000 [ 554.898795] R13: dffffc0000000000 R14: 1ffff100393c7e45 R15: ffff8801c53c6f98 [ 554.898800] FS: 0000000000000000(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000 [ 554.898804] CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033 [ 554.898807] CR2: 00000000dbc23000 CR3: 00000001c7269000 CR4: 00000000001406e0 [ 554.898813] DR0: 0000000020000000 DR1: 0000000020000000 DR2: 0000000000000000 [ 554.898816] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000600 [ 554.898818] Call Trace: [ 554.898828] unregister_shrinker+0x79/0x300 [ 554.898837] ? perf_trace_mm_vmscan_writepage+0x750/0x750 [ 554.898844] ? down_write+0x87/0x120 [ 554.898851] ? deactivate_super+0x139/0x1b0 [ 554.898857] ? down_read+0x150/0x150 [ 554.898864] ? 
check_same_owner+0x320/0x320 [ 554.898875] deactivate_locked_super+0x64/0xd0 [ 554.898883] deactivate_super+0x141/0x1b0 ---------- Since allowing register_shrinker() callers to call unregister_shrinker() when register_shrinker() failed can simplify error recovery path, this patch makes unregister_shrinker() no-op when register_shrinker() failed. Also, reset shrinker->nr_deferred in case unregister_shrinker() was by error called twice. Signed-off-by: Tetsuo Handa Signed-off-by: Aliaksei Karaliou Reported-by: syzbot Cc: Glauber Costa Cc: Al Viro Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- mm/vmscan.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/vmscan.c b/mm/vmscan.c index eb2f0315b8c0e9..441f346fb5e2fd 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker); */ void unregister_shrinker(struct shrinker *shrinker) { + if (!shrinker->nr_deferred) + return; down_write(&shrinker_rwsem); list_del(&shrinker->list); up_write(&shrinker_rwsem); kfree(shrinker->nr_deferred); + shrinker->nr_deferred = NULL; } EXPORT_SYMBOL(unregister_shrinker); From 846666fad8d7de79b47a4147e39330fe96e1bdc3 Mon Sep 17 00:00:00 2001 From: Lan Tianyu Date: Thu, 14 Dec 2017 03:01:52 -0500 Subject: [PATCH 2160/3326] KVM/x86: Check input paging mode when cs.l is set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit f29810335965ac1f7bcb501ee2af5f039f792416 upstream. Reported by syzkaller: WARNING: CPU: 0 PID: 27962 at arch/x86/kvm/emulate.c:5631 x86_emulate_insn+0x557/0x15f0 [kvm] Modules linked in: kvm_intel kvm [last unloaded: kvm] CPU: 0 PID: 27962 Comm: syz-executor Tainted: G B W 4.15.0-rc2-next-20171208+ #32 Hardware name: Intel Corporation S1200SP/S1200SP, BIOS S1200SP.86B.01.03.0006.040720161253 04/07/2016 RIP: 0010:x86_emulate_insn+0x557/0x15f0 [kvm] RSP: 0018:ffff8807234476d0 EFLAGS: 00010282 RAX: 0000000000000000 RBX: ffff88072d0237a0 RCX: ffffffffa0065c4d RDX: 1ffff100e5a046f9 RSI: 0000000000000003 RDI: ffff88072d0237c8 RBP: ffff880723447728 R08: ffff88072d020000 R09: ffffffffa008d240 R10: 0000000000000002 R11: ffffed00e7d87db3 R12: ffff88072d0237c8 R13: ffff88072d023870 R14: ffff88072d0238c2 R15: ffffffffa008d080 FS: 00007f8a68666700(0000) GS:ffff880802200000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000000002009506c CR3: 000000071fec4005 CR4: 00000000003626f0 Call Trace: x86_emulate_instruction+0x3bc/0xb70 [kvm] ? reexecute_instruction.part.162+0x130/0x130 [kvm] vmx_handle_exit+0x46d/0x14f0 [kvm_intel] ? trace_event_raw_event_kvm_entry+0xe7/0x150 [kvm] ? handle_vmfunc+0x2f0/0x2f0 [kvm_intel] ? wait_lapic_expire+0x25/0x270 [kvm] vcpu_enter_guest+0x720/0x1ef0 [kvm] ... When CS.L is set, vcpu should run in the 64 bit paging mode. Current kvm set_sregs function doesn't have such check when userspace inputs sreg values. This will lead unexpected behavior. This patch is to add checks for CS.L, EFER.LME, EFER.LMA and CR4.PAE when get SREG inputs from userspace in order to avoid unexpected behavior. 
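The validity rules listed above can also be written as a small stand-alone predicate over the relevant bits. The booleans below abstract EFER.LME, CR0.PG, CR4.PAE, EFER.LMA and CS.L; this is only a restatement of the commit's logic for illustration, not the kvm_valid_sregs() source.

    #include <stdbool.h>
    #include <stdio.h>

    static bool sregs_valid(bool lme, bool pg, bool pae, bool lma, bool cs_l)
    {
        if (lme && pg) {
            /* 64-bit paging mode: PAE and LMA must both be set. */
            if (!pae || !lma)
                return false;
        } else {
            /* Not 64-bit mode: LMA must be clear, CS cannot be 64-bit. */
            if (lma || cs_l)
                return false;
        }
        return true;
    }

    int main(void)
    {
        /* Long mode, consistently described: accepted. */
        printf("%d\n", sregs_valid(true, true, true, true, true));
        /* cs.l set while long mode is not enabled: rejected. */
        printf("%d\n", sregs_valid(false, true, true, false, true));
        return 0;
    }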
Suggested-by: Paolo Bonzini Reported-by: Dmitry Vyukov Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Dmitry Vyukov Cc: Jim Mattson Signed-off-by: Tianyu Lan Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f97358423f9c48..8b7ede04e8a347 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7474,6 +7474,29 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, } EXPORT_SYMBOL_GPL(kvm_task_switch); +int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) { + /* + * When EFER.LME and CR0.PG are set, the processor is in + * 64-bit mode (though maybe in a 32-bit code segment). + * CR4.PAE and EFER.LMA must be set. + */ + if (!(sregs->cr4 & X86_CR4_PAE_BIT) + || !(sregs->efer & EFER_LMA)) + return -EINVAL; + } else { + /* + * Not in 64-bit mode: EFER.LMA is clear and the code + * segment cannot be 64-bit. + */ + if (sregs->efer & EFER_LMA || sregs->cs.l) + return -EINVAL; + } + + return 0; +} + int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { @@ -7486,6 +7509,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, (sregs->cr4 & X86_CR4_OSXSAVE)) return -EINVAL; + if (kvm_valid_sregs(vcpu, sregs)) + return -EINVAL; + apic_base_msr.data = sregs->apic_base; apic_base_msr.host_initiated = true; if (kvm_set_apic_base(vcpu, &apic_base_msr)) From d6b36a2616ab94040b24ce622bd6befb1a28e4aa Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 5 Dec 2017 22:30:04 +0200 Subject: [PATCH 2161/3326] RDMA/netlink: Fix general protection fault commit d0e312fe3d34c1bc014a7f8ec6540d05e8077483 upstream. The RDMA netlink core code checks validity of messages by ensuring that type and operand are in range. It works well for almost all clients except NLDEV, which has cb_table less than number of operands. Request to access such operand will trigger the following kernel panic. This patch updates all places where cb_table is declared for the consistency, but only NLDEV is actually need it. general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN Modules linked in: CPU: 0 PID: 522 Comm: syz-executor6 Not tainted 4.13.0+ #4 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 task: ffff8800657799c0 task.stack: ffff8800695d000 RIP: 0010:rdma_nl_rcv_msg+0x13a/0x4c0 RSP: 0018:ffff8800695d7838 EFLAGS: 00010207 RAX: dffffc0000000000 RBX: 1ffff1000d2baf0b RCX: 00000000704ff4d7 RDX: 0000000000000000 RSI: ffffffff81ddb03c RDI: 00000003827fa6bc RBP: ffff8800695d7900 R08: ffffffff82ec0578 R09: 0000000000000000 R10: ffff8800695d7900 R11: 0000000000000001 R12: 000000000000001c R13: ffff880069d31e00 R14: 00000000ffffffff R15: ffff880069d357c0 FS: 00007fee6acb8700(0000) GS:ffff88006ca00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000201a9000 CR3: 0000000059766000 CR4: 00000000000006b0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ? rdma_nl_multicast+0x80/0x80 rdma_nl_rcv+0x36b/0x4d0 ? ibnl_put_attr+0xc0/0xc0 netlink_unicast+0x4bd/0x6d0 ? netlink_sendskb+0x50/0x50 ? drop_futex_key_refs.isra.4+0x68/0xb0 netlink_sendmsg+0x9ab/0xbd0 ? nlmsg_notify+0x140/0x140 ? wake_up_q+0xa1/0xf0 ? 
drop_futex_key_refs.isra.4+0x68/0xb0 sock_sendmsg+0x88/0xd0 sock_write_iter+0x228/0x3c0 ? sock_sendmsg+0xd0/0xd0 ? do_futex+0x3e5/0xb20 ? iov_iter_init+0xaf/0x1d0 __vfs_write+0x46e/0x640 ? sched_clock_cpu+0x1b/0x190 ? __vfs_read+0x620/0x620 ? __fget+0x23a/0x390 ? rw_verify_area+0xca/0x290 vfs_write+0x192/0x490 SyS_write+0xde/0x1c0 ? SyS_read+0x1c0/0x1c0 ? trace_hardirqs_on_thunk+0x1a/0x1c entry_SYSCALL_64_fastpath+0x18/0xad RIP: 0033:0x7fee6a74a219 RSP: 002b:00007fee6acb7d58 EFLAGS: 00000212 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 0000000000638000 RCX: 00007fee6a74a219 RDX: 0000000000000078 RSI: 0000000020141000 RDI: 0000000000000006 RBP: 0000000000000046 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000212 R12: ffff8800695d7f98 R13: 0000000020141000 R14: 0000000000000006 R15: 00000000ffffffff Code: d6 48 b8 00 00 00 00 00 fc ff df 66 41 81 e4 ff 03 44 8d 72 ff 4a 8d 3c b5 c0 a6 7f 82 44 89 b5 4c ff ff ff 48 89 f9 48 c1 e9 03 <0f> b6 0c 01 48 89 f8 83 e0 07 83 c0 03 38 c8 7c 08 84 c9 0f 85 RIP: rdma_nl_rcv_msg+0x13a/0x4c0 RSP: ffff8800695d7838 ---[ end trace ba085d123959c8ec ]--- Kernel panic - not syncing: Fatal exception Cc: syzkaller Fixes: b4c598a67ea1 ("RDMA/netlink: Implement nldev device dumpit calback") Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky Signed-off-by: Greg Kroah-Hartman Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 2 +- drivers/infiniband/core/device.c | 2 +- drivers/infiniband/core/iwcm.c | 2 +- drivers/infiniband/core/nldev.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index fa79c7076ccd1b..c07d0451cd4ad8 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -4461,7 +4461,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -static const struct rdma_nl_cbs cma_cb_table[] = { +static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = { [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats}, }; diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index ebfdb5503701ef..fb01f5f1349575 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1154,7 +1154,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, } EXPORT_SYMBOL(ib_get_net_dev_by_params); -static const struct rdma_nl_cbs ibnl_ls_cb_table[] = { +static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { [RDMA_NL_LS_OP_RESOLVE] = { .doit = ib_nl_handle_resolve_resp, .flags = RDMA_NL_ADMIN_PERM, diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index fcf42f6bb82a72..30d7277249b8ab 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason) } EXPORT_SYMBOL(iwcm_reject_msg); -static struct rdma_nl_cbs iwcm_nl_cb_table[] = { +static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = { [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 2fae850a3eff6a..9a05245a1acf4a 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -303,7 +303,7 @@ out: cb->args[0] = idx; return skb->len; } -static const 
struct rdma_nl_cbs nldev_cb_table[] = { +static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { [RDMA_NLDEV_CMD_GET] = { .doit = nldev_get_doit, .dump = nldev_get_dumpit, From 70c5e41f4734ee5cbafb5c49576bdc914f02c6b8 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 29 Nov 2017 06:53:55 +0100 Subject: [PATCH 2162/3326] xfrm: Fix stack-out-of-bounds read on socket policy lookup. commit ddc47e4404b58f03e98345398fb12d38fe291512 upstream. When we do tunnel or beet mode, we pass saddr and daddr from the template to xfrm_state_find(), this is ok. On transport mode, we pass the addresses from the flowi, assuming that the IP addresses (and address family) don't change during transformation. This assumption is wrong in the IPv4 mapped IPv6 case, packet is IPv4 and template is IPv6. Fix this by catching address family missmatches of the policy and the flow already before we do the lookup. Reported-by: syzbot Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_policy.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 9dcfb7e080e14e..22f5da66357b9f 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1168,9 +1168,15 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, again: pol = rcu_dereference(sk->sk_policy[dir]); if (pol != NULL) { - bool match = xfrm_selector_match(&pol->selector, fl, family); + bool match; int err = 0; + if (pol->family != family) { + pol = NULL; + goto out; + } + + match = xfrm_selector_match(&pol->selector, fl, family); if (match) { if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { pol = NULL; From 4d4d55665f32c20117ec1d1a96dc807f6d04fc09 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Mon, 27 Nov 2017 11:15:16 -0800 Subject: [PATCH 2163/3326] xfrm: check id proto in validate_tmpl() commit 6a53b7593233ab9e4f96873ebacc0f653a55c3e1 upstream. syzbot reported a kernel warning in xfrm_state_fini(), which indicates that we have entries left in the list net->xfrm.state_all whose proto is zero. And xfrm_id_proto_match() doesn't consider them as a match with IPSEC_PROTO_ANY in this case. Proto with value 0 is probably not a valid value, at least verify_newsa_info() doesn't consider it valid either. This patch fixes it by checking the proto value in validate_tmpl() and rejecting invalid ones, like what iproute2 does in xfrm_xfrmproto_getbyname(). Reported-by: syzbot Cc: Steffen Klassert Cc: Herbert Xu Signed-off-by: Cong Wang Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_user.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e44a0fed48dd08..487bec4851a343 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1443,6 +1443,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) default: return -EINVAL; } + + switch (ut[i].id.proto) { + case IPPROTO_AH: + case IPPROTO_ESP: + case IPPROTO_COMP: +#if IS_ENABLED(CONFIG_IPV6) + case IPPROTO_ROUTING: + case IPPROTO_DSTOPTS: +#endif + case IPSEC_PROTO_ANY: + break; + default: + return -EINVAL; + } + } return 0; From 2df0d6de5eff25b8fb4e858c9d77c98cae8a0bc0 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Nov 2017 14:11:11 +0800 Subject: [PATCH 2164/3326] sctp: set frag_point in sctp_setsockopt_maxseg correctly commit ecca8f88da5c4260cc2bccfefd2a24976704c366 upstream. 
Now in sctp_setsockopt_maxseg user_frag or frag_point can be set with val >= 8 and val <= SCTP_MAX_CHUNK_LEN. But both checks are incorrect. val >= 8 means frag_point can even be less than SCTP_DEFAULT_MINSEGMENT. Then in sctp_datamsg_from_user(), when it's value is greater than cookie echo len and trying to bundle with cookie echo chunk, the first_len will overflow. The worse case is when it's value is equal as cookie echo len, first_len becomes 0, it will go into a dead loop for fragment later on. In Hangbin syzkaller testing env, oom was even triggered due to consecutive memory allocation in that loop. Besides, SCTP_MAX_CHUNK_LEN is the max size of the whole chunk, it should deduct the data header for frag_point or user_frag check. This patch does a proper check with SCTP_DEFAULT_MINSEGMENT subtracting the sctphdr and datahdr, SCTP_MAX_CHUNK_LEN subtracting datahdr when setting frag_point via sockopt. It also improves sctp_setsockopt_maxseg codes. Suggested-by: Marcelo Ricardo Leitner Reported-by: Hangbin Liu Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/net/sctp/sctp.h | 3 ++- net/sctp/socket.c | 29 +++++++++++++++++++---------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index d7d8cba0146976..749a428824371a 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -444,7 +444,8 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu) if (asoc->user_frag) frag = min_t(int, frag, asoc->user_frag); - frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN)); + frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN - + sizeof(struct sctp_data_chunk))); return frag; } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 1c08d86efe9494..3c8b9266786680 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3136,9 +3136,9 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsign */ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) { + struct sctp_sock *sp = sctp_sk(sk); struct sctp_assoc_value params; struct sctp_association *asoc; - struct sctp_sock *sp = sctp_sk(sk); int val; if (optlen == sizeof(int)) { @@ -3154,26 +3154,35 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned if (copy_from_user(¶ms, optval, optlen)) return -EFAULT; val = params.assoc_value; - } else + } else { return -EINVAL; + } - if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) - return -EINVAL; + if (val) { + int min_len, max_len; - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id && sctp_style(sk, UDP)) - return -EINVAL; + min_len = SCTP_DEFAULT_MINSEGMENT - sp->pf->af->net_header_len; + min_len -= sizeof(struct sctphdr) + + sizeof(struct sctp_data_chunk); + + max_len = SCTP_MAX_CHUNK_LEN - sizeof(struct sctp_data_chunk); + if (val < min_len || val > max_len) + return -EINVAL; + } + + asoc = sctp_id2assoc(sk, params.assoc_id); if (asoc) { if (val == 0) { - val = asoc->pathmtu; - val -= sp->pf->af->net_header_len; + val = asoc->pathmtu - sp->pf->af->net_header_len; val -= sizeof(struct sctphdr) + - sizeof(struct sctp_data_chunk); + sizeof(struct sctp_data_chunk); } asoc->user_frag = val; asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); } else { + if (params.assoc_id && sctp_style(sk, UDP)) + return -EINVAL; sp->user_frag = val; } From 90f9a1ff1e8eaa15c1e33fefa44705707db4de2b 
Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 5 Nov 2017 09:16:09 -0700 Subject: [PATCH 2165/3326] blktrace: fix unlocked registration of tracepoints commit a6da0024ffc19e0d47712bb5ca4fd083f76b07df upstream. We need to ensure that tracepoints are registered and unregistered with the users of them. The existing atomic count isn't enough for that. Add a lock around the tracepoints, so we serialize access to them. This fixes cases where we have multiple users setting up and tearing down tracepoints, like this: CPU: 0 PID: 2995 Comm: syzkaller857118 Not tainted 4.14.0-rc5-next-20171018+ #36 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:16 [inline] dump_stack+0x194/0x257 lib/dump_stack.c:52 panic+0x1e4/0x41c kernel/panic.c:183 __warn+0x1c4/0x1e0 kernel/panic.c:546 report_bug+0x211/0x2d0 lib/bug.c:183 fixup_bug+0x40/0x90 arch/x86/kernel/traps.c:177 do_trap_no_signal arch/x86/kernel/traps.c:211 [inline] do_trap+0x260/0x390 arch/x86/kernel/traps.c:260 do_error_trap+0x120/0x390 arch/x86/kernel/traps.c:297 do_invalid_op+0x1b/0x20 arch/x86/kernel/traps.c:310 invalid_op+0x18/0x20 arch/x86/entry/entry_64.S:905 RIP: 0010:tracepoint_add_func kernel/tracepoint.c:210 [inline] RIP: 0010:tracepoint_probe_register_prio+0x397/0x9a0 kernel/tracepoint.c:283 RSP: 0018:ffff8801d1d1f6c0 EFLAGS: 00010293 RAX: ffff8801d22e8540 RBX: 00000000ffffffef RCX: ffffffff81710f07 RDX: 0000000000000000 RSI: ffffffff85b679c0 RDI: ffff8801d5f19818 RBP: ffff8801d1d1f7c8 R08: ffffffff81710c10 R09: 0000000000000004 R10: ffff8801d1d1f6b0 R11: 0000000000000003 R12: ffffffff817597f0 R13: 0000000000000000 R14: 00000000ffffffff R15: ffff8801d1d1f7a0 tracepoint_probe_register+0x2a/0x40 kernel/tracepoint.c:304 register_trace_block_rq_insert include/trace/events/block.h:191 [inline] blk_register_tracepoints+0x1e/0x2f0 kernel/trace/blktrace.c:1043 do_blk_trace_setup+0xa10/0xcf0 kernel/trace/blktrace.c:542 blk_trace_setup+0xbd/0x180 kernel/trace/blktrace.c:564 sg_ioctl+0xc71/0x2d90 drivers/scsi/sg.c:1089 vfs_ioctl fs/ioctl.c:45 [inline] do_vfs_ioctl+0x1b1/0x1520 fs/ioctl.c:685 SYSC_ioctl fs/ioctl.c:700 [inline] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:691 entry_SYSCALL_64_fastpath+0x1f/0xbe RIP: 0033:0x444339 RSP: 002b:00007ffe05bb5b18 EFLAGS: 00000206 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 00000000006d66c0 RCX: 0000000000444339 RDX: 000000002084cf90 RSI: 00000000c0481273 RDI: 0000000000000009 RBP: 0000000000000082 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000206 R12: ffffffffffffffff R13: 00000000c0481273 R14: 0000000000000000 R15: 0000000000000000 since we can now run these in parallel. Ensure that the exported helpers for doing this are grabbing the queue trace mutex. 
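In condensed form, the replacement is the standard "first user registers, last user unregisters, both decided under a mutex" reference count, mirroring the get_probe_ref()/put_probe_ref() helpers in the hunk below (repeated here only to make the shape explicit):

static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;                      /* protected by blk_probe_mutex */

static void get_probe_ref(void)
{
        mutex_lock(&blk_probe_mutex);
        if (++blk_probes_ref == 1)              /* 0 -> 1: first user registers */
                blk_register_tracepoints();
        mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
        mutex_lock(&blk_probe_mutex);
        if (!--blk_probes_ref)                  /* 1 -> 0: last user unregisters */
                blk_unregister_tracepoints();
        mutex_unlock(&blk_probe_mutex);
}

With the bare atomic, the "count just became 1/0" test and the register/unregister call were two separate steps, so concurrent setup and teardown could interleave them; under the mutex the test and the call form one critical section.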
Reported-by: Steven Rostedt Tested-by: Dmitry Vyukov Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- kernel/trace/blktrace.c | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 45a3928544ce59..e73dcab8e9f0da 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -66,7 +66,8 @@ static struct tracer_flags blk_tracer_flags = { }; /* Global reference count of probes */ -static atomic_t blk_probes_ref = ATOMIC_INIT(0); +static DEFINE_MUTEX(blk_probe_mutex); +static int blk_probes_ref; static void blk_register_tracepoints(void); static void blk_unregister_tracepoints(void); @@ -329,11 +330,26 @@ static void blk_trace_free(struct blk_trace *bt) kfree(bt); } +static void get_probe_ref(void) +{ + mutex_lock(&blk_probe_mutex); + if (++blk_probes_ref == 1) + blk_register_tracepoints(); + mutex_unlock(&blk_probe_mutex); +} + +static void put_probe_ref(void) +{ + mutex_lock(&blk_probe_mutex); + if (!--blk_probes_ref) + blk_unregister_tracepoints(); + mutex_unlock(&blk_probe_mutex); +} + static void blk_trace_cleanup(struct blk_trace *bt) { blk_trace_free(bt); - if (atomic_dec_and_test(&blk_probes_ref)) - blk_unregister_tracepoints(); + put_probe_ref(); } int blk_trace_remove(struct request_queue *q) @@ -538,8 +554,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, if (cmpxchg(&q->blk_trace, NULL, bt)) goto err; - if (atomic_inc_return(&blk_probes_ref) == 1) - blk_register_tracepoints(); + get_probe_ref(); ret = 0; err: @@ -1558,9 +1573,7 @@ static int blk_trace_remove_queue(struct request_queue *q) if (bt == NULL) return -EINVAL; - if (atomic_dec_and_test(&blk_probes_ref)) - blk_unregister_tracepoints(); - + put_probe_ref(); blk_trace_free(bt); return 0; } @@ -1591,8 +1604,7 @@ static int blk_trace_setup_queue(struct request_queue *q, if (cmpxchg(&q->blk_trace, NULL, bt)) goto free_bt; - if (atomic_inc_return(&blk_probes_ref) == 1) - blk_register_tracepoints(); + get_probe_ref(); return 0; free_bt: From 3587188ad596da05a8dd691fc28ee56d335f41b8 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 31 Oct 2017 09:53:28 +0100 Subject: [PATCH 2166/3326] dnotify: Handle errors from fsnotify_add_mark_locked() in fcntl_dirnotify() commit b3a0066005821acdc0cdb092cb72587182ab583f upstream. fsnotify_add_mark_locked() can fail but we do not check its return value. This didn't matter before commit 9dd813c15b2c "fsnotify: Move mark list head from object into dedicated structure" as none of possible failures could happen for dnotify but after that commit -ENOMEM can be returned. Handle this error properly in fcntl_dirnotify() as otherwise we just hit BUG_ON(dn_mark->dn) in dnotify_free_mark(). 
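For context on where the error now surfaces: dnotify marks are created via fcntl(fd, F_NOTIFY, ...), so with this change an allocation failure inside fsnotify_add_mark_locked() is reported back to the caller instead of leaving a half-initialized mark behind. A minimal, purely illustrative user-space use of the interface (a real consumer would also install a SIGIO/F_SETSIG handler to receive the events):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int fd = open(".", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* With the fix, -ENOMEM from fsnotify_add_mark_locked() comes
         * back here as an fcntl() error rather than triggering
         * BUG_ON(dn_mark->dn) later in dnotify_free_mark(). */
        if (fcntl(fd, F_NOTIFY, DN_CREATE | DN_MODIFY | DN_MULTISHOT) < 0) {
                perror("fcntl(F_NOTIFY)");
                return 1;
        }
        return 0;
}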
Reviewed-by: Amir Goldstein Reported-by: syzkaller Fixes: 9dd813c15b2c101168808d4f5941a29985758973 Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/notify/dnotify/dnotify.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index cba32831592952..63a1ca4b9deee2 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -319,7 +319,11 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); spin_lock(&fsn_mark->lock); } else { - fsnotify_add_mark_locked(new_fsn_mark, inode, NULL, 0); + error = fsnotify_add_mark_locked(new_fsn_mark, inode, NULL, 0); + if (error) { + mutex_unlock(&dnotify_group->mark_mutex); + goto out_err; + } spin_lock(&new_fsn_mark->lock); fsn_mark = new_fsn_mark; dn_mark = new_dn_mark; @@ -345,6 +349,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) */ if (dn_mark == new_dn_mark) destroy = 1; + error = 0; goto out; } From 8c666e6471acb1fe33b6b88d95486ed2d548d944 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 31 Oct 2017 11:55:35 +0000 Subject: [PATCH 2167/3326] drm: Require __GFP_NOFAIL for the legacy drm_modeset_lock_all MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d18d1a5ac811d12f7ebc1129230312b5f2c50cb8 upstream. To acquire all modeset locks requires a ww_ctx to be allocated. As this is the legacy path and the allocation small, to reduce the changes required (and complex untested error handling) to the legacy drivers, we simply assume that the allocation succeeds. At present, it relies on the too-small-to-fail rule, but syzbot found that by injecting a failure here we would hit the WARN. Document that this allocation must succeed with __GFP_NOFAIL. Signed-off-by: Chris Wilson Cc: Daniel Vetter Reported-by: syzbot Signed-off-by: Daniel Vetter Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20171031115535.15166-1-chris@chris-wilson.co.uk Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_modeset_lock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index af4e906c630d5b..56b9f9b1c3aed1 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -88,7 +88,7 @@ void drm_modeset_lock_all(struct drm_device *dev) struct drm_modeset_acquire_ctx *ctx; int ret; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL); if (WARN_ON(!ctx)) return; From 90bca3712acb0ddffbb85beb099eb7eeafba6cd3 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 4 Oct 2017 15:09:24 +0200 Subject: [PATCH 2168/3326] ALSA: line6: Add a sanity check for invalid EPs commit 2a4340c57717162c6bf07a0860d05711d4de994b upstream. 
As syzkaller spotted, currently line6 drivers submit a URB with the fixed EP without checking whether it's actually available, which may result in a kernel warning like: usb 1-1: BOGUS urb xfer, pipe 3 != type 1 ------------[ cut here ]------------ WARNING: CPU: 0 PID: 24 at drivers/usb/core/urb.c:449 usb_submit_urb+0xf8a/0x11d0 Modules linked in: CPU: 0 PID: 24 Comm: kworker/0:1 Not tainted 4.14.0-rc2-42613-g1488251d1a98 #238 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event Call Trace: line6_start_listen+0x55f/0x9e0 sound/usb/line6/driver.c:82 line6_init_cap_control sound/usb/line6/driver.c:690 line6_probe+0x7c9/0x1310 sound/usb/line6/driver.c:764 podhd_probe+0x64/0x70 sound/usb/line6/podhd.c:474 usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361 .... This patch adds a sanity check of validity of EPs at the device initialization phase for avoiding the call with an invalid EP. Reported-by: Andrey Konovalov Tested-by: Andrey Konovalov Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/line6/driver.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index c8f723c3a03369..167aebf8276e50 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -78,6 +78,13 @@ static int line6_start_listen(struct usb_line6 *line6) line6->buffer_listen, LINE6_BUFSIZE_LISTEN, line6_data_received, line6); } + + /* sanity checks of EP before actually submitting */ + if (usb_urb_ep_type_check(line6->urb_listen)) { + dev_err(line6->ifcdev, "invalid control EP\n"); + return -EINVAL; + } + line6->urb_listen->actual_length = 0; err = usb_submit_urb(line6->urb_listen, GFP_ATOMIC); return err; From b43a3e21937c869706e94efb53ad166478ad1a93 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 4 Oct 2017 15:07:21 +0200 Subject: [PATCH 2169/3326] ALSA: caiaq: Add a sanity check for invalid EPs commit 58fc7f73a85d45a47057dad2af53502fdf6cf778 upstream. As syzkaller spotted, currently caiaq driver submits a URB with the fixed EP without checking whether it's actually available, which may result in a kernel warning like: usb 1-1: BOGUS urb xfer, pipe 3 != type 1 ------------[ cut here ]------------ WARNING: CPU: 1 PID: 1150 at drivers/usb/core/urb.c:449 usb_submit_urb+0xf8a/0x11d0 Modules linked in: CPU: 1 PID: 1150 Comm: kworker/1:1 Not tainted 4.14.0-rc2-42660-g24b7bd59eec0 #277 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event Call Trace: init_card sound/usb/caiaq/device.c:467 snd_probe+0x81c/0x1150 sound/usb/caiaq/device.c:525 usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361 .... This patch adds a sanity check of validity of EPs at the device initialization phase for avoiding the call with an invalid EP. 
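The pattern is the same in each of these drivers: the URB is filled in with an endpoint number taken from the device documentation, so the type encoded in the pipe has to be validated against the descriptors the device actually presented before the URB is submitted. A generic sketch of the check these patches insert (urb, usb_dev, ep_in and the other names are illustrative, not taken from any one driver):

        /* EP number comes from documentation, not from the descriptors */
        usb_fill_int_urb(urb, usb_dev, usb_rcvintpipe(usb_dev, ep_in),
                         buf, len, complete_fn, ctx, interval);

        /* Refuse devices whose descriptors do not really expose an
         * interrupt-IN endpoint at that address, instead of tripping the
         * "BOGUS urb xfer, pipe X != type Y" warning in usb_submit_urb() */
        if (usb_urb_ep_type_check(urb))
                return -EINVAL;

        return usb_submit_urb(urb, GFP_KERNEL);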
Reported-by: Andrey Konovalov Tested-by: Andrey Konovalov Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/caiaq/device.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c index d8409d9ae55b63..d55ca48de3ea3a 100644 --- a/sound/usb/caiaq/device.c +++ b/sound/usb/caiaq/device.c @@ -461,6 +461,13 @@ static int init_card(struct snd_usb_caiaqdev *cdev) cdev->midi_out_buf, EP1_BUFSIZE, snd_usb_caiaq_midi_output_done, cdev); + /* sanity checks of EPs before actually submitting */ + if (usb_urb_ep_type_check(&cdev->ep1_in_urb) || + usb_urb_ep_type_check(&cdev->midi_out_urb)) { + dev_err(dev, "invalid EPs\n"); + return -EINVAL; + } + init_waitqueue_head(&cdev->ep1_wait_queue); init_waitqueue_head(&cdev->prepare_wait_queue); From 73e6a383d1509d8bee58579041e76e5d36793c18 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 4 Oct 2017 15:03:40 +0200 Subject: [PATCH 2170/3326] ALSA: bcd2000: Add a sanity check for invalid EPs commit 6815a0b444572527256f0d0efd8efe3ddede6018 upstream. As syzkaller spotted, currently bcd2000 driver submits a URB with the fixed EP without checking whether it's actually available, which may result in a kernel warning like: usb 1-1: BOGUS urb xfer, pipe 1 != type 3 ------------[ cut here ]------------ WARNING: CPU: 0 PID: 1846 at drivers/usb/core/urb.c:449 usb_submit_urb+0xf8a/0x11d0 Modules linked in: CPU: 0 PID: 1846 Comm: kworker/0:2 Not tainted 4.14.0-rc2-42613-g1488251d1a98 #238 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event Call Trace: bcd2000_init_device sound/usb/bcd2000/bcd2000.c:289 bcd2000_init_midi sound/usb/bcd2000/bcd2000.c:345 bcd2000_probe+0xe64/0x19e0 sound/usb/bcd2000/bcd2000.c:406 usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361 .... This patch adds a sanity check of validity of EPs at the device initialization phase for avoiding the call with an invalid EP. Reported-by: Andrey Konovalov Tested-by: Andrey Konovalov Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/bcd2000/bcd2000.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sound/usb/bcd2000/bcd2000.c b/sound/usb/bcd2000/bcd2000.c index 7371e5b0603564..a6408209d7f1df 100644 --- a/sound/usb/bcd2000/bcd2000.c +++ b/sound/usb/bcd2000/bcd2000.c @@ -342,6 +342,13 @@ static int bcd2000_init_midi(struct bcd2000 *bcd2k) bcd2k->midi_out_buf, BUFSIZE, bcd2000_output_complete, bcd2k, 1); + /* sanity checks of EPs before actually submitting */ + if (usb_urb_ep_type_check(bcd2k->midi_in_urb) || + usb_urb_ep_type_check(bcd2k->midi_out_urb)) { + dev_err(&bcd2k->dev->dev, "invalid MIDI EP\n"); + return -EINVAL; + } + bcd2000_init_device(bcd2k); return 0; From 6688494804d87dcc8c936941f3f9969797af5419 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 9 Feb 2018 17:45:49 +0800 Subject: [PATCH 2171/3326] ptr_ring: fail early if queue occupies more than KMALLOC_MAX_SIZE commit 6e6e41c3112276288ccaf80c70916779b84bb276 upstream. To avoid slab to warn about exceeded size, fail early if queue occupies more than KMALLOC_MAX_SIZE. Reported-by: syzbot+e4d4f9ddd4295539735d@syzkaller.appspotmail.com Fixes: 2e0ab8ca83c12 ("ptr_ring: array based FIFO for pointers") Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/linux/ptr_ring.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6866df4f31b59d..bb515163bac3f2 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -447,6 +447,8 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) { + if (size * sizeof(void *) > KMALLOC_MAX_SIZE) + return NULL; return kcalloc(size, sizeof(void *), gfp); } From b517942f5126c04fff8ff341cdd6a28b0a8de84f Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 9 Feb 2018 17:45:50 +0800 Subject: [PATCH 2172/3326] ptr_ring: try vmalloc() when kmalloc() fails commit 0bf7800f1799b5b1fd7d4f024e9ece53ac489011 upstream. This patch switch to use kvmalloc_array() for using a vmalloc() fallback to help in case kmalloc() fails. Reported-by: syzbot+e4d4f9ddd4295539735d@syzkaller.appspotmail.com Fixes: 2e0ab8ca83c12 ("ptr_ring: array based FIFO for pointers") Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/linux/ptr_ring.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index bb515163bac3f2..35d125569e6848 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -445,11 +445,14 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, __PTR_RING_PEEK_CALL_v; \ }) +/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See + * documentation for vmalloc for which of them are legal. + */ static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) { if (size * sizeof(void *) > KMALLOC_MAX_SIZE) return NULL; - return kcalloc(size, sizeof(void *), gfp); + return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); } static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) @@ -582,7 +585,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, spin_unlock(&(r)->producer_lock); spin_unlock_irqrestore(&(r)->consumer_lock, flags); - kfree(old); + kvfree(old); return 0; } @@ -622,7 +625,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, } for (i = 0; i < nrings; ++i) - kfree(queues[i]); + kvfree(queues[i]); kfree(queues); @@ -630,7 +633,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, nomem: while (--i >= 0) - kfree(queues[i]); + kvfree(queues[i]); kfree(queues); @@ -645,7 +648,7 @@ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) if (destroy) while ((ptr = ptr_ring_consume(r))) destroy(ptr); - kfree(r->queue); + kvfree(r->queue); } #endif /* _LINUX_PTR_RING_H */ From 116df867dbc12297d0e04cd6540ecad401f4c060 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Tue, 28 Nov 2017 18:51:12 -0500 Subject: [PATCH 2173/3326] selinux: ensure the context is NUL terminated in security_context_to_sid_core() commit ef28df55ac27e1e5cd122e19fa311d886d47a756 upstream. The syzbot/syzkaller automated tests found a problem in security_context_to_sid_core() during early boot (before we load the SELinux policy) where we could potentially feed context strings without NUL terminators into the strcmp() function. We already guard against this during normal operation (after the SELinux policy has been loaded) by making a copy of the context strings and explicitly adding a NUL terminator to the end. 
The patch extends this protection to the early boot case (no loaded policy) by moving the context copy earlier in security_context_to_sid_core(). Reported-by: syzbot Signed-off-by: Paul Moore Reviewed-By: William Roberts Signed-off-by: Greg Kroah-Hartman --- security/selinux/ss/services.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index e4a1c0dc561a40..338af05e06fa1b 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -1413,27 +1413,25 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len, if (!scontext_len) return -EINVAL; + /* Copy the string to allow changes and ensure a NUL terminator */ + scontext2 = kmemdup_nul(scontext, scontext_len, gfp_flags); + if (!scontext2) + return -ENOMEM; + if (!ss_initialized) { int i; for (i = 1; i < SECINITSID_NUM; i++) { - if (!strcmp(initial_sid_to_string[i], scontext)) { + if (!strcmp(initial_sid_to_string[i], scontext2)) { *sid = i; - return 0; + goto out; } } *sid = SECINITSID_KERNEL; - return 0; + goto out; } *sid = SECSID_NULL; - /* Copy the string so that we can modify the copy as we parse it. */ - scontext2 = kmalloc(scontext_len + 1, gfp_flags); - if (!scontext2) - return -ENOMEM; - memcpy(scontext2, scontext, scontext_len); - scontext2[scontext_len] = 0; - if (force) { /* Save another copy for storing in uninterpreted form */ rc = -ENOMEM; From ca181454e726e47434566d9a126e9cfc29db48ae Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Tue, 5 Dec 2017 17:17:43 -0500 Subject: [PATCH 2174/3326] selinux: skip bounded transition processing if the policy isn't loaded commit 4b14752ec4e0d87126e636384cf37c8dd9df157c upstream. We can't do anything reasonable in security_bounded_transition() if we don't have a policy loaded, and in fact we could run into problems with some of the code inside expecting a policy. Fix these problems like we do many others in security/selinux/ss/services.c by checking to see if the policy is loaded (ss_initialized) and returning quickly if it isn't. Reported-by: syzbot Signed-off-by: Paul Moore Acked-by: Stephen Smalley Reviewed-by: James Morris Signed-off-by: Greg Kroah-Hartman --- security/selinux/ss/services.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 338af05e06fa1b..c9c031e3d1ae86 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -867,6 +867,9 @@ int security_bounded_transition(u32 old_sid, u32 new_sid) int index; int rc; + if (!ss_initialized) + return 0; + read_lock(&policy_rwlock); rc = -EINVAL; From 9bae74042eb17eeaed782f85438a7a131fe68282 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 2 Nov 2017 09:52:27 -0400 Subject: [PATCH 2175/3326] media: pvrusb2: properly check endpoint types commit 72c27a68a2a3f650f0dc7891ee98f02283fc11af upstream. As syzkaller detected, pvrusb2 driver submits bulk urb withount checking the the endpoint type is actually blunk. Add a check. 
usb 1-1: BOGUS urb xfer, pipe 3 != type 1 ------------[ cut here ]------------ WARNING: CPU: 1 PID: 2713 at drivers/usb/core/urb.c:449 usb_submit_urb+0xf8a/0x11d0 Modules linked in: CPU: 1 PID: 2713 Comm: pvrusb2-context Not tainted 4.14.0-rc1-42251-gebb2c2437d80 #210 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 task: ffff88006b7a18c0 task.stack: ffff880069978000 RIP: 0010:usb_submit_urb+0xf8a/0x11d0 drivers/usb/core/urb.c:448 RSP: 0018:ffff88006997f990 EFLAGS: 00010286 RAX: 0000000000000029 RBX: ffff880063661900 RCX: 0000000000000000 RDX: 0000000000000029 RSI: ffffffff86876d60 RDI: ffffed000d32ff24 RBP: ffff88006997fa90 R08: 1ffff1000d32fdca R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 1ffff1000d32ff39 R13: 0000000000000001 R14: 0000000000000003 R15: ffff880068bbed68 FS: 0000000000000000(0000) GS:ffff88006c600000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000001032000 CR3: 000000006a0ff000 CR4: 00000000000006f0 Call Trace: pvr2_send_request_ex+0xa57/0x1d80 drivers/media/usb/pvrusb2/pvrusb2-hdw.c:3645 pvr2_hdw_check_firmware drivers/media/usb/pvrusb2/pvrusb2-hdw.c:1812 pvr2_hdw_setup_low drivers/media/usb/pvrusb2/pvrusb2-hdw.c:2107 pvr2_hdw_setup drivers/media/usb/pvrusb2/pvrusb2-hdw.c:2250 pvr2_hdw_initialize+0x548/0x3c10 drivers/media/usb/pvrusb2/pvrusb2-hdw.c:2327 pvr2_context_check drivers/media/usb/pvrusb2/pvrusb2-context.c:118 pvr2_context_thread_func+0x361/0x8c0 drivers/media/usb/pvrusb2/pvrusb2-context.c:167 kthread+0x3a1/0x470 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 Code: 48 8b 85 30 ff ff ff 48 8d b8 98 00 00 00 e8 ee 82 89 fe 45 89 e8 44 89 f1 4c 89 fa 48 89 c6 48 c7 c7 40 c0 ea 86 e8 30 1b dc fc <0f> ff e9 9b f7 ff ff e8 aa 95 25 fd e9 80 f7 ff ff e8 50 74 f3 ---[ end trace 6919030503719da6 ]--- Signed-off-by: Andrey Konovalov Signed-off-by: Greg Kroah-Hartman Signed-off-by: Mauro Carvalho Chehab --- drivers/media/usb/pvrusb2/pvrusb2-hdw.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index ad5b25b896990e..44975061b95360 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -3642,6 +3642,12 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw, hdw); hdw->ctl_write_urb->actual_length = 0; hdw->ctl_write_pend_flag = !0; + if (usb_urb_ep_type_check(hdw->ctl_write_urb)) { + pvr2_trace( + PVR2_TRACE_ERROR_LEGS, + "Invalid write control endpoint"); + return -EINVAL; + } status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL); if (status < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, @@ -3666,6 +3672,12 @@ status); hdw); hdw->ctl_read_urb->actual_length = 0; hdw->ctl_read_pend_flag = !0; + if (usb_urb_ep_type_check(hdw->ctl_read_urb)) { + pvr2_trace( + PVR2_TRACE_ERROR_LEGS, + "Invalid read control endpoint"); + return -EINVAL; + } status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL); if (status < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, From 392e03283a3ddddf7c48dcc002b8668e1612a578 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 18 Dec 2017 16:40:26 -0800 Subject: [PATCH 2176/3326] crypto: x86/twofish-3way - Fix %rbp usage commit d8c7fe9f2a486a6e5f0d5229ca43807af5ab22c6 upstream. Using %rbp as a temporary register breaks frame pointer convention and breaks stack traces when unwinding from an interrupt in the crypto code. 
In twofish-3way, we can't simply replace %rbp with another register because there are none available. Instead, we use the stack to hold the values that %rbp, %r11, and %r12 were holding previously. Each of these values represents the half of the output from the previous Feistel round that is being passed on unchanged to the following round. They are only used once per round, when they are exchanged with %rax, %rbx, and %rcx. As a result, we free up 3 registers (one per block) and can reassign them so that %rbp is not used, and additionally %r14 and %r15 are not used so they do not need to be saved/restored. There may be a small overhead caused by replacing 'xchg REG, REG' with the needed sequence 'mov MEM, REG; mov REG, MEM; mov REG, REG' once per round. But, counterintuitively, when I tested "ctr-twofish-3way" on a Haswell processor, the new version was actually about 2% faster. (Perhaps 'xchg' is not as well optimized as plain moves.) Reported-by: syzbot Signed-off-by: Eric Biggers Reviewed-by: Josh Poimboeuf Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- arch/x86/crypto/twofish-x86_64-asm_64-3way.S | 112 ++++++++++--------- 1 file changed, 60 insertions(+), 52 deletions(-) diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S index 1c3b7ceb36d24c..e7273a606a07ab 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S @@ -55,29 +55,31 @@ #define RAB1bl %bl #define RAB2bl %cl +#define CD0 0x0(%rsp) +#define CD1 0x8(%rsp) +#define CD2 0x10(%rsp) + +# used only before/after all rounds #define RCD0 %r8 #define RCD1 %r9 #define RCD2 %r10 -#define RCD0d %r8d -#define RCD1d %r9d -#define RCD2d %r10d - -#define RX0 %rbp -#define RX1 %r11 -#define RX2 %r12 +# used only during rounds +#define RX0 %r8 +#define RX1 %r9 +#define RX2 %r10 -#define RX0d %ebp -#define RX1d %r11d -#define RX2d %r12d +#define RX0d %r8d +#define RX1d %r9d +#define RX2d %r10d -#define RY0 %r13 -#define RY1 %r14 -#define RY2 %r15 +#define RY0 %r11 +#define RY1 %r12 +#define RY2 %r13 -#define RY0d %r13d -#define RY1d %r14d -#define RY2d %r15d +#define RY0d %r11d +#define RY1d %r12d +#define RY2d %r13d #define RT0 %rdx #define RT1 %rsi @@ -85,6 +87,8 @@ #define RT0d %edx #define RT1d %esi +#define RT1bl %sil + #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ movzbl ab ## bl, tmp2 ## d; \ movzbl ab ## bh, tmp1 ## d; \ @@ -92,6 +96,11 @@ op1##l T0(CTX, tmp2, 4), dst ## d; \ op2##l T1(CTX, tmp1, 4), dst ## d; +#define swap_ab_with_cd(ab, cd, tmp) \ + movq cd, tmp; \ + movq ab, cd; \ + movq tmp, ab; + /* * Combined G1 & G2 function. Reordered with help of rotates to have moves * at begining. 
@@ -110,15 +119,15 @@ /* G1,2 && G2,2 */ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \ - xchgq cd ## 0, ab ## 0; \ + swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \ - xchgq cd ## 1, ab ## 1; \ + swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \ - xchgq cd ## 2, ab ## 2; + swap_ab_with_cd(ab ## 2, cd ## 2, RT0); #define enc_round_end(ab, x, y, n) \ addl y ## d, x ## d; \ @@ -168,6 +177,16 @@ decrypt_round3(ba, dc, (n*2)+1); \ decrypt_round3(ba, dc, (n*2)); +#define push_cd() \ + pushq RCD2; \ + pushq RCD1; \ + pushq RCD0; + +#define pop_cd() \ + popq RCD0; \ + popq RCD1; \ + popq RCD2; + #define inpack3(in, n, xy, m) \ movq 4*(n)(in), xy ## 0; \ xorq w+4*m(CTX), xy ## 0; \ @@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way) * %rdx: src, RIO * %rcx: bool, if true: xor output */ - pushq %r15; - pushq %r14; pushq %r13; pushq %r12; - pushq %rbp; pushq %rbx; pushq %rcx; /* bool xor */ @@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way) inpack_enc3(); - encrypt_cycle3(RAB, RCD, 0); - encrypt_cycle3(RAB, RCD, 1); - encrypt_cycle3(RAB, RCD, 2); - encrypt_cycle3(RAB, RCD, 3); - encrypt_cycle3(RAB, RCD, 4); - encrypt_cycle3(RAB, RCD, 5); - encrypt_cycle3(RAB, RCD, 6); - encrypt_cycle3(RAB, RCD, 7); + push_cd(); + encrypt_cycle3(RAB, CD, 0); + encrypt_cycle3(RAB, CD, 1); + encrypt_cycle3(RAB, CD, 2); + encrypt_cycle3(RAB, CD, 3); + encrypt_cycle3(RAB, CD, 4); + encrypt_cycle3(RAB, CD, 5); + encrypt_cycle3(RAB, CD, 6); + encrypt_cycle3(RAB, CD, 7); + pop_cd(); popq RIO; /* dst */ - popq %rbp; /* bool xor */ + popq RT1; /* bool xor */ - testb %bpl, %bpl; + testb RT1bl, RT1bl; jnz .L__enc_xor3; outunpack_enc3(mov); popq %rbx; - popq %rbp; popq %r12; popq %r13; - popq %r14; - popq %r15; ret; .L__enc_xor3: outunpack_enc3(xor); popq %rbx; - popq %rbp; popq %r12; popq %r13; - popq %r14; - popq %r15; ret; ENDPROC(__twofish_enc_blk_3way) @@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way) * %rsi: dst * %rdx: src, RIO */ - pushq %r15; - pushq %r14; pushq %r13; pushq %r12; - pushq %rbp; pushq %rbx; pushq %rsi; /* dst */ inpack_dec3(); - decrypt_cycle3(RAB, RCD, 7); - decrypt_cycle3(RAB, RCD, 6); - decrypt_cycle3(RAB, RCD, 5); - decrypt_cycle3(RAB, RCD, 4); - decrypt_cycle3(RAB, RCD, 3); - decrypt_cycle3(RAB, RCD, 2); - decrypt_cycle3(RAB, RCD, 1); - decrypt_cycle3(RAB, RCD, 0); + push_cd(); + decrypt_cycle3(RAB, CD, 7); + decrypt_cycle3(RAB, CD, 6); + decrypt_cycle3(RAB, CD, 5); + decrypt_cycle3(RAB, CD, 4); + decrypt_cycle3(RAB, CD, 3); + decrypt_cycle3(RAB, CD, 2); + decrypt_cycle3(RAB, CD, 1); + decrypt_cycle3(RAB, CD, 0); + pop_cd(); popq RIO; /* dst */ outunpack_dec3(); popq %rbx; - popq %rbp; popq %r12; popq %r13; - popq %r14; - popq %r15; ret; ENDPROC(twofish_dec_blk_3way) From 747ad3d315923647ca4d79d92b28b512e85cd96e Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Fri, 5 Jan 2018 11:14:08 -0800 Subject: [PATCH 2177/3326] staging: android: ion: Add __GFP_NOWARN for system contig heap commit 0c75f10312a35b149b2cebb1832316b35c2337ca upstream. syzbot reported a warning from Ion: WARNING: CPU: 1 PID: 3485 at mm/page_alloc.c:3926 ... 
__alloc_pages_nodemask+0x9fb/0xd80 mm/page_alloc.c:4252 alloc_pages_current+0xb6/0x1e0 mm/mempolicy.c:2036 alloc_pages include/linux/gfp.h:492 [inline] ion_system_contig_heap_allocate+0x40/0x2c0 drivers/staging/android/ion/ion_system_heap.c:374 ion_buffer_create drivers/staging/android/ion/ion.c:93 [inline] ion_alloc+0x2c1/0x9e0 drivers/staging/android/ion/ion.c:420 ion_ioctl+0x26d/0x380 drivers/staging/android/ion/ion-ioctl.c:84 vfs_ioctl fs/ioctl.c:46 [inline] do_vfs_ioctl+0x1b1/0x1520 fs/ioctl.c:686 SYSC_ioctl fs/ioctl.c:701 [inline] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:692 This is a warning about attempting to allocate order > MAX_ORDER. This is coming from a userspace Ion allocation request. Since userspace is free to request however much memory it wants (and the kernel is free to deny its allocation), silence the allocation attempt with __GFP_NOWARN in case it fails. Reported-by: syzbot+76e7efc4748495855a4d@syzkaller.appspotmail.com Reported-by: syzbot Signed-off-by: Laura Abbott Signed-off-by: Greg Kroah-Hartman --- drivers/staging/android/ion/ion_system_heap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 4dc5d7a589c28b..b6ece18e6a8817 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -371,7 +371,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, unsigned long i; int ret; - page = alloc_pages(low_order_gfp_flags, order); + page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order); if (!page) return -ENOMEM; From 2c565a9538a13e0b427ff7c3fbd2ee8e8cd59709 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Fri, 5 Jan 2018 11:14:09 -0800 Subject: [PATCH 2178/3326] staging: android: ion: Switch from WARN to pr_warn commit e4e179a844f52e907e550f887d0a2171f1508af1 upstream. Syzbot reported a warning with Ion: WARNING: CPU: 0 PID: 3502 at drivers/staging/android/ion/ion-ioctl.c:73 ion_ioctl+0x2db/0x380 drivers/staging/android/ion/ion-ioctl.c:73 Kernel panic - not syncing: panic_on_warn set ... This is a warning that validation of the ioctl fields failed. This was deliberately added as a warning to make it very obvious to developers that something needed to be fixed. In reality, this is overkill and disturbs fuzzing. Switch to pr_warn for a message instead. Reported-by: syzbot+fa2d5f63ee5904a0115a@syzkaller.appspotmail.com Reported-by: syzbot Signed-off-by: Laura Abbott Signed-off-by: Greg Kroah-Hartman --- drivers/staging/android/ion/ion-ioctl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c index d9f8b1424da18b..021a956db1a894 100644 --- a/drivers/staging/android/ion/ion-ioctl.c +++ b/drivers/staging/android/ion/ion-ioctl.c @@ -71,8 +71,10 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EFAULT; ret = validate_ioctl_arg(cmd, &data); - if (WARN_ON_ONCE(ret)) + if (ret) { + pr_warn_once("%s: ioctl validate failed\n", __func__); return ret; + } if (!(dir & _IOC_WRITE)) memset(&data, 0, sizeof(data)); From 2cc50a1946e69400399575f8ea63c2131baa83b7 Mon Sep 17 00:00:00 2001 From: Douglas Gilbert Date: Sun, 14 Jan 2018 17:00:48 -0500 Subject: [PATCH 2179/3326] blk_rq_map_user_iov: fix error override commit 69e0927b3774563c19b5fb32e91d75edc147fb62 upstream. During stress tests by syzkaller on the sg driver the block layer infrequently returns EINVAL. 
Closer inspection shows the block layer was trying to return ENOMEM (which is much more understandable) but for some reason overroad that useful error. Patch below does not show this (unchanged) line: ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy); That 'ret' was being overridden when that function failed. Signed-off-by: Douglas Gilbert Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- block/blk-map.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/blk-map.c b/block/blk-map.c index 368daa02714e31..e31be14da8ea1a 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -126,7 +126,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, unsigned long align = q->dma_pad_mask | queue_dma_alignment(q); struct bio *bio = NULL; struct iov_iter i; - int ret; + int ret = -EINVAL; if (!iter_is_iovec(iter)) goto fail; @@ -155,7 +155,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, __blk_rq_unmap_user(bio); fail: rq->bio = NULL; - return -EINVAL; + return ret; } EXPORT_SYMBOL(blk_rq_map_user_iov); From 7b5ec5505945ee2ba6f6dbab51483a1102720623 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Wed, 13 Dec 2017 10:46:40 +0100 Subject: [PATCH 2180/3326] KVM: x86: fix escape of guest dr6 to the host MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit efdab992813fb2ed825745625b83c05032e9cda2 upstream. syzkaller reported: WARNING: CPU: 0 PID: 12927 at arch/x86/kernel/traps.c:780 do_debug+0x222/0x250 CPU: 0 PID: 12927 Comm: syz-executor Tainted: G OE 4.15.0-rc2+ #16 RIP: 0010:do_debug+0x222/0x250 Call Trace: <#DB> debug+0x3e/0x70 RIP: 0010:copy_user_enhanced_fast_string+0x10/0x20 _copy_from_user+0x5b/0x90 SyS_timer_create+0x33/0x80 entry_SYSCALL_64_fastpath+0x23/0x9a The testcase sets a watchpoint (with perf_event_open) on a buffer that is passed to timer_create() as the struct sigevent argument. In timer_create(), copy_from_user()'s rep movsb triggers the BP. The testcase also sets the debug registers for the guest. However, KVM only restores host debug registers when the host has active watchpoints, which triggers a race condition when running the testcase with multiple threads. The guest's DR6.BS bit can escape to the host before another thread invokes timer_create(), and do_debug() complains. The fix is to respect do_debug()'s dr6 invariant when leaving KVM. Reported-by: Dmitry Vyukov Cc: Paolo Bonzini Cc: Radim Krčmář Cc: David Hildenbrand Cc: Dmitry Vyukov Reviewed-by: David Hildenbrand Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8b7ede04e8a347..0dcd7bf45dc11c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2926,6 +2926,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_host_tsc = rdtsc(); + /* + * If userspace has set any breakpoints or watchpoints, dr6 is restored + * on every vmexit, but if not, we might have a stale dr6 from the + * guest. do_debug expects dr6 to be cleared after it runs, do the same. 
+ */ + set_debugreg(0, 6); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, From 4249e8af8108afadc6316d9e4815f3333c9770d0 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 6 Feb 2018 15:40:28 -0800 Subject: [PATCH 2181/3326] kcov: detect double association with a single task commit a77660d231f8b3d84fd23ed482e0964f7aa546d6 upstream. Currently KCOV_ENABLE does not check if the current task is already associated with another kcov descriptor. As the result it is possible to associate a single task with more than one kcov descriptor, which later leads to a memory leak of the old descriptor. This relation is really meant to be one-to-one (task has only one back link). Extend validation to detect such misuse. Link: http://lkml.kernel.org/r/20180122082520.15716-1-dvyukov@google.com Fixes: 5c9a8750a640 ("kernel: add kcov code coverage") Signed-off-by: Dmitry Vyukov Reported-by: Shankara Pailoor Cc: Dmitry Vyukov Cc: syzbot Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/kcov.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/kcov.c b/kernel/kcov.c index fc6af9e1308b7a..b11ef6e51f7e76 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c @@ -225,9 +225,9 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd, if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED || kcov->area == NULL) return -EINVAL; - if (kcov->t != NULL) - return -EBUSY; t = current; + if (kcov->t != NULL || t->kcov != NULL) + return -EBUSY; /* Cache in task struct for performance. */ t->kcov_size = kcov->size; t->kcov_area = kcov->area; From 6609d112229fff5dbccf57bbc31facffc1ba06dd Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Thu, 28 Dec 2017 09:48:54 +0100 Subject: [PATCH 2182/3326] netfilter: x_tables: fix int overflow in xt_alloc_table_info() commit 889c604fd0b5f6d3b8694ade229ee44124de1127 upstream. syzkaller triggered OOM kills by passing ipt_replace.size = -1 to IPT_SO_SET_REPLACE. The root cause is that SMP_ALIGN() in xt_alloc_table_info() causes int overflow and the size check passes when it should not. SMP_ALIGN() is no longer needed leftover. Remove SMP_ALIGN() call in xt_alloc_table_info(). Reported-by: syzbot+4396883fa8c4f64e0175@syzkaller.appspotmail.com Signed-off-by: Dmitry Vyukov Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/netfilter/x_tables.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index d8571f4142080a..20f7856fec16aa 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -39,7 +39,6 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte "); MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); -#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) #define XT_PCPU_BLOCK_SIZE 4096 struct compat_delta { @@ -1000,7 +999,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size) return NULL; /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ - if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages) + if ((size >> PAGE_SHIFT) + 2 > totalram_pages) return NULL; info = kvmalloc(sz, GFP_KERNEL); From f25f048d3bde992239b7e019467b4fb4832bac99 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 24 Jan 2018 17:16:09 -0800 Subject: [PATCH 2183/3326] netfilter: x_tables: avoid out-of-bounds reads in xt_request_find_{match|target} commit da17c73b6eb74aad3c3c0654394635675b623b3e upstream. 
It looks like syzbot found its way into netfilter territory. Issue here is that @name comes from user space and might not be null terminated. Out-of-bound reads happen, KASAN is not happy. v2 added similar fix for xt_request_find_target(), as Florian advised. Signed-off-by: Eric Dumazet Reported-by: syzbot Acked-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/netfilter/x_tables.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 20f7856fec16aa..60c92158a2cd56 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -209,6 +209,9 @@ xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) { struct xt_match *match; + if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN) + return ERR_PTR(-EINVAL); + match = xt_find_match(nfproto, name, revision); if (IS_ERR(match)) { request_module("%st_%s", xt_prefix[nfproto], name); @@ -251,6 +254,9 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) { struct xt_target *target; + if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN) + return ERR_PTR(-EINVAL); + target = xt_find_target(af, name, revision); if (IS_ERR(target)) { request_module("%st_%s", xt_prefix[af], name); From 2852a7dd15fe311e96be2d36a7361faac471b7a9 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 30 Jan 2018 15:21:34 +0100 Subject: [PATCH 2184/3326] netfilter: ipt_CLUSTERIP: fix out-of-bounds accesses in clusterip_tg_check() commit 1a38956cce5eabd7b74f94bab70265e4df83165e upstream. Commit 136e92bbec0a switched local_nodes from an array to a bitmask but did not add proper bounds checks. As the result clusterip_config_init_nodelist() can both over-read ipt_clusterip_tgt_info.local_nodes and over-write clusterip_config.local_nodes. Add bounds checks for both. 
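Spelled out, the validation has two parts because the node numbers are 1-based and the destination is now a bitmap: the count may not exceed the user-supplied array, and every node number n must satisfy n - 1 < number of bits in the bitmap. A generic sketch of that check (function and parameter names are illustrative; the hunk below applies the same logic to ipt_clusterip_tgt_info directly):

static bool clusterip_nodes_valid(const u16 *nodes, unsigned int num,
                                  unsigned int max_nodes, size_t bitmap_bytes)
{
        unsigned int i;

        if (num > max_nodes)            /* would over-read the source array */
                return false;

        for (i = 0; i < num; i++) {
                /* the 1-based id must map to a bit inside the bitmap; an
                 * id of 0 also fails, since the subtraction is evaluated
                 * as a huge unsigned value in this comparison */
                if (nodes[i] - 1 >= bitmap_bytes * 8)
                        return false;
        }
        return true;
}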
Fixes: 136e92bbec0a ("[NETFILTER] CLUSTERIP: use a bitmap to store node responsibility data") Signed-off-by: Dmitry Vyukov Reported-by: syzbot Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/ipv4/netfilter/ipt_CLUSTERIP.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 17b4ca562944c3..24a8c2e63e3d5d 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -431,7 +431,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) struct ipt_clusterip_tgt_info *cipinfo = par->targinfo; const struct ipt_entry *e = par->entryinfo; struct clusterip_config *config; - int ret; + int ret, i; if (par->nft_compat) { pr_err("cannot use CLUSTERIP target from nftables compat\n"); @@ -450,8 +450,18 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) pr_info("Please specify destination IP\n"); return -EINVAL; } - - /* FIXME: further sanity checks */ + if (cipinfo->num_local_nodes > ARRAY_SIZE(cipinfo->local_nodes)) { + pr_info("bad num_local_nodes %u\n", cipinfo->num_local_nodes); + return -EINVAL; + } + for (i = 0; i < cipinfo->num_local_nodes; i++) { + if (cipinfo->local_nodes[i] - 1 >= + sizeof(config->local_nodes) * 8) { + pr_info("bad local_nodes[%d] %u\n", + i, cipinfo->local_nodes[i]); + return -EINVAL; + } + } config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1); if (!config) { From 516c855cf51462678eb50ec52da845c10734d0bf Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Tue, 30 Jan 2018 19:01:40 +0100 Subject: [PATCH 2185/3326] netfilter: on sockopt() acquire sock lock only in the required scope commit 3f34cfae1238848fd53f25e5c8fd59da57901f4b upstream. Syzbot reported several deadlocks in the netfilter area caused by rtnl lock and socket lock being acquired with a different order on different code paths, leading to backtraces like the following one: ====================================================== WARNING: possible circular locking dependency detected 4.15.0-rc9+ #212 Not tainted ------------------------------------------------------ syzkaller041579/3682 is trying to acquire lock: (sk_lock-AF_INET6){+.+.}, at: [<000000008775e4dd>] lock_sock include/net/sock.h:1463 [inline] (sk_lock-AF_INET6){+.+.}, at: [<000000008775e4dd>] do_ipv6_setsockopt.isra.8+0x3c5/0x39d0 net/ipv6/ipv6_sockglue.c:167 but task is already holding lock: (rtnl_mutex){+.+.}, at: [<000000004342eaa9>] rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (rtnl_mutex){+.+.}: __mutex_lock_common kernel/locking/mutex.c:756 [inline] __mutex_lock+0x16f/0x1a80 kernel/locking/mutex.c:893 mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:908 rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 register_netdevice_notifier+0xad/0x860 net/core/dev.c:1607 tee_tg_check+0x1a0/0x280 net/netfilter/xt_TEE.c:106 xt_check_target+0x22c/0x7d0 net/netfilter/x_tables.c:845 check_target net/ipv6/netfilter/ip6_tables.c:538 [inline] find_check_entry.isra.7+0x935/0xcf0 net/ipv6/netfilter/ip6_tables.c:580 translate_table+0xf52/0x1690 net/ipv6/netfilter/ip6_tables.c:749 do_replace net/ipv6/netfilter/ip6_tables.c:1165 [inline] do_ip6t_set_ctl+0x370/0x5f0 net/ipv6/netfilter/ip6_tables.c:1691 nf_sockopt net/netfilter/nf_sockopt.c:106 [inline] nf_setsockopt+0x67/0xc0 net/netfilter/nf_sockopt.c:115 ipv6_setsockopt+0x115/0x150 net/ipv6/ipv6_sockglue.c:928 udpv6_setsockopt+0x45/0x80 net/ipv6/udp.c:1422 sock_common_setsockopt+0x95/0xd0 net/core/sock.c:2978 SYSC_setsockopt net/socket.c:1849 [inline] SyS_setsockopt+0x189/0x360 net/socket.c:1828 entry_SYSCALL_64_fastpath+0x29/0xa0 -> #0 (sk_lock-AF_INET6){+.+.}: lock_acquire+0x1d5/0x580 kernel/locking/lockdep.c:3914 lock_sock_nested+0xc2/0x110 net/core/sock.c:2780 lock_sock include/net/sock.h:1463 [inline] do_ipv6_setsockopt.isra.8+0x3c5/0x39d0 net/ipv6/ipv6_sockglue.c:167 ipv6_setsockopt+0xd7/0x150 net/ipv6/ipv6_sockglue.c:922 udpv6_setsockopt+0x45/0x80 net/ipv6/udp.c:1422 sock_common_setsockopt+0x95/0xd0 net/core/sock.c:2978 SYSC_setsockopt net/socket.c:1849 [inline] SyS_setsockopt+0x189/0x360 net/socket.c:1828 entry_SYSCALL_64_fastpath+0x29/0xa0 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(rtnl_mutex); lock(sk_lock-AF_INET6); lock(rtnl_mutex); lock(sk_lock-AF_INET6); *** DEADLOCK *** 1 lock held by syzkaller041579/3682: #0: (rtnl_mutex){+.+.}, at: [<000000004342eaa9>] rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 The problem, as Florian noted, is that nf_setsockopt() is always called with the socket held, even if the lock itself is required only for very tight scopes and only for some operation. This patch addresses the issues moving the lock_sock() call only where really needed, namely in ipv*_getorigdst(), so that nf_setsockopt() does not need anymore to acquire both locks. 
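The cure is the usual one for this kind of ordering problem: instead of widening one lock around a path that may also need the other, the socket lock is taken only around the reads it actually protects and everything else operates on a snapshot. Condensed from the ipv4 getorigdst() hunk below, where inet is the inet_sk(sk) pointer already available in that function (the ipv6 variant is analogous):

        struct nf_conntrack_tuple tuple;

        memset(&tuple, 0, sizeof(tuple));

        lock_sock(sk);                          /* held only while sampling socket state */
        tuple.src.u3.ip = inet->inet_rcv_saddr;
        tuple.src.u.tcp.port = inet->inet_sport;
        tuple.dst.u3.ip = inet->inet_daddr;
        tuple.dst.u.tcp.port = inet->inet_dport;
        tuple.src.l3num = PF_INET;
        tuple.dst.protonum = sk->sk_protocol;
        release_sock(sk);

        /* the conntrack lookup and copy_to_user() then run on the snapshot
         * with no socket lock held, so nf_setsockopt() callers that hold
         * rtnl never have to wait on sk_lock and the cycle is broken */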
Fixes: 22265a5c3c10 ("netfilter: xt_TEE: resolve oif using netdevice notifiers") Reported-by: syzbot+a4c2dc980ac1af699b36@syzkaller.appspotmail.com Suggested-by: Florian Westphal Signed-off-by: Paolo Abeni Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/ipv4/ip_sockglue.c | 14 ++++---------- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 6 +++++- net/ipv6/ipv6_sockglue.c | 17 +++++------------ net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 18 ++++++++++++------ 4 files changed, 26 insertions(+), 29 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 60fb1eb7d7d80d..c7df4969f80aad 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1251,11 +1251,8 @@ int ip_setsockopt(struct sock *sk, int level, if (err == -ENOPROTOOPT && optname != IP_HDRINCL && optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && - !ip_mroute_opt(optname)) { - lock_sock(sk); + !ip_mroute_opt(optname)) err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); - release_sock(sk); - } #endif return err; } @@ -1280,12 +1277,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname, if (err == -ENOPROTOOPT && optname != IP_HDRINCL && optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && - !ip_mroute_opt(optname)) { - lock_sock(sk); - err = compat_nf_setsockopt(sk, PF_INET, optname, - optval, optlen); - release_sock(sk); - } + !ip_mroute_opt(optname)) + err = compat_nf_setsockopt(sk, PF_INET, optname, optval, + optlen); #endif return err; } diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index fe374da4bc13ef..997a96896f1a60 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -218,15 +218,19 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) struct nf_conntrack_tuple tuple; memset(&tuple, 0, sizeof(tuple)); + + lock_sock(sk); tuple.src.u3.ip = inet->inet_rcv_saddr; tuple.src.u.tcp.port = inet->inet_sport; tuple.dst.u3.ip = inet->inet_daddr; tuple.dst.u.tcp.port = inet->inet_dport; tuple.src.l3num = PF_INET; tuple.dst.protonum = sk->sk_protocol; + release_sock(sk); /* We only do TCP and SCTP at the moment: is there a better way? 
*/ - if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) { + if (tuple.dst.protonum != IPPROTO_TCP && + tuple.dst.protonum != IPPROTO_SCTP) { pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n"); return -ENOPROTOOPT; } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 3b251760cb8c34..24b69384bdbff4 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -915,12 +915,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && - optname != IPV6_XFRM_POLICY) { - lock_sock(sk); - err = nf_setsockopt(sk, PF_INET6, optname, optval, - optlen); - release_sock(sk); - } + optname != IPV6_XFRM_POLICY) + err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen); #endif return err; } @@ -950,12 +946,9 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && - optname != IPV6_XFRM_POLICY) { - lock_sock(sk); - err = compat_nf_setsockopt(sk, PF_INET6, optname, - optval, optlen); - release_sock(sk); - } + optname != IPV6_XFRM_POLICY) + err = compat_nf_setsockopt(sk, PF_INET6, optname, optval, + optlen); #endif return err; } diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index fe01dc953c56ab..b807478c4f7f9c 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -226,20 +226,27 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = { static int ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len) { - const struct inet_sock *inet = inet_sk(sk); + struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 }; const struct ipv6_pinfo *inet6 = inet6_sk(sk); + const struct inet_sock *inet = inet_sk(sk); const struct nf_conntrack_tuple_hash *h; struct sockaddr_in6 sin6; - struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 }; struct nf_conn *ct; + __be32 flow_label; + int bound_dev_if; + lock_sock(sk); tuple.src.u3.in6 = sk->sk_v6_rcv_saddr; tuple.src.u.tcp.port = inet->inet_sport; tuple.dst.u3.in6 = sk->sk_v6_daddr; tuple.dst.u.tcp.port = inet->inet_dport; tuple.dst.protonum = sk->sk_protocol; + bound_dev_if = sk->sk_bound_dev_if; + flow_label = inet6->flow_label; + release_sock(sk); - if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) + if (tuple.dst.protonum != IPPROTO_TCP && + tuple.dst.protonum != IPPROTO_SCTP) return -ENOPROTOOPT; if (*len < 0 || (unsigned int) *len < sizeof(sin6)) @@ -257,14 +264,13 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len) sin6.sin6_family = AF_INET6; sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port; - sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK; + sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK; memcpy(&sin6.sin6_addr, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6, sizeof(sin6.sin6_addr)); nf_ct_put(ct); - sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, - sk->sk_bound_dev_if); + sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if); return copy_to_user(user, &sin6, sizeof(sin6)) ? 
-EFAULT : 0; } From be6c08bf9aea46775232e48241b55e768362b9e0 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Wed, 31 Jan 2018 15:02:47 -0800 Subject: [PATCH 2186/3326] netfilter: xt_cgroup: initialize info->priv in cgroup_mt_check_v1() commit ba7cd5d95f25cc6005f687dabdb4e7a6063adda9 upstream. xt_cgroup_info_v1->priv is an internal pointer only used for kernel, we should not trust what user-space provides. Reported-by: Fixes: c38c4597e4bf ("netfilter: implement xt_cgroup cgroup2 path match") Cc: Pablo Neira Ayuso Signed-off-by: Cong Wang Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/netfilter/xt_cgroup.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c index 1db1ce59079fba..891f4e7e8ea7f5 100644 --- a/net/netfilter/xt_cgroup.c +++ b/net/netfilter/xt_cgroup.c @@ -52,6 +52,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par) return -EINVAL; } + info->priv = NULL; if (info->has_path) { cgrp = cgroup_get_from_path(info->path); if (IS_ERR(cgrp)) { From 6e12516df1367e80f25fc204b6002db2a700a6ad Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Mon, 5 Feb 2018 14:41:45 -0800 Subject: [PATCH 2187/3326] netfilter: xt_RATEEST: acquire xt_rateest_mutex for hash insert commit 7dc68e98757a8eccf8ca7a53a29b896f1eef1f76 upstream. rateest_hash is supposed to be protected by xt_rateest_mutex, and, as suggested by Eric, lookup and insert should be atomic, so we should acquire the xt_rateest_mutex once for both. So introduce a non-locking helper for internal use and keep the locking one for external. Reported-by: Fixes: 5859034d7eb8 ("[NETFILTER]: x_tables: add RATEEST target") Signed-off-by: Cong Wang Reviewed-by: Florian Westphal Reviewed-by: Eric Dumazet Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/netfilter/xt_RATEEST.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index 498b54fd04d7fa..141c295191f653 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c @@ -39,23 +39,31 @@ static void xt_rateest_hash_insert(struct xt_rateest *est) hlist_add_head(&est->list, &rateest_hash[h]); } -struct xt_rateest *xt_rateest_lookup(const char *name) +static struct xt_rateest *__xt_rateest_lookup(const char *name) { struct xt_rateest *est; unsigned int h; h = xt_rateest_hash(name); - mutex_lock(&xt_rateest_mutex); hlist_for_each_entry(est, &rateest_hash[h], list) { if (strcmp(est->name, name) == 0) { est->refcnt++; - mutex_unlock(&xt_rateest_mutex); return est; } } - mutex_unlock(&xt_rateest_mutex); + return NULL; } + +struct xt_rateest *xt_rateest_lookup(const char *name) +{ + struct xt_rateest *est; + + mutex_lock(&xt_rateest_mutex); + est = __xt_rateest_lookup(name); + mutex_unlock(&xt_rateest_mutex); + return est; +} EXPORT_SYMBOL_GPL(xt_rateest_lookup); void xt_rateest_put(struct xt_rateest *est) @@ -100,8 +108,10 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) net_get_random_once(&jhash_rnd, sizeof(jhash_rnd)); - est = xt_rateest_lookup(info->name); + mutex_lock(&xt_rateest_mutex); + est = __xt_rateest_lookup(info->name); if (est) { + mutex_unlock(&xt_rateest_mutex); /* * If estimator parameters are specified, they must match the * existing estimator. 
@@ -139,11 +149,13 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) info->est = est; xt_rateest_hash_insert(est); + mutex_unlock(&xt_rateest_mutex); return 0; err2: kfree(est); err1: + mutex_unlock(&xt_rateest_mutex); return ret; } From c64534f82608a8f9d58d7c74d4a12122bdb2c3fe Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Thu, 30 Nov 2017 11:11:28 -0800 Subject: [PATCH 2188/3326] rds: tcp: correctly sequence cleanup on netns deletion. commit 681648e67d43cf269c5590ecf021ed481f4551fc upstream. Commit 8edc3affc077 ("rds: tcp: Take explicit refcounts on struct net") introduces a regression in rds-tcp netns cleanup. The cleanup_net(), (and thus rds_tcp_dev_event notification) is only called from put_net() when all netns refcounts go to 0, but this cannot happen if the rds_connection itself is holding a c_net ref that it expects to release in rds_tcp_kill_sock. Instead, the rds_tcp_kill_sock callback should make sure to tear down state carefully, ensuring that the socket teardown is only done after all data-structures and workqs that depend on it are quiesced. The original motivation for commit 8edc3affc077 ("rds: tcp: Take explicit refcounts on struct net") was to resolve a race condition reported by syzkaller where workqs for tx/rx/connect were triggered after the namespace was deleted. Those worker threads should have been cancelled/flushed before socket tear-down and indeed, rds_conn_path_destroy() does try to sequence this by doing /* cancel cp_send_w */ /* cancel cp_recv_w */ /* flush cp_down_w */ /* free data structures */ Here the "flush cp_down_w" will trigger rds_conn_shutdown and thus invoke rds_tcp_conn_path_shutdown() to close the tcp socket, so that we ought to have satisfied the requirement that "socket-close is done after all other dependent state is quiesced". However, rds_conn_shutdown has a bug in that it *always* triggers the reconnect workq (and if connection is successful, we always restart tx/rx workqs so with the right timing, we risk the race conditions reported by syzkaller). Netns deletion is like module teardown- no need to restart a reconnect in this case. We can use the c_destroy_in_prog bit to avoid restarting the reconnect. Fixes: 8edc3affc077 ("rds: tcp: Take explicit refcounts on struct net") Signed-off-by: Sowmini Varadhan Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/rds/connection.c | 3 ++- net/rds/rds.h | 6 +++--- net/rds/tcp.c | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/net/rds/connection.c b/net/rds/connection.c index 7ee2d5d68b7816..9efc82c665b5a9 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -366,6 +366,8 @@ void rds_conn_shutdown(struct rds_conn_path *cp) * to the conn hash, so we never trigger a reconnect on this * conn - the reconnect is always triggered by the active peer. 
*/ cancel_delayed_work_sync(&cp->cp_conn_w); + if (conn->c_destroy_in_prog) + return; rcu_read_lock(); if (!hlist_unhashed(&conn->c_hash_node)) { rcu_read_unlock(); @@ -445,7 +447,6 @@ void rds_conn_destroy(struct rds_connection *conn) */ rds_cong_remove_conn(conn); - put_net(conn->c_net); kfree(conn->c_path); kmem_cache_free(rds_conn_slab, conn); diff --git a/net/rds/rds.h b/net/rds/rds.h index c349c71babff45..d09f6c1facb4a8 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -150,7 +150,7 @@ struct rds_connection { /* Protocol version */ unsigned int c_version; - struct net *c_net; + possible_net_t c_net; struct list_head c_map_item; unsigned long c_map_queued; @@ -165,13 +165,13 @@ struct rds_connection { static inline struct net *rds_conn_net(struct rds_connection *conn) { - return conn->c_net; + return read_pnet(&conn->c_net); } static inline void rds_conn_net_set(struct rds_connection *conn, struct net *net) { - conn->c_net = get_net(net); + write_pnet(&conn->c_net, net); } #define RDS_FLAG_CONG_BITMAP 0x01 diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 6b7ee71f40c63e..85bea7a7623071 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -527,7 +527,7 @@ static void rds_tcp_kill_sock(struct net *net) rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w); spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { - struct net *c_net = tc->t_cpath->cp_conn->c_net; + struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); if (net != c_net || !tc->t_sock) continue; @@ -586,7 +586,7 @@ static void rds_tcp_sysctl_reset(struct net *net) spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { - struct net *c_net = tc->t_cpath->cp_conn->c_net; + struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); if (net != c_net || !tc->t_sock) continue; From 83ee02de2c8d958b598498a81323635f02bc4b1e Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Thu, 30 Nov 2017 11:11:29 -0800 Subject: [PATCH 2189/3326] rds: tcp: atomically purge entries from rds_tcp_conn_list during netns delete commit f10b4cff98c6977668434fbf5dd58695eeca2897 upstream. The rds_tcp_kill_sock() function parses the rds_tcp_conn_list to find the rds_connection entries marked for deletion as part of the netns deletion under the protection of the rds_tcp_conn_lock. Since the rds_tcp_conn_list tracks rds_tcp_connections (which have a 1:1 mapping with rds_conn_path), multiple tc entries in the rds_tcp_conn_list will map to a single rds_connection, and will be deleted as part of the rds_conn_destroy() operation that is done outside the rds_tcp_conn_lock. The rds_tcp_conn_list traversal done under the protection of rds_tcp_conn_lock should not leave any doomed tc entries in the list after the rds_tcp_conn_lock is released, else another concurrently executing netns delete (for a different netns) thread may trip on these entries. Reported-by: syzbot Signed-off-by: Sowmini Varadhan Acked-by: Santosh Shilimkar Signed-off-by: David S.
Miller Signed-off-by: Greg Kroah-Hartman --- net/rds/tcp.c | 9 +++++++-- net/rds/tcp.h | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 85bea7a7623071..2a08bf75d008e8 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -306,7 +306,8 @@ static void rds_tcp_conn_free(void *arg) rdsdebug("freeing tc %p\n", tc); spin_lock_irqsave(&rds_tcp_conn_lock, flags); - list_del(&tc->t_tcp_node); + if (!tc->t_tcp_node_detached) + list_del(&tc->t_tcp_node); spin_unlock_irqrestore(&rds_tcp_conn_lock, flags); kmem_cache_free(rds_tcp_conn_slab, tc); @@ -531,8 +532,12 @@ static void rds_tcp_kill_sock(struct net *net) if (net != c_net || !tc->t_sock) continue; - if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) + if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { list_move_tail(&tc->t_tcp_node, &tmp_list); + } else { + list_del(&tc->t_tcp_node); + tc->t_tcp_node_detached = true; + } } spin_unlock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) { diff --git a/net/rds/tcp.h b/net/rds/tcp.h index 1aafbf7c301141..e7858ee8ed8b7c 100644 --- a/net/rds/tcp.h +++ b/net/rds/tcp.h @@ -12,6 +12,7 @@ struct rds_tcp_incoming { struct rds_tcp_connection { struct list_head t_tcp_node; + bool t_tcp_node_detached; struct rds_conn_path *t_cpath; /* t_conn_path_lock synchronizes the connection establishment between * rds_tcp_accept_one and rds_tcp_conn_path_connect From 701ba0df95205187f1afc4f3f520bff57ba4001d Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Tue, 12 Dec 2017 11:39:04 -0500 Subject: [PATCH 2190/3326] net: avoid skb_warn_bad_offload on IS_ERR commit 8d74e9f88d65af8bb2e095aff506aa6eac755ada upstream. skb_warn_bad_offload warns when packets enter the GSO stack that require skb_checksum_help or vice versa. Do not warn on arbitrary bad packets. Packet sockets can craft many. Syzkaller was able to demonstrate another one with eth_type games. In particular, suppress the warning when segmentation returns an error, which is for reasons other than checksum offload. See also commit 36c92474498a ("net: WARN if skb_checksum_help() is called on skb requiring segmentation") for context on this warning. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index ffee085f035713..d33bbed640b18a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2792,7 +2792,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, segs = skb_mac_gso_segment(skb, features); - if (unlikely(skb_needs_check(skb, tx_path))) + if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) skb_warn_bad_offload(skb); return segs; From 14b2ad0bb2fa7bbc5b5b90ec3b3b3e7a68aa10c2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 27 Jan 2018 10:58:43 -0800 Subject: [PATCH 2191/3326] net_sched: gen_estimator: fix lockdep splat commit 40ca54e3a686f13117f3de0c443f8026dadf7c44 upstream. syzbot reported a lockdep splat in gen_new_estimator() / est_fetch_counters() when attempting to lock est->stats_lock. Since est_fetch_counters() is called from BH context from timer interrupt, we need to block BH as well when calling it from process context. Most qdiscs use per cpu counters and are immune to the problem, but net/sched/act_api.c and net/netfilter/xt_RATEEST.c are using a spinlock to protect their data. 
They both call gen_new_estimator() while object is created and not yet alive, so this bug could not trigger a deadlock, only a lockdep splat. Fixes: 1c0d32fde5bd ("net_sched: gen_estimator: complete rewrite of rate estimators") Signed-off-by: Eric Dumazet Reported-by: syzbot Acked-by: Cong Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/gen_estimator.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 7c1ffd6f950172..00ecec4891f383 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -159,7 +159,11 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, est->intvl_log = intvl_log; est->cpu_bstats = cpu_bstats; + if (stats_lock) + local_bh_disable(); est_fetch_counters(est, &b); + if (stats_lock) + local_bh_enable(); est->last_bytes = b.bytes; est->last_packets = b.packets; old = rcu_dereference_protected(*rate_est, 1); From 1f4f5777e5fa10f8d4c50aeece7d3eb2e9865d09 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 10 Jan 2018 17:34:45 +0100 Subject: [PATCH 2192/3326] ASoC: ux500: add MODULE_LICENSE tag commit 1783c9d7cb7bc3181b9271665959b87280d98d8e upstream. This adds MODULE_LICENSE/AUTHOR/DESCRIPTION tags to the ux500 platform drivers, to avoid these build warnings: WARNING: modpost: missing MODULE_LICENSE() in sound/soc/ux500/snd-soc-ux500-plat-dma.o WARNING: modpost: missing MODULE_LICENSE() in sound/soc/ux500/snd-soc-ux500-mach-mop500.o The company no longer exists, so the email addresses of the authors don't work any more, but I've added them anyway for consistency. Signed-off-by: Arnd Bergmann Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/ux500/mop500.c | 4 ++++ sound/soc/ux500/ux500_pcm.c | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c index 070a6880980e89..c60a57797640e8 100644 --- a/sound/soc/ux500/mop500.c +++ b/sound/soc/ux500/mop500.c @@ -163,3 +163,7 @@ static struct platform_driver snd_soc_mop500_driver = { }; module_platform_driver(snd_soc_mop500_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ASoC MOP500 board driver"); +MODULE_AUTHOR("Ola Lilja"); diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c index f12c01dddc8dd5..d35ba7700f462d 100644 --- a/sound/soc/ux500/ux500_pcm.c +++ b/sound/soc/ux500/ux500_pcm.c @@ -165,3 +165,8 @@ int ux500_pcm_unregister_platform(struct platform_device *pdev) return 0; } EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform); + +MODULE_AUTHOR("Ola Lilja"); +MODULE_AUTHOR("Roger Nilsson"); +MODULE_DESCRIPTION("ASoC UX500 driver"); +MODULE_LICENSE("GPL v2"); From 52d4c1978cb49e7228c4d8c8bcddfdb734a51a3f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 15 Jan 2018 17:04:22 +0100 Subject: [PATCH 2193/3326] video: fbdev/mmp: add MODULE_LICENSE commit c1530ac5a3ce93a1f02adabc4508b5fbf862dfe2 upstream. Kbuild complains about the lack of a license tag in this driver: WARNING: modpost: missing MODULE_LICENSE() in drivers/video/fbdev/mmp/mmp_disp.o This adds the license, author and description tags. 
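For illustration only (editorial note, not part of either patch): the general shape of the module metadata block that both patches append at the end of the driver source files; modpost emits the "missing MODULE_LICENSE()" warning whenever a module is built without a license tag. The strings below are placeholders.

    #include <linux/module.h>

    /* ... driver code ... */

    MODULE_AUTHOR("Example Author");
    MODULE_DESCRIPTION("Example platform driver");
    MODULE_LICENSE("GPL v2");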
Signed-off-by: Arnd Bergmann Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/mmp/core.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c index a0f496049db7dc..3a6bb6561ba0fa 100644 --- a/drivers/video/fbdev/mmp/core.c +++ b/drivers/video/fbdev/mmp/core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include